1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * |
* "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * |
9 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
10 | * * |
11 | * This program is free software; you can redistribute it and/or * |
12 | * modify it under the terms of version 2 of the GNU General * |
13 | * Public License as published by the Free Software Foundation. * |
14 | * This program is distributed in the hope that it will be useful. * |
15 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * |
16 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * |
17 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * |
18 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * |
19 | * TO BE LEGALLY INVALID. See the GNU General Public License for * |
20 | * more details, a copy of which can be found in the file COPYING * |
21 | * included with this package. * |
22 | *******************************************************************/ |
23 | #include <linux/pci.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/interrupt.h> |
26 | #include <linux/export.h> |
27 | #include <linux/delay.h> |
28 | #include <asm/unaligned.h> |
29 | #include <linux/t10-pi.h> |
30 | #include <linux/crc-t10dif.h> |
31 | #include <linux/blk-cgroup.h> |
32 | #include <net/checksum.h> |
33 | |
34 | #include <scsi/scsi.h> |
35 | #include <scsi/scsi_device.h> |
36 | #include <scsi/scsi_eh.h> |
37 | #include <scsi/scsi_host.h> |
38 | #include <scsi/scsi_tcq.h> |
39 | #include <scsi/scsi_transport_fc.h> |
40 | |
41 | #include "lpfc_version.h" |
42 | #include "lpfc_hw4.h" |
43 | #include "lpfc_hw.h" |
44 | #include "lpfc_sli.h" |
45 | #include "lpfc_sli4.h" |
46 | #include "lpfc_nl.h" |
47 | #include "lpfc_disc.h" |
48 | #include "lpfc.h" |
49 | #include "lpfc_scsi.h" |
50 | #include "lpfc_logmsg.h" |
51 | #include "lpfc_crtn.h" |
52 | #include "lpfc_vport.h" |
53 | |
54 | #define LPFC_RESET_WAIT 2 |
55 | #define LPFC_ABORT_WAIT 2 |
56 | |
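/*
 * Human-readable names for the SCSI protection operations, indexed by the
 * enum scsi_prot_operations value returned by scsi_get_prot_op(); used in
 * log messages.
 */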
57 | static char *dif_op_str[] = { |
"PROT_NORMAL",
"PROT_READ_INSERT",
"PROT_WRITE_STRIP",
"PROT_READ_STRIP",
"PROT_WRITE_INSERT",
"PROT_READ_PASS",
"PROT_WRITE_PASS",
65 | }; |
66 | |
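/*
 * Layout of an 8-byte T10 Protection Information (DIF) tuple as it appears
 * on the wire; all fields are big-endian.
 */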
67 | struct scsi_dif_tuple { |
68 | __be16 guard_tag; /* Checksum */ |
69 | __be16 app_tag; /* Opaque storage */ |
70 | __be32 ref_tag; /* Target LBA or indirect LBA */ |
71 | }; |
72 | |
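/*
 * Look up the remote port data attached to a scsi_device. When cfg_fof is
 * enabled, sdev->hostdata points to an lpfc_device_data structure and the
 * rport data is reached through its rport_data member; otherwise
 * sdev->hostdata is the lpfc_rport_data itself.
 */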
73 | static struct lpfc_rport_data * |
74 | lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) |
75 | { |
76 | struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; |
77 | |
78 | if (vport->phba->cfg_fof) |
79 | return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; |
80 | else |
81 | return (struct lpfc_rport_data *)sdev->hostdata; |
82 | } |
83 | |
84 | static void |
85 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); |
86 | static void |
87 | lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); |
88 | static int |
89 | lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); |
90 | |
91 | /** |
92 | * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. |
93 | * @phba: Pointer to HBA object. |
94 | * @lpfc_cmd: lpfc scsi command object pointer. |
95 | * |
96 | * This function is called from the lpfc_prep_task_mgmt_cmd function to |
97 | * set the last bit in the response sge entry. |
98 | **/ |
99 | static void |
100 | lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, |
101 | struct lpfc_io_buf *lpfc_cmd) |
102 | { |
103 | struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
104 | if (sgl) { |
105 | sgl += 1; |
106 | sgl->word2 = le32_to_cpu(sgl->word2); |
107 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
108 | sgl->word2 = cpu_to_le32(sgl->word2); |
109 | } |
110 | } |
111 | |
112 | /** |
113 | * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread |
114 | * @phba: The Hba for which this call is being executed. |
115 | * |
* This routine is called when there is a resource error in the driver or
* firmware. This routine posts the WORKER_RAMP_DOWN_QUEUE event for @phba and
* posts at most one event each second. This routine wakes up the worker
* thread of @phba to process the WORKER_RAMP_DOWN_QUEUE event.
120 | * |
121 | * This routine should be called with no lock held. |
122 | **/ |
123 | void |
124 | lpfc_rampdown_queue_depth(struct lpfc_hba *phba) |
125 | { |
126 | unsigned long flags; |
127 | uint32_t evt_posted; |
128 | unsigned long expires; |
129 | |
130 | spin_lock_irqsave(&phba->hbalock, flags); |
atomic_inc(&phba->num_rsrc_err);
132 | phba->last_rsrc_error_time = jiffies; |
133 | |
134 | expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; |
135 | if (time_after(expires, jiffies)) { |
spin_unlock_irqrestore(&phba->hbalock, flags);
137 | return; |
138 | } |
139 | |
140 | phba->last_ramp_down_time = jiffies; |
141 | |
spin_unlock_irqrestore(&phba->hbalock, flags);
143 | |
144 | spin_lock_irqsave(&phba->pport->work_port_lock, flags); |
145 | evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; |
146 | if (!evt_posted) |
147 | phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; |
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
149 | |
150 | if (!evt_posted) |
151 | lpfc_worker_wake_up(phba); |
152 | return; |
153 | } |
154 | |
155 | /** |
156 | * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler |
157 | * @phba: The Hba for which this call is being executed. |
158 | * |
159 | * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker |
* thread. This routine reduces the queue depth for all SCSI devices on each
* vport associated with @phba.
162 | **/ |
163 | void |
164 | lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) |
165 | { |
166 | struct lpfc_vport **vports; |
167 | struct Scsi_Host *shost; |
168 | struct scsi_device *sdev; |
169 | unsigned long new_queue_depth; |
170 | unsigned long num_rsrc_err; |
171 | int i; |
172 | |
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
174 | |
175 | /* |
176 | * The error and success command counters are global per |
177 | * driver instance. If another handler has already |
178 | * operated on this error event, just exit. |
179 | */ |
180 | if (num_rsrc_err == 0) |
181 | return; |
182 | |
183 | vports = lpfc_create_vport_work_array(phba); |
184 | if (vports != NULL) |
185 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
187 | shost_for_each_device(sdev, shost) { |
188 | if (num_rsrc_err >= sdev->queue_depth) |
189 | new_queue_depth = 1; |
190 | else |
191 | new_queue_depth = sdev->queue_depth - |
192 | num_rsrc_err; |
193 | scsi_change_queue_depth(sdev, new_queue_depth); |
194 | } |
195 | } |
196 | lpfc_destroy_vport_work_array(phba, vports); |
atomic_set(&phba->num_rsrc_err, 0);
198 | } |
199 | |
200 | /** |
201 | * lpfc_scsi_dev_block - set all scsi hosts to block state |
202 | * @phba: Pointer to HBA context object. |
203 | * |
* This function walks the vport list and sets each SCSI host to the blocked
* state by invoking the fc_remote_port_delete() routine. This function is
* invoked by EEH when the device's PCI slot has been permanently disabled.
207 | **/ |
208 | void |
209 | lpfc_scsi_dev_block(struct lpfc_hba *phba) |
210 | { |
211 | struct lpfc_vport **vports; |
212 | struct Scsi_Host *shost; |
213 | struct scsi_device *sdev; |
214 | struct fc_rport *rport; |
215 | int i; |
216 | |
217 | vports = lpfc_create_vport_work_array(phba); |
218 | if (vports != NULL) |
219 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
221 | shost_for_each_device(sdev, shost) { |
222 | rport = starget_to_rport(scsi_target(sdev)); |
223 | fc_remote_port_delete(rport); |
224 | } |
225 | } |
226 | lpfc_destroy_vport_work_array(phba, vports); |
227 | } |
228 | |
229 | /** |
230 | * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec |
231 | * @vport: The virtual port for which this call being executed. |
232 | * @num_to_alloc: The requested number of buffers to allocate. |
233 | * |
* This routine allocates a SCSI buffer for a device with the SLI-3 interface
* spec. The SCSI buffer contains all the information needed to initiate a
* SCSI I/O. The non-DMAable buffer region contains information to build
237 | * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, |
238 | * and the initial BPL. In addition to allocating memory, the FCP CMND and |
239 | * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. |
240 | * |
241 | * Return codes: |
242 | * int - number of scsi buffers that were allocated. |
243 | * 0 = failure, less than num_to_alloc is a partial failure. |
244 | **/ |
245 | static int |
246 | lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) |
247 | { |
248 | struct lpfc_hba *phba = vport->phba; |
249 | struct lpfc_io_buf *psb; |
250 | struct ulp_bde64 *bpl; |
251 | IOCB_t *iocb; |
252 | dma_addr_t pdma_phys_fcp_cmd; |
253 | dma_addr_t pdma_phys_fcp_rsp; |
254 | dma_addr_t pdma_phys_sgl; |
255 | uint16_t iotag; |
256 | int bcnt, bpl_size; |
257 | |
258 | bpl_size = phba->cfg_sg_dma_buf_size - |
259 | (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); |
260 | |
261 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
"9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
263 | num_to_alloc, phba->cfg_sg_dma_buf_size, |
264 | (int)sizeof(struct fcp_cmnd), |
265 | (int)sizeof(struct fcp_rsp), bpl_size); |
266 | |
267 | for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { |
psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
269 | if (!psb) |
270 | break; |
271 | |
272 | /* |
273 | * Get memory from the pci pool to map the virt space to pci |
274 | * bus space for an I/O. The DMA buffer includes space for the |
275 | * struct fcp_cmnd, struct fcp_rsp and the number of bde's |
276 | * necessary to support the sg_tablesize. |
277 | */ |
psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
280 | if (!psb->data) { |
kfree(psb);
282 | break; |
283 | } |
284 | |
285 | |
286 | /* Allocate iotag for psb->cur_iocbq. */ |
287 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); |
288 | if (iotag == 0) { |
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
292 | break; |
293 | } |
294 | psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP; |
295 | |
296 | psb->fcp_cmnd = psb->data; |
297 | psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); |
298 | psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + |
299 | sizeof(struct fcp_rsp); |
300 | |
301 | /* Initialize local short-hand pointers. */ |
302 | bpl = (struct ulp_bde64 *)psb->dma_sgl; |
303 | pdma_phys_fcp_cmd = psb->dma_handle; |
304 | pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); |
305 | pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + |
306 | sizeof(struct fcp_rsp); |
307 | |
308 | /* |
309 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance |
310 | * are sg list bdes. Initialize the first two and leave the |
311 | * rest for queuecommand. |
312 | */ |
313 | bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); |
314 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); |
315 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); |
316 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
317 | bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); |
318 | |
319 | /* Setup the physical region for the FCP RSP */ |
320 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); |
321 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); |
322 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); |
323 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
324 | bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); |
325 | |
326 | /* |
327 | * Since the IOCB for the FCP I/O is built into this |
328 | * lpfc_scsi_buf, initialize it with all known data now. |
329 | */ |
330 | iocb = &psb->cur_iocbq.iocb; |
331 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; |
332 | if ((phba->sli_rev == 3) && |
333 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { |
334 | /* fill in immediate fcp command BDE */ |
335 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; |
336 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); |
337 | iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, |
338 | unsli3.fcp_ext.icd); |
339 | iocb->un.fcpi64.bdl.addrHigh = 0; |
340 | iocb->ulpBdeCount = 0; |
341 | iocb->ulpLe = 0; |
342 | /* fill in response BDE */ |
343 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = |
344 | BUFF_TYPE_BDE_64; |
345 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = |
346 | sizeof(struct fcp_rsp); |
347 | iocb->unsli3.fcp_ext.rbde.addrLow = |
348 | putPaddrLow(pdma_phys_fcp_rsp); |
349 | iocb->unsli3.fcp_ext.rbde.addrHigh = |
350 | putPaddrHigh(pdma_phys_fcp_rsp); |
351 | } else { |
352 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; |
353 | iocb->un.fcpi64.bdl.bdeSize = |
354 | (2 * sizeof(struct ulp_bde64)); |
355 | iocb->un.fcpi64.bdl.addrLow = |
356 | putPaddrLow(pdma_phys_sgl); |
357 | iocb->un.fcpi64.bdl.addrHigh = |
358 | putPaddrHigh(pdma_phys_sgl); |
359 | iocb->ulpBdeCount = 1; |
360 | iocb->ulpLe = 1; |
361 | } |
362 | iocb->ulpClass = CLASS3; |
363 | psb->status = IOSTAT_SUCCESS; |
364 | /* Put it back into the SCSI buffer list */ |
365 | psb->cur_iocbq.io_buf = psb; |
366 | spin_lock_init(&psb->buf_lock); |
367 | lpfc_release_scsi_buf_s3(phba, psb); |
368 | |
369 | } |
370 | |
371 | return bcnt; |
372 | } |
373 | |
374 | /** |
* lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
376 | * @vport: pointer to lpfc vport data structure. |
377 | * |
378 | * This routine is invoked by the vport cleanup for deletions and the cleanup |
379 | * for an ndlp on removal. |
380 | **/ |
381 | void |
382 | lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) |
383 | { |
384 | struct lpfc_hba *phba = vport->phba; |
385 | struct lpfc_io_buf *psb, *next_psb; |
386 | struct lpfc_sli4_hdw_queue *qp; |
387 | unsigned long iflag = 0; |
388 | int idx; |
389 | |
390 | if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) |
391 | return; |
392 | |
393 | spin_lock_irqsave(&phba->hbalock, iflag); |
394 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
395 | qp = &phba->sli4_hba.hdwq[idx]; |
396 | |
spin_lock(&qp->abts_io_buf_list_lock);
398 | list_for_each_entry_safe(psb, next_psb, |
399 | &qp->lpfc_abts_io_buf_list, list) { |
400 | if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) |
401 | continue; |
402 | |
403 | if (psb->rdata && psb->rdata->pnode && |
404 | psb->rdata->pnode->vport == vport) |
405 | psb->rdata = NULL; |
406 | } |
spin_unlock(&qp->abts_io_buf_list_lock);
408 | } |
spin_unlock_irqrestore(&phba->hbalock, iflag);
410 | } |
411 | |
412 | /** |
413 | * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort |
414 | * @phba: pointer to lpfc hba data structure. |
415 | * @axri: pointer to the fcp xri abort wcqe structure. |
416 | * @idx: index into hdwq |
417 | * |
418 | * This routine is invoked by the worker thread to process a SLI4 fast-path |
419 | * FCP or NVME aborted xri. |
420 | **/ |
421 | void |
422 | lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, |
423 | struct sli4_wcqe_xri_aborted *axri, int idx) |
424 | { |
425 | u16 xri = 0; |
426 | u16 rxid = 0; |
427 | struct lpfc_io_buf *psb, *next_psb; |
428 | struct lpfc_sli4_hdw_queue *qp; |
429 | unsigned long iflag = 0; |
430 | struct lpfc_iocbq *iocbq; |
431 | int i; |
432 | struct lpfc_nodelist *ndlp; |
433 | int rrq_empty = 0; |
434 | struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; |
435 | struct scsi_cmnd *cmd; |
436 | int offline = 0; |
437 | |
438 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) |
439 | return; |
offline = pci_channel_offline(phba->pcidev);
441 | if (!offline) { |
442 | xri = bf_get(lpfc_wcqe_xa_xri, axri); |
443 | rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); |
444 | } |
445 | qp = &phba->sli4_hba.hdwq[idx]; |
446 | spin_lock_irqsave(&phba->hbalock, iflag); |
spin_lock(&qp->abts_io_buf_list_lock);
448 | list_for_each_entry_safe(psb, next_psb, |
449 | &qp->lpfc_abts_io_buf_list, list) { |
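/* When the PCI channel is offline, force every buffer on the aborted
 * list to match so it is cleaned up and released below.
 */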
450 | if (offline) |
451 | xri = psb->cur_iocbq.sli4_xritag; |
452 | if (psb->cur_iocbq.sli4_xritag == xri) { |
list_del_init(&psb->list);
454 | psb->flags &= ~LPFC_SBUF_XBUSY; |
455 | psb->status = IOSTAT_SUCCESS; |
456 | if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) { |
457 | qp->abts_nvme_io_bufs--; |
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!offline) {
lpfc_sli4_nvme_xri_aborted(phba, axri,
psb);
return;
}
lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_io_buf_list_lock);
468 | continue; |
469 | } |
470 | qp->abts_scsi_io_bufs--; |
spin_unlock(&qp->abts_io_buf_list_lock);
472 | |
473 | if (psb->rdata && psb->rdata->pnode) |
474 | ndlp = psb->rdata->pnode; |
475 | else |
476 | ndlp = NULL; |
477 | |
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
480 | if (ndlp && !offline) { |
481 | lpfc_set_rrq_active(phba, ndlp, |
482 | psb->cur_iocbq.sli4_lxritag, rxid, 1); |
483 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); |
484 | } |
485 | |
486 | if (phba->cfg_fcp_wait_abts_rsp || offline) { |
487 | spin_lock_irqsave(&psb->buf_lock, iflag); |
488 | cmd = psb->pCmd; |
489 | psb->pCmd = NULL; |
spin_unlock_irqrestore(&psb->buf_lock, iflag);
491 | |
492 | /* The sdev is not guaranteed to be valid post |
493 | * scsi_done upcall. |
494 | */ |
495 | if (cmd) |
496 | scsi_done(cmd); |
497 | |
498 | /* |
* We expect there is an abort thread waiting
* for command completion; wake up the thread.
501 | */ |
502 | spin_lock_irqsave(&psb->buf_lock, iflag); |
503 | psb->cur_iocbq.cmd_flag &= |
504 | ~LPFC_DRIVER_ABORTED; |
505 | if (psb->waitq) |
506 | wake_up(psb->waitq); |
spin_unlock_irqrestore(&psb->buf_lock, iflag);
508 | } |
509 | |
510 | lpfc_release_scsi_buf_s4(phba, psb); |
511 | if (rrq_empty) |
512 | lpfc_worker_wake_up(phba); |
513 | if (!offline) |
514 | return; |
515 | spin_lock_irqsave(&phba->hbalock, iflag); |
spin_lock(&qp->abts_io_buf_list_lock);
517 | continue; |
518 | } |
519 | } |
spin_unlock(&qp->abts_io_buf_list_lock);
521 | if (!offline) { |
522 | for (i = 1; i <= phba->sli.last_iotag; i++) { |
523 | iocbq = phba->sli.iocbq_lookup[i]; |
524 | |
525 | if (!(iocbq->cmd_flag & LPFC_IO_FCP) || |
526 | (iocbq->cmd_flag & LPFC_IO_LIBDFC)) |
527 | continue; |
528 | if (iocbq->sli4_xritag != xri) |
529 | continue; |
530 | psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); |
531 | psb->flags &= ~LPFC_SBUF_XBUSY; |
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
534 | lpfc_worker_wake_up(phba); |
535 | return; |
536 | } |
537 | } |
spin_unlock_irqrestore(&phba->hbalock, iflag);
539 | } |
540 | |
541 | /** |
542 | * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA |
543 | * @phba: The HBA for which this call is being executed. |
544 | * @ndlp: pointer to a node-list data structure. |
545 | * @cmnd: Pointer to scsi_cmnd data structure. |
546 | * |
* This routine removes a SCSI buffer from the head of the @phba
* lpfc_scsi_buf_list and returns it to the caller.
549 | * |
550 | * Return codes: |
551 | * NULL - Error |
552 | * Pointer to lpfc_scsi_buf - Success |
553 | **/ |
554 | static struct lpfc_io_buf * |
555 | lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
556 | struct scsi_cmnd *cmnd) |
557 | { |
558 | struct lpfc_io_buf *lpfc_cmd = NULL; |
559 | struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; |
560 | unsigned long iflag = 0; |
561 | |
562 | spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); |
563 | list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, |
564 | list); |
565 | if (!lpfc_cmd) { |
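/* The get list is empty; replenish it from the put list and retry. */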
spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
list_remove_head(scsi_buf_list_get, lpfc_cmd,
struct lpfc_io_buf, list);
spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
atomic_inc(&ndlp->cmd_pending);
578 | lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; |
579 | } |
580 | return lpfc_cmd; |
581 | } |
582 | /** |
583 | * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA |
584 | * @phba: The HBA for which this call is being executed. |
585 | * @ndlp: pointer to a node-list data structure. |
586 | * @cmnd: Pointer to scsi_cmnd data structure. |
587 | * |
* This routine removes a SCSI buffer from the head of the @hdwq io_buf_list
* and returns it to the caller.
590 | * |
591 | * Return codes: |
592 | * NULL - Error |
593 | * Pointer to lpfc_scsi_buf - Success |
594 | **/ |
595 | static struct lpfc_io_buf * |
596 | lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
597 | struct scsi_cmnd *cmnd) |
598 | { |
599 | struct lpfc_io_buf *lpfc_cmd; |
600 | struct lpfc_sli4_hdw_queue *qp; |
601 | struct sli4_sge *sgl; |
602 | dma_addr_t pdma_phys_fcp_rsp; |
603 | dma_addr_t pdma_phys_fcp_cmd; |
604 | uint32_t cpu, idx; |
605 | int tag; |
606 | struct fcp_cmd_rsp_buf *tmp = NULL; |
607 | |
608 | cpu = raw_smp_processor_id(); |
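/* Select the hardware queue from the request's blk-mq hardware context
 * when scheduling by hdwq, otherwise from the per-CPU affinity map.
 */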
609 | if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { |
tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
idx = blk_mq_unique_tag_to_hwq(tag);
} else {
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}

lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
!phba->cfg_xri_rebalancing);
618 | if (!lpfc_cmd) { |
619 | qp = &phba->sli4_hba.hdwq[idx]; |
620 | qp->empty_io_bufs++; |
621 | return NULL; |
622 | } |
623 | |
624 | /* Setup key fields in buffer that may have been changed |
625 | * if other protocols used this buffer. |
626 | */ |
627 | lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP; |
628 | lpfc_cmd->prot_seg_cnt = 0; |
629 | lpfc_cmd->seg_cnt = 0; |
630 | lpfc_cmd->timeout = 0; |
631 | lpfc_cmd->flags = 0; |
632 | lpfc_cmd->start_time = jiffies; |
633 | lpfc_cmd->waitq = NULL; |
634 | lpfc_cmd->cpu = cpu; |
635 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
636 | lpfc_cmd->prot_data_type = 0; |
637 | #endif |
tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
if (!tmp) {
lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
641 | return NULL; |
642 | } |
643 | |
644 | lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; |
645 | lpfc_cmd->fcp_rsp = tmp->fcp_rsp; |
646 | |
647 | /* |
648 | * The first two SGEs are the FCP_CMD and FCP_RSP. |
649 | * The balance are sg list bdes. Initialize the |
650 | * first two and leave the rest for queuecommand. |
651 | */ |
652 | sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
653 | pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; |
654 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); |
655 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); |
656 | sgl->word2 = le32_to_cpu(sgl->word2); |
657 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
658 | sgl->word2 = cpu_to_le32(sgl->word2); |
659 | sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); |
660 | sgl++; |
661 | |
662 | /* Setup the physical region for the FCP RSP */ |
663 | pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); |
664 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); |
665 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); |
666 | sgl->word2 = le32_to_cpu(sgl->word2); |
667 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
668 | sgl->word2 = cpu_to_le32(sgl->word2); |
669 | sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); |
670 | |
671 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
atomic_inc(&ndlp->cmd_pending);
673 | lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; |
674 | } |
675 | return lpfc_cmd; |
676 | } |
677 | /** |
678 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA |
679 | * @phba: The HBA for which this call is being executed. |
680 | * @ndlp: pointer to a node-list data structure. |
681 | * @cmnd: Pointer to scsi_cmnd data structure. |
682 | * |
* This routine removes a SCSI buffer from the head of the @phba
* lpfc_scsi_buf_list and returns it to the caller.
685 | * |
686 | * Return codes: |
687 | * NULL - Error |
688 | * Pointer to lpfc_scsi_buf - Success |
689 | **/ |
690 | static struct lpfc_io_buf* |
691 | lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
692 | struct scsi_cmnd *cmnd) |
693 | { |
694 | return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); |
695 | } |
696 | |
697 | /** |
698 | * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list |
699 | * @phba: The Hba for which this call is being executed. |
700 | * @psb: The scsi buffer which is being released. |
701 | * |
702 | * This routine releases @psb scsi buffer by adding it to tail of @phba |
703 | * lpfc_scsi_buf_list list. |
704 | **/ |
705 | static void |
706 | lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) |
707 | { |
708 | unsigned long iflag = 0; |
709 | |
710 | psb->seg_cnt = 0; |
711 | psb->prot_seg_cnt = 0; |
712 | |
713 | spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); |
714 | psb->pCmd = NULL; |
715 | psb->cur_iocbq.cmd_flag = LPFC_IO_FCP; |
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
718 | } |
719 | |
720 | /** |
721 | * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. |
722 | * @phba: The Hba for which this call is being executed. |
723 | * @psb: The scsi buffer which is being released. |
724 | * |
725 | * This routine releases @psb scsi buffer by adding it to tail of @hdwq |
* io_buf_list list. For SLI4, XRIs are tied to the SCSI buffer
* and cannot be reused for at least RA_TOV amount of time if the
* I/O was aborted.
729 | **/ |
730 | static void |
731 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) |
732 | { |
733 | struct lpfc_sli4_hdw_queue *qp; |
734 | unsigned long iflag = 0; |
735 | |
736 | psb->seg_cnt = 0; |
737 | psb->prot_seg_cnt = 0; |
738 | |
739 | qp = psb->hdwq; |
740 | if (psb->flags & LPFC_SBUF_XBUSY) { |
741 | spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); |
742 | if (!phba->cfg_fcp_wait_abts_rsp) |
743 | psb->pCmd = NULL; |
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else {
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
749 | } |
750 | } |
751 | |
752 | /** |
753 | * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. |
754 | * @phba: The Hba for which this call is being executed. |
755 | * @psb: The scsi buffer which is being released. |
756 | * |
757 | * This routine releases @psb scsi buffer by adding it to tail of @phba |
758 | * lpfc_scsi_buf_list list. |
759 | **/ |
760 | static void |
761 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) |
762 | { |
763 | if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) |
atomic_dec(&psb->ndlp->cmd_pending);
765 | |
766 | psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; |
767 | phba->lpfc_release_scsi_buf(phba, psb); |
768 | } |
769 | |
770 | /** |
771 | * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB |
772 | * @data: A pointer to the immediate command data portion of the IOCB. |
773 | * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. |
774 | * |
775 | * The routine copies the entire FCP command from @fcp_cmnd to @data while |
776 | * byte swapping the data to big endian format for transmission on the wire. |
777 | **/ |
778 | static void |
779 | lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd) |
780 | { |
781 | int i, j; |
782 | |
783 | for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); |
784 | i += sizeof(uint32_t), j++) { |
785 | ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); |
786 | } |
787 | } |
788 | |
789 | /** |
790 | * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec |
791 | * @phba: The Hba for which this call is being executed. |
792 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
793 | * |
794 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd |
795 | * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans |
796 | * through sg elements and format the bde. This routine also initializes all |
797 | * IOCB fields which are dependent on scsi command request buffer. |
798 | * |
799 | * Return codes: |
800 | * 1 - Error |
801 | * 0 - Success |
802 | **/ |
803 | static int |
804 | lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
805 | { |
806 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
807 | struct scatterlist *sgel = NULL; |
808 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
809 | struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; |
810 | struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; |
811 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; |
812 | struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; |
813 | dma_addr_t physaddr; |
814 | uint32_t num_bde = 0; |
815 | int nseg, datadir = scsi_cmnd->sc_data_direction; |
816 | |
817 | /* |
818 | * There are three possibilities here - use scatter-gather segment, use |
819 | * the single mapping, or neither. Start the lpfc command prep by |
820 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first |
821 | * data bde entry. |
822 | */ |
823 | bpl += 2; |
if (scsi_sg_count(scsi_cmnd)) {
825 | /* |
826 | * The driver stores the segment count returned from dma_map_sg |
827 | * because this a count of dma-mappings used to map the use_sg |
828 | * pages. They are not guaranteed to be the same for those |
829 | * architectures that implement an IOMMU. |
830 | */ |
831 | |
832 | nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), |
833 | scsi_sg_count(scsi_cmnd), datadir); |
834 | if (unlikely(!nseg)) |
835 | return 1; |
836 | |
837 | lpfc_cmd->seg_cnt = nseg; |
838 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
839 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
840 | "9064 BLKGRD: %s: Too many sg segments" |
841 | " from dma_map_sg. Config %d, seg_cnt" |
" %d\n", __func__, phba->cfg_sg_seg_cnt,
843 | lpfc_cmd->seg_cnt); |
844 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
845 | lpfc_cmd->seg_cnt = 0; |
scsi_dma_unmap(scsi_cmnd);
847 | return 2; |
848 | } |
849 | |
850 | /* |
851 | * The driver established a maximum scatter-gather segment count |
852 | * during probe that limits the number of sg elements in any |
853 | * single scsi command. Just run through the seg_cnt and format |
854 | * the bde's. |
855 | * When using SLI-3 the driver will try to fit all the BDEs into |
856 | * the IOCB. If it can't then the BDEs get added to a BPL as it |
857 | * does for SLI-2 mode. |
858 | */ |
859 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { |
860 | physaddr = sg_dma_address(sgel); |
861 | if (phba->sli_rev == 3 && |
862 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && |
863 | !(iocbq->cmd_flag & DSS_SECURITY_OP) && |
864 | nseg <= LPFC_EXT_DATA_BDE_COUNT) { |
865 | data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
866 | data_bde->tus.f.bdeSize = sg_dma_len(sgel); |
867 | data_bde->addrLow = putPaddrLow(physaddr); |
868 | data_bde->addrHigh = putPaddrHigh(physaddr); |
869 | data_bde++; |
870 | } else { |
871 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
872 | bpl->tus.f.bdeSize = sg_dma_len(sgel); |
873 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
874 | bpl->addrLow = |
875 | le32_to_cpu(putPaddrLow(physaddr)); |
876 | bpl->addrHigh = |
877 | le32_to_cpu(putPaddrHigh(physaddr)); |
878 | bpl++; |
879 | } |
880 | } |
881 | } |
882 | |
883 | /* |
884 | * Finish initializing those IOCB fields that are dependent on the |
885 | * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is |
886 | * explicitly reinitialized and for SLI-3 the extended bde count is |
887 | * explicitly reinitialized since all iocb memory resources are reused. |
888 | */ |
889 | if (phba->sli_rev == 3 && |
890 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && |
891 | !(iocbq->cmd_flag & DSS_SECURITY_OP)) { |
892 | if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { |
893 | /* |
894 | * The extended IOCB format can only fit 3 BDE or a BPL. |
895 | * This I/O has more than 3 BDE so the 1st data bde will |
896 | * be a BPL that is filled in here. |
897 | */ |
898 | physaddr = lpfc_cmd->dma_handle; |
899 | data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; |
900 | data_bde->tus.f.bdeSize = (num_bde * |
901 | sizeof(struct ulp_bde64)); |
902 | physaddr += (sizeof(struct fcp_cmnd) + |
903 | sizeof(struct fcp_rsp) + |
904 | (2 * sizeof(struct ulp_bde64))); |
905 | data_bde->addrHigh = putPaddrHigh(physaddr); |
906 | data_bde->addrLow = putPaddrLow(physaddr); |
907 | /* ebde count includes the response bde and data bpl */ |
908 | iocb_cmd->unsli3.fcp_ext.ebde_count = 2; |
909 | } else { |
910 | /* ebde count includes the response bde and data bdes */ |
911 | iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); |
912 | } |
913 | } else { |
914 | iocb_cmd->un.fcpi64.bdl.bdeSize = |
915 | ((num_bde + 2) * sizeof(struct ulp_bde64)); |
916 | iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); |
917 | } |
918 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); |
919 | |
920 | /* |
921 | * Due to difference in data length between DIF/non-DIF paths, |
922 | * we need to set word 4 of IOCB here |
923 | */ |
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
926 | return 0; |
927 | } |
928 | |
929 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
930 | |
931 | /* Return BG_ERR_INIT if error injection is detected by Initiator */ |
932 | #define BG_ERR_INIT 0x1 |
933 | /* Return BG_ERR_TGT if error injection is detected by Target */ |
934 | #define BG_ERR_TGT 0x2 |
935 | /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ |
936 | #define BG_ERR_SWAP 0x10 |
937 | /* |
938 | * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for |
939 | * error injection |
940 | */ |
941 | #define BG_ERR_CHECK 0x20 |
942 | |
943 | /** |
944 | * lpfc_bg_err_inject - Determine if we should inject an error |
945 | * @phba: The Hba for which this call is being executed. |
946 | * @sc: The SCSI command to examine |
947 | * @reftag: (out) BlockGuard reference tag for transmitted data |
948 | * @apptag: (out) BlockGuard application tag for transmitted data |
949 | * @new_guard: (in) Value to replace CRC with if needed |
950 | * |
951 | * Returns BG_ERR_* bit mask or 0 if request ignored |
952 | **/ |
953 | static int |
954 | lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
955 | uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) |
956 | { |
957 | struct scatterlist *sgpe; /* s/g prot entry */ |
958 | struct lpfc_io_buf *lpfc_cmd = NULL; |
959 | struct scsi_dif_tuple *src = NULL; |
960 | struct lpfc_nodelist *ndlp; |
961 | struct lpfc_rport_data *rdata; |
uint32_t op = scsi_get_prot_op(sc);
963 | uint32_t blksize; |
964 | uint32_t numblks; |
965 | u32 lba; |
966 | int rc = 0; |
967 | int blockoff = 0; |
968 | |
969 | if (op == SCSI_PROT_NORMAL) |
970 | return 0; |
971 | |
sgpe = scsi_prot_sglist(sc);
lba = scsi_prot_ref_tag(sc);

/* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
blksize = scsi_prot_interval(sc);
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
979 | |
980 | /* Make sure we have the right LBA if one is specified */ |
981 | if (phba->lpfc_injerr_lba < (u64)lba || |
982 | (phba->lpfc_injerr_lba >= (u64)(lba + numblks))) |
983 | return 0; |
984 | if (sgpe) { |
985 | blockoff = phba->lpfc_injerr_lba - (u64)lba; |
986 | numblks = sg_dma_len(sgpe) / |
987 | sizeof(struct scsi_dif_tuple); |
988 | if (numblks < blockoff) |
989 | blockoff = numblks; |
990 | } |
991 | } |
992 | |
993 | /* Next check if we need to match the remote NPortID or WWPN */ |
rdata = lpfc_rport_data_from_scsi_device(sc->device);
995 | if (rdata && rdata->pnode) { |
996 | ndlp = rdata->pnode; |
997 | |
998 | /* Make sure we have the right NPortID if one is specified */ |
999 | if (phba->lpfc_injerr_nportid && |
1000 | (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) |
1001 | return 0; |
1002 | |
1003 | /* |
1004 | * Make sure we have the right WWPN if one is specified. |
1005 | * wwn[0] should be a non-zero NAA in a good WWPN. |
1006 | */ |
1007 | if (phba->lpfc_injerr_wwpn.u.wwn[0] && |
(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
sizeof(struct lpfc_name)) != 0))
1010 | return 0; |
1011 | } |
1012 | |
1013 | /* Setup a ptr to the protection data if the SCSI host provides it */ |
1014 | if (sgpe) { |
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1016 | src += blockoff; |
1017 | lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; |
1018 | } |
1019 | |
1020 | /* Should we change the Reference Tag */ |
1021 | if (reftag) { |
1022 | if (phba->lpfc_injerr_wref_cnt) { |
1023 | switch (op) { |
1024 | case SCSI_PROT_WRITE_PASS: |
1025 | if (src) { |
1026 | /* |
1027 | * For WRITE_PASS, force the error |
1028 | * to be sent on the wire. It should |
1029 | * be detected by the Target. |
1030 | * If blockoff != 0 error will be |
1031 | * inserted in middle of the IO. |
1032 | */ |
1033 | |
1034 | lpfc_printf_log(phba, KERN_ERR, |
1035 | LOG_TRACE_EVENT, |
1036 | "9076 BLKGRD: Injecting reftag error: " |
"write lba x%lx + x%x oldrefTag x%x\n",
1038 | (unsigned long)lba, blockoff, |
1039 | be32_to_cpu(src->ref_tag)); |
1040 | |
1041 | /* |
1042 | * Save the old ref_tag so we can |
1043 | * restore it on completion. |
1044 | */ |
1045 | if (lpfc_cmd) { |
1046 | lpfc_cmd->prot_data_type = |
1047 | LPFC_INJERR_REFTAG; |
1048 | lpfc_cmd->prot_data_segment = |
1049 | src; |
1050 | lpfc_cmd->prot_data = |
1051 | src->ref_tag; |
1052 | } |
1053 | src->ref_tag = cpu_to_be32(0xDEADBEEF); |
1054 | phba->lpfc_injerr_wref_cnt--; |
1055 | if (phba->lpfc_injerr_wref_cnt == 0) { |
1056 | phba->lpfc_injerr_nportid = 0; |
1057 | phba->lpfc_injerr_lba = |
1058 | LPFC_INJERR_LBA_OFF; |
1059 | memset(&phba->lpfc_injerr_wwpn, |
1060 | 0, sizeof(struct lpfc_name)); |
1061 | } |
1062 | rc = BG_ERR_TGT | BG_ERR_CHECK; |
1063 | |
1064 | break; |
1065 | } |
1066 | fallthrough; |
1067 | case SCSI_PROT_WRITE_INSERT: |
1068 | /* |
1069 | * For WRITE_INSERT, force the error |
1070 | * to be sent on the wire. It should be |
1071 | * detected by the Target. |
1072 | */ |
1073 | /* DEADBEEF will be the reftag on the wire */ |
1074 | *reftag = 0xDEADBEEF; |
1075 | phba->lpfc_injerr_wref_cnt--; |
1076 | if (phba->lpfc_injerr_wref_cnt == 0) { |
1077 | phba->lpfc_injerr_nportid = 0; |
1078 | phba->lpfc_injerr_lba = |
1079 | LPFC_INJERR_LBA_OFF; |
1080 | memset(&phba->lpfc_injerr_wwpn, |
1081 | 0, sizeof(struct lpfc_name)); |
1082 | } |
1083 | rc = BG_ERR_TGT | BG_ERR_CHECK; |
1084 | |
1085 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1086 | "9078 BLKGRD: Injecting reftag error: " |
"write lba x%lx\n", (unsigned long)lba);
1088 | break; |
1089 | case SCSI_PROT_WRITE_STRIP: |
1090 | /* |
1091 | * For WRITE_STRIP and WRITE_PASS, |
1092 | * force the error on data |
1093 | * being copied from SLI-Host to SLI-Port. |
1094 | */ |
1095 | *reftag = 0xDEADBEEF; |
1096 | phba->lpfc_injerr_wref_cnt--; |
1097 | if (phba->lpfc_injerr_wref_cnt == 0) { |
1098 | phba->lpfc_injerr_nportid = 0; |
1099 | phba->lpfc_injerr_lba = |
1100 | LPFC_INJERR_LBA_OFF; |
1101 | memset(&phba->lpfc_injerr_wwpn, |
1102 | 0, sizeof(struct lpfc_name)); |
1103 | } |
1104 | rc = BG_ERR_INIT; |
1105 | |
1106 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1107 | "9077 BLKGRD: Injecting reftag error: " |
"write lba x%lx\n", (unsigned long)lba);
1109 | break; |
1110 | } |
1111 | } |
1112 | if (phba->lpfc_injerr_rref_cnt) { |
1113 | switch (op) { |
1114 | case SCSI_PROT_READ_INSERT: |
1115 | case SCSI_PROT_READ_STRIP: |
1116 | case SCSI_PROT_READ_PASS: |
1117 | /* |
1118 | * For READ_STRIP and READ_PASS, force the |
1119 | * error on data being read off the wire. It |
1120 | * should force an IO error to the driver. |
1121 | */ |
1122 | *reftag = 0xDEADBEEF; |
1123 | phba->lpfc_injerr_rref_cnt--; |
1124 | if (phba->lpfc_injerr_rref_cnt == 0) { |
1125 | phba->lpfc_injerr_nportid = 0; |
1126 | phba->lpfc_injerr_lba = |
1127 | LPFC_INJERR_LBA_OFF; |
1128 | memset(&phba->lpfc_injerr_wwpn, |
1129 | 0, sizeof(struct lpfc_name)); |
1130 | } |
1131 | rc = BG_ERR_INIT; |
1132 | |
1133 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1134 | "9079 BLKGRD: Injecting reftag error: " |
"read lba x%lx\n", (unsigned long)lba);
1136 | break; |
1137 | } |
1138 | } |
1139 | } |
1140 | |
1141 | /* Should we change the Application Tag */ |
1142 | if (apptag) { |
1143 | if (phba->lpfc_injerr_wapp_cnt) { |
1144 | switch (op) { |
1145 | case SCSI_PROT_WRITE_PASS: |
1146 | if (src) { |
1147 | /* |
1148 | * For WRITE_PASS, force the error |
1149 | * to be sent on the wire. It should |
1150 | * be detected by the Target. |
1151 | * If blockoff != 0 error will be |
1152 | * inserted in middle of the IO. |
1153 | */ |
1154 | |
1155 | lpfc_printf_log(phba, KERN_ERR, |
1156 | LOG_TRACE_EVENT, |
1157 | "9080 BLKGRD: Injecting apptag error: " |
"write lba x%lx + x%x oldappTag x%x\n",
1159 | (unsigned long)lba, blockoff, |
1160 | be16_to_cpu(src->app_tag)); |
1161 | |
1162 | /* |
1163 | * Save the old app_tag so we can |
1164 | * restore it on completion. |
1165 | */ |
1166 | if (lpfc_cmd) { |
1167 | lpfc_cmd->prot_data_type = |
1168 | LPFC_INJERR_APPTAG; |
1169 | lpfc_cmd->prot_data_segment = |
1170 | src; |
1171 | lpfc_cmd->prot_data = |
1172 | src->app_tag; |
1173 | } |
1174 | src->app_tag = cpu_to_be16(0xDEAD); |
1175 | phba->lpfc_injerr_wapp_cnt--; |
1176 | if (phba->lpfc_injerr_wapp_cnt == 0) { |
1177 | phba->lpfc_injerr_nportid = 0; |
1178 | phba->lpfc_injerr_lba = |
1179 | LPFC_INJERR_LBA_OFF; |
1180 | memset(&phba->lpfc_injerr_wwpn, |
1181 | 0, sizeof(struct lpfc_name)); |
1182 | } |
1183 | rc = BG_ERR_TGT | BG_ERR_CHECK; |
1184 | break; |
1185 | } |
1186 | fallthrough; |
1187 | case SCSI_PROT_WRITE_INSERT: |
1188 | /* |
1189 | * For WRITE_INSERT, force the |
1190 | * error to be sent on the wire. It should be |
1191 | * detected by the Target. |
1192 | */ |
1193 | /* DEAD will be the apptag on the wire */ |
1194 | *apptag = 0xDEAD; |
1195 | phba->lpfc_injerr_wapp_cnt--; |
1196 | if (phba->lpfc_injerr_wapp_cnt == 0) { |
1197 | phba->lpfc_injerr_nportid = 0; |
1198 | phba->lpfc_injerr_lba = |
1199 | LPFC_INJERR_LBA_OFF; |
1200 | memset(&phba->lpfc_injerr_wwpn, |
1201 | 0, sizeof(struct lpfc_name)); |
1202 | } |
1203 | rc = BG_ERR_TGT | BG_ERR_CHECK; |
1204 | |
1205 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1206 | "0813 BLKGRD: Injecting apptag error: " |
"write lba x%lx\n", (unsigned long)lba);
1208 | break; |
1209 | case SCSI_PROT_WRITE_STRIP: |
1210 | /* |
1211 | * For WRITE_STRIP and WRITE_PASS, |
1212 | * force the error on data |
1213 | * being copied from SLI-Host to SLI-Port. |
1214 | */ |
1215 | *apptag = 0xDEAD; |
1216 | phba->lpfc_injerr_wapp_cnt--; |
1217 | if (phba->lpfc_injerr_wapp_cnt == 0) { |
1218 | phba->lpfc_injerr_nportid = 0; |
1219 | phba->lpfc_injerr_lba = |
1220 | LPFC_INJERR_LBA_OFF; |
1221 | memset(&phba->lpfc_injerr_wwpn, |
1222 | 0, sizeof(struct lpfc_name)); |
1223 | } |
1224 | rc = BG_ERR_INIT; |
1225 | |
1226 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1227 | "0812 BLKGRD: Injecting apptag error: " |
"write lba x%lx\n", (unsigned long)lba);
1229 | break; |
1230 | } |
1231 | } |
1232 | if (phba->lpfc_injerr_rapp_cnt) { |
1233 | switch (op) { |
1234 | case SCSI_PROT_READ_INSERT: |
1235 | case SCSI_PROT_READ_STRIP: |
1236 | case SCSI_PROT_READ_PASS: |
1237 | /* |
1238 | * For READ_STRIP and READ_PASS, force the |
1239 | * error on data being read off the wire. It |
1240 | * should force an IO error to the driver. |
1241 | */ |
1242 | *apptag = 0xDEAD; |
1243 | phba->lpfc_injerr_rapp_cnt--; |
1244 | if (phba->lpfc_injerr_rapp_cnt == 0) { |
1245 | phba->lpfc_injerr_nportid = 0; |
1246 | phba->lpfc_injerr_lba = |
1247 | LPFC_INJERR_LBA_OFF; |
1248 | memset(&phba->lpfc_injerr_wwpn, |
1249 | 0, sizeof(struct lpfc_name)); |
1250 | } |
1251 | rc = BG_ERR_INIT; |
1252 | |
1253 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1254 | "0814 BLKGRD: Injecting apptag error: " |
"read lba x%lx\n", (unsigned long)lba);
1256 | break; |
1257 | } |
1258 | } |
1259 | } |
1260 | |
1261 | |
1262 | /* Should we change the Guard Tag */ |
1263 | if (new_guard) { |
1264 | if (phba->lpfc_injerr_wgrd_cnt) { |
1265 | switch (op) { |
1266 | case SCSI_PROT_WRITE_PASS: |
1267 | rc = BG_ERR_CHECK; |
1268 | fallthrough; |
1269 | |
1270 | case SCSI_PROT_WRITE_INSERT: |
1271 | /* |
1272 | * For WRITE_INSERT, force the |
1273 | * error to be sent on the wire. It should be |
1274 | * detected by the Target. |
1275 | */ |
1276 | phba->lpfc_injerr_wgrd_cnt--; |
1277 | if (phba->lpfc_injerr_wgrd_cnt == 0) { |
1278 | phba->lpfc_injerr_nportid = 0; |
1279 | phba->lpfc_injerr_lba = |
1280 | LPFC_INJERR_LBA_OFF; |
1281 | memset(&phba->lpfc_injerr_wwpn, |
1282 | 0, sizeof(struct lpfc_name)); |
1283 | } |
1284 | |
1285 | rc |= BG_ERR_TGT | BG_ERR_SWAP; |
1286 | /* Signals the caller to swap CRC->CSUM */ |
1287 | |
1288 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1289 | "0817 BLKGRD: Injecting guard error: " |
"write lba x%lx\n", (unsigned long)lba);
1291 | break; |
1292 | case SCSI_PROT_WRITE_STRIP: |
1293 | /* |
1294 | * For WRITE_STRIP and WRITE_PASS, |
1295 | * force the error on data |
1296 | * being copied from SLI-Host to SLI-Port. |
1297 | */ |
1298 | phba->lpfc_injerr_wgrd_cnt--; |
1299 | if (phba->lpfc_injerr_wgrd_cnt == 0) { |
1300 | phba->lpfc_injerr_nportid = 0; |
1301 | phba->lpfc_injerr_lba = |
1302 | LPFC_INJERR_LBA_OFF; |
1303 | memset(&phba->lpfc_injerr_wwpn, |
1304 | 0, sizeof(struct lpfc_name)); |
1305 | } |
1306 | |
1307 | rc = BG_ERR_INIT | BG_ERR_SWAP; |
1308 | /* Signals the caller to swap CRC->CSUM */ |
1309 | |
1310 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1311 | "0816 BLKGRD: Injecting guard error: " |
"write lba x%lx\n", (unsigned long)lba);
1313 | break; |
1314 | } |
1315 | } |
1316 | if (phba->lpfc_injerr_rgrd_cnt) { |
1317 | switch (op) { |
1318 | case SCSI_PROT_READ_INSERT: |
1319 | case SCSI_PROT_READ_STRIP: |
1320 | case SCSI_PROT_READ_PASS: |
1321 | /* |
1322 | * For READ_STRIP and READ_PASS, force the |
1323 | * error on data being read off the wire. It |
1324 | * should force an IO error to the driver. |
1325 | */ |
1326 | phba->lpfc_injerr_rgrd_cnt--; |
1327 | if (phba->lpfc_injerr_rgrd_cnt == 0) { |
1328 | phba->lpfc_injerr_nportid = 0; |
1329 | phba->lpfc_injerr_lba = |
1330 | LPFC_INJERR_LBA_OFF; |
1331 | memset(&phba->lpfc_injerr_wwpn, |
1332 | 0, sizeof(struct lpfc_name)); |
1333 | } |
1334 | |
1335 | rc = BG_ERR_INIT | BG_ERR_SWAP; |
1336 | /* Signals the caller to swap CRC->CSUM */ |
1337 | |
1338 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1339 | "0818 BLKGRD: Injecting guard error: " |
"read lba x%lx\n", (unsigned long)lba);
1341 | } |
1342 | } |
1343 | } |
1344 | |
1345 | return rc; |
1346 | } |
1347 | #endif |
1348 | |
1349 | /** |
1350 | * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with |
1351 | * the specified SCSI command. |
1352 | * @phba: The Hba for which this call is being executed. |
1353 | * @sc: The SCSI command to examine |
1354 | * @txop: (out) BlockGuard operation for transmitted data |
1355 | * @rxop: (out) BlockGuard operation for received data |
1356 | * |
1357 | * Returns: zero on success; non-zero if tx and/or rx op cannot be determined |
1358 | * |
1359 | **/ |
1360 | static int |
1361 | lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1362 | uint8_t *txop, uint8_t *rxop) |
1363 | { |
1364 | uint8_t ret = 0; |
1365 | |
1366 | if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { |
switch (scsi_get_prot_op(sc)) {
1368 | case SCSI_PROT_READ_INSERT: |
1369 | case SCSI_PROT_WRITE_STRIP: |
1370 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; |
1371 | *txop = BG_OP_IN_CSUM_OUT_NODIF; |
1372 | break; |
1373 | |
1374 | case SCSI_PROT_READ_STRIP: |
1375 | case SCSI_PROT_WRITE_INSERT: |
1376 | *rxop = BG_OP_IN_CRC_OUT_NODIF; |
1377 | *txop = BG_OP_IN_NODIF_OUT_CRC; |
1378 | break; |
1379 | |
1380 | case SCSI_PROT_READ_PASS: |
1381 | case SCSI_PROT_WRITE_PASS: |
1382 | *rxop = BG_OP_IN_CRC_OUT_CSUM; |
1383 | *txop = BG_OP_IN_CSUM_OUT_CRC; |
1384 | break; |
1385 | |
1386 | case SCSI_PROT_NORMAL: |
1387 | default: |
1388 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1390 | scsi_get_prot_op(sc)); |
1391 | ret = 1; |
1392 | break; |
1393 | |
1394 | } |
1395 | } else { |
switch (scsi_get_prot_op(sc)) {
1397 | case SCSI_PROT_READ_STRIP: |
1398 | case SCSI_PROT_WRITE_INSERT: |
1399 | *rxop = BG_OP_IN_CRC_OUT_NODIF; |
1400 | *txop = BG_OP_IN_NODIF_OUT_CRC; |
1401 | break; |
1402 | |
1403 | case SCSI_PROT_READ_PASS: |
1404 | case SCSI_PROT_WRITE_PASS: |
1405 | *rxop = BG_OP_IN_CRC_OUT_CRC; |
1406 | *txop = BG_OP_IN_CRC_OUT_CRC; |
1407 | break; |
1408 | |
1409 | case SCSI_PROT_READ_INSERT: |
1410 | case SCSI_PROT_WRITE_STRIP: |
1411 | *rxop = BG_OP_IN_NODIF_OUT_CRC; |
1412 | *txop = BG_OP_IN_CRC_OUT_NODIF; |
1413 | break; |
1414 | |
1415 | case SCSI_PROT_NORMAL: |
1416 | default: |
1417 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1419 | scsi_get_prot_op(sc)); |
1420 | ret = 1; |
1421 | break; |
1422 | } |
1423 | } |
1424 | |
1425 | return ret; |
1426 | } |
1427 | |
1428 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1429 | /** |
* lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1431 | * the specified SCSI command in order to force a guard tag error. |
1432 | * @phba: The Hba for which this call is being executed. |
1433 | * @sc: The SCSI command to examine |
1434 | * @txop: (out) BlockGuard operation for transmitted data |
1435 | * @rxop: (out) BlockGuard operation for received data |
1436 | * |
1437 | * Returns: zero on success; non-zero if tx and/or rx op cannot be determined |
1438 | * |
1439 | **/ |
1440 | static int |
1441 | lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1442 | uint8_t *txop, uint8_t *rxop) |
1443 | { |
1444 | |
1445 | if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { |
switch (scsi_get_prot_op(sc)) {
1447 | case SCSI_PROT_READ_INSERT: |
1448 | case SCSI_PROT_WRITE_STRIP: |
1449 | *rxop = BG_OP_IN_NODIF_OUT_CRC; |
1450 | *txop = BG_OP_IN_CRC_OUT_NODIF; |
1451 | break; |
1452 | |
1453 | case SCSI_PROT_READ_STRIP: |
1454 | case SCSI_PROT_WRITE_INSERT: |
1455 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; |
1456 | *txop = BG_OP_IN_NODIF_OUT_CSUM; |
1457 | break; |
1458 | |
1459 | case SCSI_PROT_READ_PASS: |
1460 | case SCSI_PROT_WRITE_PASS: |
1461 | *rxop = BG_OP_IN_CSUM_OUT_CRC; |
1462 | *txop = BG_OP_IN_CRC_OUT_CSUM; |
1463 | break; |
1464 | |
1465 | case SCSI_PROT_NORMAL: |
1466 | default: |
1467 | break; |
1468 | |
1469 | } |
1470 | } else { |
switch (scsi_get_prot_op(sc)) {
1472 | case SCSI_PROT_READ_STRIP: |
1473 | case SCSI_PROT_WRITE_INSERT: |
1474 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; |
1475 | *txop = BG_OP_IN_NODIF_OUT_CSUM; |
1476 | break; |
1477 | |
1478 | case SCSI_PROT_READ_PASS: |
1479 | case SCSI_PROT_WRITE_PASS: |
1480 | *rxop = BG_OP_IN_CSUM_OUT_CSUM; |
1481 | *txop = BG_OP_IN_CSUM_OUT_CSUM; |
1482 | break; |
1483 | |
1484 | case SCSI_PROT_READ_INSERT: |
1485 | case SCSI_PROT_WRITE_STRIP: |
1486 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; |
1487 | *txop = BG_OP_IN_CSUM_OUT_NODIF; |
1488 | break; |
1489 | |
1490 | case SCSI_PROT_NORMAL: |
1491 | default: |
1492 | break; |
1493 | } |
1494 | } |
1495 | |
1496 | return 0; |
1497 | } |
1498 | #endif |
1499 | |
1500 | /** |
1501 | * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data |
1502 | * @phba: The Hba for which this call is being executed. |
1503 | * @sc: pointer to scsi command we're working on |
1504 | * @bpl: pointer to buffer list for protection groups |
1505 | * @datasegcnt: number of segments of data that have been dma mapped |
1506 | * |
1507 | * This function sets up BPL buffer list for protection groups of |
1508 | * type LPFC_PG_TYPE_NO_DIF |
1509 | * |
1510 | * This is usually used when the HBA is instructed to generate |
1511 | * DIFs and insert them into data stream (or strip DIF from |
1512 | * incoming data stream) |
1513 | * |
1514 | * The buffer list consists of just one protection group described |
1515 | * below: |
1516 | * +-------------------------+ |
1517 | * start of prot group --> | PDE_5 | |
1518 | * +-------------------------+ |
1519 | * | PDE_6 | |
1520 | * +-------------------------+ |
1521 | * | Data BDE | |
1522 | * +-------------------------+ |
1523 | * |more Data BDE's ... (opt)| |
1524 | * +-------------------------+ |
1525 | * |
1526 | * |
1527 | * Note: Data s/g buffers have been dma mapped |
1528 | * |
1529 | * Returns the number of BDEs added to the BPL. |
1530 | **/ |
1531 | static int |
1532 | lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1533 | struct ulp_bde64 *bpl, int datasegcnt) |
1534 | { |
1535 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
1536 | struct lpfc_pde5 *pde5 = NULL; |
1537 | struct lpfc_pde6 *pde6 = NULL; |
1538 | dma_addr_t physaddr; |
1539 | int i = 0, num_bde = 0, status; |
1540 | int datadir = sc->sc_data_direction; |
1541 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1542 | uint32_t rc; |
1543 | #endif |
1544 | uint32_t checking = 1; |
1545 | uint32_t reftag; |
1546 | uint8_t txop, rxop; |
1547 | |
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1549 | if (status) |
1550 | goto out; |
1551 | |
1552 | /* extract some info from the scsi command for pde*/ |
reftag = scsi_prot_ref_tag(sc);
1554 | |
1555 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1557 | if (rc) { |
1558 | if (rc & BG_ERR_SWAP) |
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1560 | if (rc & BG_ERR_CHECK) |
1561 | checking = 0; |
1562 | } |
1563 | #endif |
1564 | |
1565 | /* setup PDE5 with what we have */ |
1566 | pde5 = (struct lpfc_pde5 *) bpl; |
1567 | memset(pde5, 0, sizeof(struct lpfc_pde5)); |
1568 | bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); |
1569 | |
1570 | /* Endianness conversion if necessary for PDE5 */ |
1571 | pde5->word0 = cpu_to_le32(pde5->word0); |
1572 | pde5->reftag = cpu_to_le32(reftag); |
1573 | |
1574 | /* advance bpl and increment bde count */ |
1575 | num_bde++; |
1576 | bpl++; |
1577 | pde6 = (struct lpfc_pde6 *) bpl; |
1578 | |
1579 | /* setup PDE6 with the rest of the info */ |
1580 | memset(pde6, 0, sizeof(struct lpfc_pde6)); |
1581 | bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); |
1582 | bf_set(pde6_optx, pde6, txop); |
1583 | bf_set(pde6_oprx, pde6, rxop); |
1584 | |
1585 | /* |
1586 | * We only need to check the data on READs, for WRITEs |
1587 | * protection data is automatically generated, not checked. |
1588 | */ |
1589 | if (datadir == DMA_FROM_DEVICE) { |
1590 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) |
1591 | bf_set(pde6_ce, pde6, checking); |
1592 | else |
1593 | bf_set(pde6_ce, pde6, 0); |
1594 | |
1595 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
1596 | bf_set(pde6_re, pde6, checking); |
1597 | else |
1598 | bf_set(pde6_re, pde6, 0); |
1599 | } |
1600 | bf_set(pde6_ai, pde6, 1); |
1601 | bf_set(pde6_ae, pde6, 0); |
1602 | bf_set(pde6_apptagval, pde6, 0); |
1603 | |
1604 | /* Endianness conversion if necessary for PDE6 */ |
1605 | pde6->word0 = cpu_to_le32(pde6->word0); |
1606 | pde6->word1 = cpu_to_le32(pde6->word1); |
1607 | pde6->word2 = cpu_to_le32(pde6->word2); |
1608 | |
1609 | /* advance bpl and increment bde count */ |
1610 | num_bde++; |
1611 | bpl++; |
1612 | |
1613 | /* assumption: caller has already run dma_map_sg on command data */ |
1614 | scsi_for_each_sg(sc, sgde, datasegcnt, i) { |
1615 | physaddr = sg_dma_address(sgde); |
1616 | bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); |
1617 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); |
1618 | bpl->tus.f.bdeSize = sg_dma_len(sgde); |
1619 | if (datadir == DMA_TO_DEVICE) |
1620 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
1621 | else |
1622 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; |
1623 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
1624 | bpl++; |
1625 | num_bde++; |
1626 | } |
1627 | |
1628 | out: |
1629 | return num_bde; |
1630 | } |
1631 | |
1632 | /** |
1633 | * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data |
1634 | * @phba: The Hba for which this call is being executed. |
1635 | * @sc: pointer to scsi command we're working on |
1636 | * @bpl: pointer to buffer list for protection groups |
1637 | * @datacnt: number of segments of data that have been dma mapped |
1638 | * @protcnt: number of segment of protection data that have been dma mapped |
1639 | * |
1640 | * This function sets up BPL buffer list for protection groups of |
1641 | * type LPFC_PG_TYPE_DIF |
1642 | * |
1643 | * This is usually used when DIFs are in their own buffers, |
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
1647 | * |
1648 | * The buffer list for this type consists of one or more of the |
1649 | * protection groups described below: |
1650 | * +-------------------------+ |
1651 | * start of first prot group --> | PDE_5 | |
1652 | * +-------------------------+ |
1653 | * | PDE_6 | |
1654 | * +-------------------------+ |
1655 | * | PDE_7 (Prot BDE) | |
1656 | * +-------------------------+ |
1657 | * | Data BDE | |
1658 | * +-------------------------+ |
1659 | * |more Data BDE's ... (opt)| |
1660 | * +-------------------------+ |
1661 | * start of new prot group --> | PDE_5 | |
1662 | * +-------------------------+ |
1663 | * | ... | |
1664 | * +-------------------------+ |
1665 | * |
1666 | * Note: It is assumed that both data and protection s/g buffers have been |
1667 | * mapped for DMA |
1668 | * |
1669 | * Returns the number of BDEs added to the BPL. |
1670 | **/ |
1671 | static int |
1672 | lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1673 | struct ulp_bde64 *bpl, int datacnt, int protcnt) |
1674 | { |
1675 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
1676 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
1677 | struct lpfc_pde5 *pde5 = NULL; |
1678 | struct lpfc_pde6 *pde6 = NULL; |
1679 | struct lpfc_pde7 *pde7 = NULL; |
1680 | dma_addr_t dataphysaddr, protphysaddr; |
1681 | unsigned short curr_prot = 0; |
1682 | unsigned int split_offset; |
1683 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; |
1684 | unsigned int protgrp_blks, protgrp_bytes; |
1685 | unsigned int remainder, subtotal; |
1686 | int status; |
1687 | int datadir = sc->sc_data_direction; |
1688 | unsigned char pgdone = 0, alldone = 0; |
1689 | unsigned blksize; |
1690 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1691 | uint32_t rc; |
1692 | #endif |
1693 | uint32_t checking = 1; |
1694 | uint32_t reftag; |
1695 | uint8_t txop, rxop; |
1696 | int num_bde = 0; |
1697 | |
	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
1700 | |
1701 | if (!sgpe || !sgde) { |
1702 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1703 | "9020 Invalid s/g entry: data=x%px prot=x%px\n" , |
1704 | sgpe, sgde); |
1705 | return 0; |
1706 | } |
1707 | |
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1709 | if (status) |
1710 | goto out; |
1711 | |
1712 | /* extract some info from the scsi command */ |
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
1715 | |
1716 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1721 | if (rc & BG_ERR_CHECK) |
1722 | checking = 0; |
1723 | } |
1724 | #endif |
1725 | |
1726 | split_offset = 0; |
1727 | do { |
1728 | /* Check to see if we ran out of space */ |
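		/* Returning a count larger than cfg_total_seg_cnt signals the
		 * caller that the protection group would not fit in the
		 * buffer list, so it can fail the command cleanly.
		 */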
1729 | if (num_bde >= (phba->cfg_total_seg_cnt - 2)) |
1730 | return num_bde + 3; |
1731 | |
1732 | /* setup PDE5 with what we have */ |
1733 | pde5 = (struct lpfc_pde5 *) bpl; |
1734 | memset(pde5, 0, sizeof(struct lpfc_pde5)); |
1735 | bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); |
1736 | |
1737 | /* Endianness conversion if necessary for PDE5 */ |
1738 | pde5->word0 = cpu_to_le32(pde5->word0); |
1739 | pde5->reftag = cpu_to_le32(reftag); |
1740 | |
1741 | /* advance bpl and increment bde count */ |
1742 | num_bde++; |
1743 | bpl++; |
1744 | pde6 = (struct lpfc_pde6 *) bpl; |
1745 | |
1746 | /* setup PDE6 with the rest of the info */ |
1747 | memset(pde6, 0, sizeof(struct lpfc_pde6)); |
1748 | bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); |
1749 | bf_set(pde6_optx, pde6, txop); |
1750 | bf_set(pde6_oprx, pde6, rxop); |
1751 | |
1752 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) |
1753 | bf_set(pde6_ce, pde6, checking); |
1754 | else |
1755 | bf_set(pde6_ce, pde6, 0); |
1756 | |
1757 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
1758 | bf_set(pde6_re, pde6, checking); |
1759 | else |
1760 | bf_set(pde6_re, pde6, 0); |
1761 | |
1762 | bf_set(pde6_ai, pde6, 1); |
1763 | bf_set(pde6_ae, pde6, 0); |
1764 | bf_set(pde6_apptagval, pde6, 0); |
1765 | |
1766 | /* Endianness conversion if necessary for PDE6 */ |
1767 | pde6->word0 = cpu_to_le32(pde6->word0); |
1768 | pde6->word1 = cpu_to_le32(pde6->word1); |
1769 | pde6->word2 = cpu_to_le32(pde6->word2); |
1770 | |
1771 | /* advance bpl and increment bde count */ |
1772 | num_bde++; |
1773 | bpl++; |
1774 | |
1775 | /* setup the first BDE that points to protection buffer */ |
1776 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
1777 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
1778 | |
1779 | /* must be integer multiple of the DIF block length */ |
1780 | BUG_ON(protgroup_len % 8); |
1781 | |
1782 | pde7 = (struct lpfc_pde7 *) bpl; |
1783 | memset(pde7, 0, sizeof(struct lpfc_pde7)); |
1784 | bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); |
1785 | |
1786 | pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); |
1787 | pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); |
1788 | |
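		/* Each protection block is an 8-byte scsi_dif_tuple, so the
		 * mapped protection length divided by 8 gives the number of
		 * data blocks this group covers; e.g. 64 bytes of DIF data
		 * describe 8 data blocks (8 * blksize data bytes).
		 */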
1789 | protgrp_blks = protgroup_len / 8; |
1790 | protgrp_bytes = protgrp_blks * blksize; |
1791 | |
1792 | /* check if this pde is crossing the 4K boundary; if so split */ |
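		/* For example, if the protection buffer starts at offset 0xf80
		 * within a 4K page and is 0x100 bytes long, only the first
		 * 0x80 bytes are used for this protection group; the rest is
		 * carried over via protgroup_offset to the next group.
		 */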
1793 | if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { |
1794 | protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); |
1795 | protgroup_offset += protgroup_remainder; |
1796 | protgrp_blks = protgroup_remainder / 8; |
1797 | protgrp_bytes = protgrp_blks * blksize; |
1798 | } else { |
1799 | protgroup_offset = 0; |
1800 | curr_prot++; |
1801 | } |
1802 | |
1803 | num_bde++; |
1804 | |
1805 | /* setup BDE's for data blocks associated with DIF data */ |
1806 | pgdone = 0; |
1807 | subtotal = 0; /* total bytes processed for current prot grp */ |
1808 | while (!pgdone) { |
1809 | /* Check to see if we ran out of space */ |
1810 | if (num_bde >= phba->cfg_total_seg_cnt) |
1811 | return num_bde + 1; |
1812 | |
1813 | if (!sgde) { |
1814 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1815 | "9065 BLKGRD:%s Invalid data segment\n" , |
1816 | __func__); |
1817 | return 0; |
1818 | } |
1819 | bpl++; |
1820 | dataphysaddr = sg_dma_address(sgde) + split_offset; |
1821 | bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); |
1822 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); |
1823 | |
1824 | remainder = sg_dma_len(sgde) - split_offset; |
1825 | |
1826 | if ((subtotal + remainder) <= protgrp_bytes) { |
1827 | /* we can use this whole buffer */ |
1828 | bpl->tus.f.bdeSize = remainder; |
1829 | split_offset = 0; |
1830 | |
1831 | if ((subtotal + remainder) == protgrp_bytes) |
1832 | pgdone = 1; |
1833 | } else { |
1834 | /* must split this buffer with next prot grp */ |
1835 | bpl->tus.f.bdeSize = protgrp_bytes - subtotal; |
1836 | split_offset += bpl->tus.f.bdeSize; |
1837 | } |
1838 | |
1839 | subtotal += bpl->tus.f.bdeSize; |
1840 | |
1841 | if (datadir == DMA_TO_DEVICE) |
1842 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
1843 | else |
1844 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; |
1845 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
1846 | |
1847 | num_bde++; |
1848 | |
1849 | if (split_offset) |
1850 | break; |
1851 | |
1852 | /* Move to the next s/g segment if possible */ |
1853 | sgde = sg_next(sgde); |
1854 | |
1855 | } |
1856 | |
1857 | if (protgroup_offset) { |
1858 | /* update the reference tag */ |
1859 | reftag += protgrp_blks; |
1860 | bpl++; |
1861 | continue; |
1862 | } |
1863 | |
1864 | /* are we done ? */ |
1865 | if (curr_prot == protcnt) { |
1866 | alldone = 1; |
1867 | } else if (curr_prot < protcnt) { |
1868 | /* advance to next prot buffer */ |
1869 | sgpe = sg_next(sgpe); |
1870 | bpl++; |
1871 | |
1872 | /* update the reference tag */ |
1873 | reftag += protgrp_blks; |
1874 | } else { |
1875 | /* if we're here, we have a bug */ |
1876 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
1877 | "9054 BLKGRD: bug in %s\n" , __func__); |
1878 | } |
1879 | |
1880 | } while (!alldone); |
1881 | out: |
1882 | |
1883 | return num_bde; |
1884 | } |
1885 | |
1886 | /** |
1887 | * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data |
1888 | * @phba: The Hba for which this call is being executed. |
1889 | * @sc: pointer to scsi command we're working on |
1890 | * @sgl: pointer to buffer list for protection groups |
1891 | * @datasegcnt: number of segments of data that have been dma mapped |
1892 | * @lpfc_cmd: lpfc scsi command object pointer. |
1893 | * |
1894 | * This function sets up SGL buffer list for protection groups of |
1895 | * type LPFC_PG_TYPE_NO_DIF |
1896 | * |
1897 | * This is usually used when the HBA is instructed to generate |
 * DIFs and insert them into the data stream (or strip DIF from
 * the incoming data stream)
1900 | * |
1901 | * The buffer list consists of just one protection group described |
1902 | * below: |
1903 | * +-------------------------+ |
1904 | * start of prot group --> | DI_SEED | |
1905 | * +-------------------------+ |
1906 | * | Data SGE | |
1907 | * +-------------------------+ |
1908 | * |more Data SGE's ... (opt)| |
1909 | * +-------------------------+ |
1910 | * |
1911 | * |
1912 | * Note: Data s/g buffers have been dma mapped |
1913 | * |
1914 | * Returns the number of SGEs added to the SGL. |
1915 | **/ |
1916 | static uint32_t |
1917 | lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
1918 | struct sli4_sge *sgl, int datasegcnt, |
1919 | struct lpfc_io_buf *lpfc_cmd) |
1920 | { |
1921 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
1922 | struct sli4_sge_diseed *diseed = NULL; |
1923 | dma_addr_t physaddr; |
1924 | int i = 0, status; |
1925 | uint32_t reftag, num_sge = 0; |
1926 | uint8_t txop, rxop; |
1927 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1928 | uint32_t rc; |
1929 | #endif |
1930 | uint32_t checking = 1; |
1931 | uint32_t dma_len; |
1932 | uint32_t dma_offset = 0; |
1933 | struct sli4_hybrid_sgl *sgl_xtra = NULL; |
1934 | int j; |
1935 | bool lsp_just_set = false; |
1936 | |
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1938 | if (status) |
1939 | goto out; |
1940 | |
1941 | /* extract some info from the scsi command for pde*/ |
	reftag = scsi_prot_ref_tag(sc);
1943 | |
1944 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1949 | if (rc & BG_ERR_CHECK) |
1950 | checking = 0; |
1951 | } |
1952 | #endif |
1953 | |
1954 | /* setup DISEED with what we have */ |
1955 | diseed = (struct sli4_sge_diseed *) sgl; |
1956 | memset(diseed, 0, sizeof(struct sli4_sge_diseed)); |
1957 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); |
1958 | |
1959 | /* Endianness conversion if necessary */ |
1960 | diseed->ref_tag = cpu_to_le32(reftag); |
1961 | diseed->ref_tag_tran = diseed->ref_tag; |
1962 | |
1963 | /* |
1964 | * We only need to check the data on READs, for WRITEs |
1965 | * protection data is automatically generated, not checked. |
1966 | */ |
1967 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { |
1968 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) |
1969 | bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); |
1970 | else |
1971 | bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); |
1972 | |
1973 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
1974 | bf_set(lpfc_sli4_sge_dif_re, diseed, checking); |
1975 | else |
1976 | bf_set(lpfc_sli4_sge_dif_re, diseed, 0); |
1977 | } |
1978 | |
1979 | /* setup DISEED with the rest of the info */ |
1980 | bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); |
1981 | bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); |
1982 | |
1983 | bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); |
1984 | bf_set(lpfc_sli4_sge_dif_me, diseed, 0); |
1985 | |
1986 | /* Endianness conversion if necessary for DISEED */ |
1987 | diseed->word2 = cpu_to_le32(diseed->word2); |
1988 | diseed->word3 = cpu_to_le32(diseed->word3); |
1989 | |
1990 | /* advance bpl and increment sge count */ |
1991 | num_sge++; |
1992 | sgl++; |
1993 | |
1994 | /* assumption: caller has already run dma_map_sg on command data */ |
	sgde = scsi_sglist(sc);
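	/* j is a running count of SGE slots consumed; the initial value of 3
	 * accounts for the FCP_CMND, FCP_RSP and DISEED entries. It is
	 * checked modulo border_sge_num to decide when the next data SGE
	 * would spill past the current SGL page.
	 */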
1996 | j = 3; |
1997 | for (i = 0; i < datasegcnt; i++) { |
1998 | /* clear it */ |
1999 | sgl->word2 = 0; |
2000 | |
2001 | /* do we need to expand the segment */ |
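		/* When the next slot would land on the SGL page boundary
		 * (border_sge_num), emit an LSP SGE that points at an extra
		 * per-hardware-queue SGL page and continue building data
		 * SGEs there.
		 */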
2002 | if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && |
2003 | ((datasegcnt - 1) != i)) { |
2004 | /* set LSP type */ |
2005 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); |
2006 | |
			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2008 | |
2009 | if (unlikely(!sgl_xtra)) { |
2010 | lpfc_cmd->seg_cnt = 0; |
2011 | return 0; |
2012 | } |
2013 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
2014 | sgl_xtra->dma_phys_sgl)); |
2015 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
2016 | sgl_xtra->dma_phys_sgl)); |
2017 | |
2018 | } else { |
2019 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); |
2020 | } |
2021 | |
2022 | if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { |
2023 | if ((datasegcnt - 1) == i) |
2024 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
2025 | physaddr = sg_dma_address(sgde); |
2026 | dma_len = sg_dma_len(sgde); |
2027 | sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); |
2028 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); |
2029 | |
2030 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
2031 | sgl->word2 = cpu_to_le32(sgl->word2); |
2032 | sgl->sge_len = cpu_to_le32(dma_len); |
2033 | |
2034 | dma_offset += dma_len; |
2035 | sgde = sg_next(sgde); |
2036 | |
2037 | sgl++; |
2038 | num_sge++; |
2039 | lsp_just_set = false; |
2040 | |
2041 | } else { |
2042 | sgl->word2 = cpu_to_le32(sgl->word2); |
2043 | sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); |
2044 | |
2045 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
2046 | i = i - 1; |
2047 | |
2048 | lsp_just_set = true; |
2049 | } |
2050 | |
2051 | j++; |
2052 | |
2053 | } |
2054 | |
2055 | out: |
2056 | return num_sge; |
2057 | } |
2058 | |
2059 | /** |
2060 | * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data |
2061 | * @phba: The Hba for which this call is being executed. |
2062 | * @sc: pointer to scsi command we're working on |
2063 | * @sgl: pointer to buffer list for protection groups |
2064 | * @datacnt: number of segments of data that have been dma mapped |
2065 | * @protcnt: number of segment of protection data that have been dma mapped |
2066 | * @lpfc_cmd: lpfc scsi command object pointer. |
2067 | * |
2068 | * This function sets up SGL buffer list for protection groups of |
2069 | * type LPFC_PG_TYPE_DIF |
2070 | * |
2071 | * This is usually used when DIFs are in their own buffers, |
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
2075 | * |
2076 | * The buffer list for this type consists of one or more of the |
2077 | * protection groups described below: |
2078 | * +-------------------------+ |
2079 | * start of first prot group --> | DISEED | |
2080 | * +-------------------------+ |
2081 | * | DIF (Prot SGE) | |
2082 | * +-------------------------+ |
2083 | * | Data SGE | |
2084 | * +-------------------------+ |
2085 | * |more Data SGE's ... (opt)| |
2086 | * +-------------------------+ |
2087 | * start of new prot group --> | DISEED | |
2088 | * +-------------------------+ |
2089 | * | ... | |
2090 | * +-------------------------+ |
2091 | * |
2092 | * Note: It is assumed that both data and protection s/g buffers have been |
2093 | * mapped for DMA |
2094 | * |
2095 | * Returns the number of SGEs added to the SGL. |
2096 | **/ |
2097 | static uint32_t |
2098 | lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
2099 | struct sli4_sge *sgl, int datacnt, int protcnt, |
2100 | struct lpfc_io_buf *lpfc_cmd) |
2101 | { |
2102 | struct scatterlist *sgde = NULL; /* s/g data entry */ |
2103 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
2104 | struct sli4_sge_diseed *diseed = NULL; |
2105 | dma_addr_t dataphysaddr, protphysaddr; |
2106 | unsigned short curr_prot = 0; |
2107 | unsigned int split_offset; |
2108 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; |
2109 | unsigned int protgrp_blks, protgrp_bytes; |
2110 | unsigned int remainder, subtotal; |
2111 | int status; |
2112 | unsigned char pgdone = 0, alldone = 0; |
2113 | unsigned blksize; |
2114 | uint32_t reftag; |
2115 | uint8_t txop, rxop; |
2116 | uint32_t dma_len; |
2117 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2118 | uint32_t rc; |
2119 | #endif |
2120 | uint32_t checking = 1; |
2121 | uint32_t dma_offset = 0, num_sge = 0; |
2122 | int j = 2; |
2123 | struct sli4_hybrid_sgl *sgl_xtra = NULL; |
2124 | |
	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
2127 | |
2128 | if (!sgpe || !sgde) { |
2129 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2130 | "9082 Invalid s/g entry: data=x%px prot=x%px\n" , |
2131 | sgpe, sgde); |
2132 | return 0; |
2133 | } |
2134 | |
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2136 | if (status) |
2137 | goto out; |
2138 | |
2139 | /* extract some info from the scsi command */ |
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
2142 | |
2143 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2148 | if (rc & BG_ERR_CHECK) |
2149 | checking = 0; |
2150 | } |
2151 | #endif |
2152 | |
2153 | split_offset = 0; |
2154 | do { |
2155 | /* Check to see if we ran out of space */ |
2156 | if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && |
2157 | !(phba->cfg_xpsgl)) |
2158 | return num_sge + 3; |
2159 | |
2160 | /* DISEED and DIF have to be together */ |
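		/* If the DISEED, the DIF SGE or the first data SGE would land
		 * on the SGL page boundary, chain to a new SGL page first so
		 * the whole protection group header stays in one page.
		 */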
2161 | if (!((j + 1) % phba->border_sge_num) || |
2162 | !((j + 2) % phba->border_sge_num) || |
2163 | !((j + 3) % phba->border_sge_num)) { |
2164 | sgl->word2 = 0; |
2165 | |
2166 | /* set LSP type */ |
2167 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); |
2168 | |
			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2170 | |
2171 | if (unlikely(!sgl_xtra)) { |
2172 | goto out; |
2173 | } else { |
2174 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
2175 | sgl_xtra->dma_phys_sgl)); |
2176 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
2177 | sgl_xtra->dma_phys_sgl)); |
2178 | } |
2179 | |
2180 | sgl->word2 = cpu_to_le32(sgl->word2); |
2181 | sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); |
2182 | |
2183 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
2184 | j = 0; |
2185 | } |
2186 | |
2187 | /* setup DISEED with what we have */ |
2188 | diseed = (struct sli4_sge_diseed *) sgl; |
2189 | memset(diseed, 0, sizeof(struct sli4_sge_diseed)); |
2190 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); |
2191 | |
2192 | /* Endianness conversion if necessary */ |
2193 | diseed->ref_tag = cpu_to_le32(reftag); |
2194 | diseed->ref_tag_tran = diseed->ref_tag; |
2195 | |
2196 | if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { |
2197 | bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); |
2198 | } else { |
2199 | bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); |
2200 | /* |
2201 | * When in this mode, the hardware will replace |
2202 | * the guard tag from the host with a |
2203 | * newly generated good CRC for the wire. |
2204 | * Switch to raw mode here to avoid this |
2205 | * behavior. What the host sends gets put on the wire. |
2206 | */ |
2207 | if (txop == BG_OP_IN_CRC_OUT_CRC) { |
2208 | txop = BG_OP_RAW_MODE; |
2209 | rxop = BG_OP_RAW_MODE; |
2210 | } |
2211 | } |
2212 | |
2213 | |
2214 | if (sc->prot_flags & SCSI_PROT_REF_CHECK) |
2215 | bf_set(lpfc_sli4_sge_dif_re, diseed, checking); |
2216 | else |
2217 | bf_set(lpfc_sli4_sge_dif_re, diseed, 0); |
2218 | |
2219 | /* setup DISEED with the rest of the info */ |
2220 | bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); |
2221 | bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); |
2222 | |
2223 | bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); |
2224 | bf_set(lpfc_sli4_sge_dif_me, diseed, 0); |
2225 | |
2226 | /* Endianness conversion if necessary for DISEED */ |
2227 | diseed->word2 = cpu_to_le32(diseed->word2); |
2228 | diseed->word3 = cpu_to_le32(diseed->word3); |
2229 | |
2230 | /* advance sgl and increment bde count */ |
2231 | num_sge++; |
2232 | |
2233 | sgl++; |
2234 | j++; |
2235 | |
2236 | /* setup the first BDE that points to protection buffer */ |
2237 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
2238 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
2239 | |
2240 | /* must be integer multiple of the DIF block length */ |
2241 | BUG_ON(protgroup_len % 8); |
2242 | |
2243 | /* Now setup DIF SGE */ |
2244 | sgl->word2 = 0; |
2245 | bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); |
2246 | sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); |
2247 | sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); |
2248 | sgl->word2 = cpu_to_le32(sgl->word2); |
2249 | sgl->sge_len = 0; |
2250 | |
2251 | protgrp_blks = protgroup_len / 8; |
2252 | protgrp_bytes = protgrp_blks * blksize; |
2253 | |
2254 | /* check if DIF SGE is crossing the 4K boundary; if so split */ |
2255 | if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { |
2256 | protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); |
2257 | protgroup_offset += protgroup_remainder; |
2258 | protgrp_blks = protgroup_remainder / 8; |
2259 | protgrp_bytes = protgrp_blks * blksize; |
2260 | } else { |
2261 | protgroup_offset = 0; |
2262 | curr_prot++; |
2263 | } |
2264 | |
2265 | num_sge++; |
2266 | |
2267 | /* setup SGE's for data blocks associated with DIF data */ |
2268 | pgdone = 0; |
2269 | subtotal = 0; /* total bytes processed for current prot grp */ |
2270 | |
2271 | sgl++; |
2272 | j++; |
2273 | |
2274 | while (!pgdone) { |
2275 | /* Check to see if we ran out of space */ |
2276 | if ((num_sge >= phba->cfg_total_seg_cnt) && |
2277 | !phba->cfg_xpsgl) |
2278 | return num_sge + 1; |
2279 | |
2280 | if (!sgde) { |
2281 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2282 | "9086 BLKGRD:%s Invalid data segment\n" , |
2283 | __func__); |
2284 | return 0; |
2285 | } |
2286 | |
2287 | if (!((j + 1) % phba->border_sge_num)) { |
2288 | sgl->word2 = 0; |
2289 | |
2290 | /* set LSP type */ |
2291 | bf_set(lpfc_sli4_sge_type, sgl, |
2292 | LPFC_SGE_TYPE_LSP); |
2293 | |
				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);
2296 | |
2297 | if (unlikely(!sgl_xtra)) { |
2298 | goto out; |
2299 | } else { |
2300 | sgl->addr_lo = cpu_to_le32( |
2301 | putPaddrLow(sgl_xtra->dma_phys_sgl)); |
2302 | sgl->addr_hi = cpu_to_le32( |
2303 | putPaddrHigh(sgl_xtra->dma_phys_sgl)); |
2304 | } |
2305 | |
2306 | sgl->word2 = cpu_to_le32(sgl->word2); |
2307 | sgl->sge_len = cpu_to_le32( |
2308 | phba->cfg_sg_dma_buf_size); |
2309 | |
2310 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
2311 | } else { |
2312 | dataphysaddr = sg_dma_address(sgde) + |
2313 | split_offset; |
2314 | |
2315 | remainder = sg_dma_len(sgde) - split_offset; |
2316 | |
2317 | if ((subtotal + remainder) <= protgrp_bytes) { |
2318 | /* we can use this whole buffer */ |
2319 | dma_len = remainder; |
2320 | split_offset = 0; |
2321 | |
2322 | if ((subtotal + remainder) == |
2323 | protgrp_bytes) |
2324 | pgdone = 1; |
2325 | } else { |
2326 | /* must split this buffer with next |
2327 | * prot grp |
2328 | */ |
2329 | dma_len = protgrp_bytes - subtotal; |
2330 | split_offset += dma_len; |
2331 | } |
2332 | |
2333 | subtotal += dma_len; |
2334 | |
2335 | sgl->word2 = 0; |
2336 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
2337 | dataphysaddr)); |
2338 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
2339 | dataphysaddr)); |
2340 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
2341 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
2342 | bf_set(lpfc_sli4_sge_type, sgl, |
2343 | LPFC_SGE_TYPE_DATA); |
2344 | |
2345 | sgl->sge_len = cpu_to_le32(dma_len); |
2346 | dma_offset += dma_len; |
2347 | |
2348 | num_sge++; |
2349 | |
2350 | if (split_offset) { |
2351 | sgl++; |
2352 | j++; |
2353 | break; |
2354 | } |
2355 | |
2356 | /* Move to the next s/g segment if possible */ |
2357 | sgde = sg_next(sgde); |
2358 | |
2359 | sgl++; |
2360 | } |
2361 | |
2362 | j++; |
2363 | } |
2364 | |
2365 | if (protgroup_offset) { |
2366 | /* update the reference tag */ |
2367 | reftag += protgrp_blks; |
2368 | continue; |
2369 | } |
2370 | |
2371 | /* are we done ? */ |
2372 | if (curr_prot == protcnt) { |
2373 | /* mark the last SGL */ |
2374 | sgl--; |
2375 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
2376 | alldone = 1; |
2377 | } else if (curr_prot < protcnt) { |
2378 | /* advance to next prot buffer */ |
2379 | sgpe = sg_next(sgpe); |
2380 | |
2381 | /* update the reference tag */ |
2382 | reftag += protgrp_blks; |
2383 | } else { |
2384 | /* if we're here, we have a bug */ |
2385 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2386 | "9085 BLKGRD: bug in %s\n" , __func__); |
2387 | } |
2388 | |
2389 | } while (!alldone); |
2390 | |
2391 | out: |
2392 | |
2393 | return num_sge; |
2394 | } |
2395 | |
2396 | /** |
 * lpfc_prot_group_type - Get protection group type of SCSI command
2398 | * @phba: The Hba for which this call is being executed. |
2399 | * @sc: pointer to scsi command we're working on |
2400 | * |
2401 | * Given a SCSI command that supports DIF, determine composition of protection |
2402 | * groups involved in setting up buffer lists |
2403 | * |
2404 | * Returns: Protection group type (with or without DIF) |
2405 | * |
2406 | **/ |
2407 | static int |
2408 | lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) |
2409 | { |
2410 | int ret = LPFC_PG_TYPE_INVALID; |
	unsigned char op = scsi_get_prot_op(sc);
2412 | |
2413 | switch (op) { |
2414 | case SCSI_PROT_READ_STRIP: |
2415 | case SCSI_PROT_WRITE_INSERT: |
2416 | ret = LPFC_PG_TYPE_NO_DIF; |
2417 | break; |
2418 | case SCSI_PROT_READ_INSERT: |
2419 | case SCSI_PROT_WRITE_STRIP: |
2420 | case SCSI_PROT_READ_PASS: |
2421 | case SCSI_PROT_WRITE_PASS: |
2422 | ret = LPFC_PG_TYPE_DIF_BUF; |
2423 | break; |
2424 | default: |
2425 | if (phba) |
2426 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2427 | "9021 Unsupported protection op:%d\n" , |
2428 | op); |
2429 | break; |
2430 | } |
2431 | return ret; |
2432 | } |
2433 | |
2434 | /** |
2435 | * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard |
2436 | * @phba: The Hba for which this call is being executed. |
2437 | * @lpfc_cmd: The scsi buffer which is going to be adjusted. |
2438 | * |
2439 | * Adjust the data length to account for how much data |
2440 | * is actually on the wire. |
2441 | * |
2442 | * returns the adjusted data length |
2443 | **/ |
2444 | static int |
2445 | lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, |
2446 | struct lpfc_io_buf *lpfc_cmd) |
2447 | { |
2448 | struct scsi_cmnd *sc = lpfc_cmd->pCmd; |
2449 | int fcpdl; |
2450 | |
	fcpdl = scsi_bufflen(sc);
2452 | |
2453 | /* Check if there is protection data on the wire */ |
2454 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { |
2455 | /* Read check for protection data */ |
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2457 | return fcpdl; |
2458 | |
2459 | } else { |
2460 | /* Write check for protection data */ |
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2462 | return fcpdl; |
2463 | } |
2464 | |
2465 | /* |
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. We must adjust the FCP data length
2468 | * to account for the protection data. |
2469 | */ |
	fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
2471 | |
2472 | return fcpdl; |
2473 | } |
2474 | |
2475 | /** |
2476 | * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec |
2477 | * @phba: The Hba for which this call is being executed. |
2478 | * @lpfc_cmd: The scsi buffer which is going to be prep'ed. |
2479 | * |
2480 | * This is the protection/DIF aware version of |
2481 | * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the |
2482 | * two functions eventually, but for now, it's here. |
2483 | * RETURNS 0 - SUCCESS, |
2484 | * 1 - Failed DMA map, retry. |
 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2486 | **/ |
2487 | static int |
2488 | lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, |
2489 | struct lpfc_io_buf *lpfc_cmd) |
2490 | { |
2491 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
2492 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
2493 | struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; |
2494 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; |
2495 | uint32_t num_bde = 0; |
2496 | int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; |
2497 | int prot_group_type = 0; |
2498 | int fcpdl; |
2499 | int ret = 1; |
2500 | struct lpfc_vport *vport = phba->pport; |
2501 | |
2502 | /* |
2503 | * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd |
2504 | * fcp_rsp regions to the first data bde entry |
2505 | */ |
2506 | bpl += 2; |
	if (scsi_sg_count(scsi_cmnd)) {
2508 | /* |
2509 | * The driver stores the segment count returned from dma_map_sg |
		 * because this is a count of dma-mappings used to map the use_sg
2511 | * pages. They are not guaranteed to be the same for those |
2512 | * architectures that implement an IOMMU. |
2513 | */ |
2514 | datasegcnt = dma_map_sg(&phba->pcidev->dev, |
2515 | scsi_sglist(scsi_cmnd), |
2516 | scsi_sg_count(scsi_cmnd), datadir); |
2517 | if (unlikely(!datasegcnt)) |
2518 | return 1; |
2519 | |
2520 | lpfc_cmd->seg_cnt = datasegcnt; |
2521 | |
2522 | /* First check if data segment count from SCSI Layer is good */ |
2523 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
2524 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
2525 | ret = 2; |
2526 | goto err; |
2527 | } |
2528 | |
		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2530 | |
2531 | switch (prot_group_type) { |
2532 | case LPFC_PG_TYPE_NO_DIF: |
2533 | |
2534 | /* Here we need to add a PDE5 and PDE6 to the count */ |
2535 | if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { |
2536 | ret = 2; |
2537 | goto err; |
2538 | } |
2539 | |
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
						    datasegcnt);
2542 | /* we should have 2 or more entries in buffer list */ |
2543 | if (num_bde < 2) { |
2544 | ret = 2; |
2545 | goto err; |
2546 | } |
2547 | break; |
2548 | |
2549 | case LPFC_PG_TYPE_DIF_BUF: |
2550 | /* |
2551 | * This type indicates that protection buffers are |
2552 | * passed to the driver, so that needs to be prepared |
2553 | * for DMA |
2554 | */ |
2555 | protsegcnt = dma_map_sg(&phba->pcidev->dev, |
2556 | scsi_prot_sglist(scsi_cmnd), |
2557 | scsi_prot_sg_count(scsi_cmnd), datadir); |
2558 | if (unlikely(!protsegcnt)) { |
				scsi_dma_unmap(scsi_cmnd);
2560 | return 1; |
2561 | } |
2562 | |
2563 | lpfc_cmd->prot_seg_cnt = protsegcnt; |
2564 | |
2565 | /* |
			 * There is a minimum of 4 BPLs used for every
2567 | * protection data segment. |
2568 | */ |
2569 | if ((lpfc_cmd->prot_seg_cnt * 4) > |
2570 | (phba->cfg_total_seg_cnt - 2)) { |
2571 | ret = 2; |
2572 | goto err; |
2573 | } |
2574 | |
			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
							 datasegcnt, protsegcnt);
2577 | /* we should have 3 or more entries in buffer list */ |
2578 | if ((num_bde < 3) || |
2579 | (num_bde > phba->cfg_total_seg_cnt)) { |
2580 | ret = 2; |
2581 | goto err; |
2582 | } |
2583 | break; |
2584 | |
2585 | case LPFC_PG_TYPE_INVALID: |
2586 | default: |
			scsi_dma_unmap(scsi_cmnd);
2588 | lpfc_cmd->seg_cnt = 0; |
2589 | |
2590 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2591 | "9022 Unexpected protection group %i\n" , |
2592 | prot_group_type); |
2593 | return 2; |
2594 | } |
2595 | } |
2596 | |
2597 | /* |
2598 | * Finish initializing those IOCB fields that are dependent on the |
2599 | * scsi_cmnd request_buffer. Note that the bdeSize is explicitly |
2600 | * reinitialized since all iocb memory resources are used many times |
2601 | * for transmit, receive, and continuation bpl's. |
2602 | */ |
2603 | iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); |
2604 | iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); |
2605 | iocb_cmd->ulpBdeCount = 1; |
2606 | iocb_cmd->ulpLe = 1; |
2607 | |
2608 | fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); |
2609 | fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); |
2610 | |
2611 | /* |
2612 | * Due to difference in data length between DIF/non-DIF paths, |
2613 | * we need to set word 4 of IOCB here |
2614 | */ |
2615 | iocb_cmd->un.fcpi.fcpi_parm = fcpdl; |
2616 | |
2617 | /* |
2618 | * For First burst, we may need to adjust the initial transfer |
2619 | * length for DIF |
2620 | */ |
2621 | if (iocb_cmd->un.fcpi.fcpi_XRdy && |
2622 | (fcpdl < vport->cfg_first_burst_size)) |
2623 | iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; |
2624 | |
2625 | return 0; |
2626 | err: |
2627 | if (lpfc_cmd->seg_cnt) |
		scsi_dma_unmap(scsi_cmnd);
2629 | if (lpfc_cmd->prot_seg_cnt) |
2630 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), |
2631 | scsi_prot_sg_count(scsi_cmnd), |
2632 | scsi_cmnd->sc_data_direction); |
2633 | |
2634 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
2635 | "9023 Cannot setup S/G List for HBA" |
2636 | "IO segs %d/%d BPL %d SCSI %d: %d %d\n" , |
2637 | lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, |
2638 | phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, |
2639 | prot_group_type, num_bde); |
2640 | |
2641 | lpfc_cmd->seg_cnt = 0; |
2642 | lpfc_cmd->prot_seg_cnt = 0; |
2643 | return ret; |
2644 | } |
2645 | |
2646 | /* |
 * This function calculates the T10 DIF guard tag
 * on the specified data using the CRC algorithm
 * implemented by crc_t10dif().
2650 | */ |
2651 | static uint16_t |
2652 | lpfc_bg_crc(uint8_t *data, int count) |
2653 | { |
2654 | uint16_t crc = 0; |
2655 | uint16_t x; |
2656 | |
2657 | crc = crc_t10dif(data, count); |
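	/* Keep the guard in big-endian form so it can be compared directly
	 * against the guard_tag field of the on-wire scsi_dif_tuple.
	 */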
2658 | x = cpu_to_be16(crc); |
2659 | return x; |
2660 | } |
2661 | |
2662 | /* |
 * This function calculates the T10 DIF guard tag
 * on the specified data using the IP checksum algorithm
 * implemented by ip_compute_csum().
2666 | */ |
2667 | static uint16_t |
2668 | lpfc_bg_csum(uint8_t *data, int count) |
2669 | { |
2670 | uint16_t ret; |
2671 | |
	ret = ip_compute_csum(data, count);
2673 | return ret; |
2674 | } |
2675 | |
2676 | /* |
2677 | * This function examines the protection data to try to determine |
2678 | * what type of T10-DIF error occurred. |
2679 | */ |
2680 | static void |
2681 | lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
2682 | { |
2683 | struct scatterlist *sgpe; /* s/g prot entry */ |
2684 | struct scatterlist *sgde; /* s/g data entry */ |
2685 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; |
2686 | struct scsi_dif_tuple *src = NULL; |
2687 | uint8_t *data_src = NULL; |
2688 | uint16_t guard_tag; |
2689 | uint16_t start_app_tag, app_tag; |
2690 | uint32_t start_ref_tag, ref_tag; |
2691 | int prot, protsegcnt; |
2692 | int err_type, len, data_len; |
2693 | int chk_ref, chk_app, chk_guard; |
2694 | uint16_t sum; |
2695 | unsigned blksize; |
2696 | |
2697 | err_type = BGS_GUARD_ERR_MASK; |
2698 | sum = 0; |
2699 | guard_tag = 0; |
2700 | |
2701 | /* First check to see if there is protection data to examine */ |
	prot = scsi_get_prot_op(cmd);
2703 | if ((prot == SCSI_PROT_READ_STRIP) || |
2704 | (prot == SCSI_PROT_WRITE_INSERT) || |
2705 | (prot == SCSI_PROT_NORMAL)) |
2706 | goto out; |
2707 | |
2708 | /* Currently the driver just supports ref_tag and guard_tag checking */ |
2709 | chk_ref = 1; |
2710 | chk_app = 0; |
2711 | chk_guard = 0; |
2712 | |
2713 | /* Setup a ptr to the protection data provided by the SCSI host */ |
2714 | sgpe = scsi_prot_sglist(cmd); |
2715 | protsegcnt = lpfc_cmd->prot_seg_cnt; |
2716 | |
2717 | if (sgpe && protsegcnt) { |
2718 | |
2719 | /* |
2720 | * We will only try to verify guard tag if the segment |
2721 | * data length is a multiple of the blksize. |
2722 | */ |
2723 | sgde = scsi_sglist(cmd); |
		blksize = scsi_prot_interval(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
2726 | data_len = sg_dma_len(sgde); |
2727 | if ((data_len & (blksize - 1)) == 0) |
2728 | chk_guard = 1; |
2729 | |
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = scsi_prot_ref_tag(cmd);
2732 | start_app_tag = src->app_tag; |
2733 | len = sg_dma_len(sgpe); |
2734 | while (src && protsegcnt) { |
2735 | while (len) { |
2736 | |
2737 | /* |
2738 | * First check to see if a protection data |
2739 | * check is valid |
2740 | */ |
2741 | if ((src->ref_tag == T10_PI_REF_ESCAPE) || |
2742 | (src->app_tag == T10_PI_APP_ESCAPE)) { |
2743 | start_ref_tag++; |
2744 | goto skipit; |
2745 | } |
2746 | |
2747 | /* First Guard Tag checking */ |
2748 | if (chk_guard) { |
2749 | guard_tag = src->guard_tag; |
2750 | if (cmd->prot_flags |
2751 | & SCSI_PROT_IP_CHECKSUM) |
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
2757 | if ((guard_tag != sum)) { |
2758 | err_type = BGS_GUARD_ERR_MASK; |
2759 | goto out; |
2760 | } |
2761 | } |
2762 | |
2763 | /* Reference Tag checking */ |
2764 | ref_tag = be32_to_cpu(src->ref_tag); |
2765 | if (chk_ref && (ref_tag != start_ref_tag)) { |
2766 | err_type = BGS_REFTAG_ERR_MASK; |
2767 | goto out; |
2768 | } |
2769 | start_ref_tag++; |
2770 | |
2771 | /* App Tag checking */ |
2772 | app_tag = src->app_tag; |
2773 | if (chk_app && (app_tag != start_app_tag)) { |
2774 | err_type = BGS_APPTAG_ERR_MASK; |
2775 | goto out; |
2776 | } |
2777 | skipit: |
2778 | len -= sizeof(struct scsi_dif_tuple); |
2779 | if (len < 0) |
2780 | len = 0; |
2781 | src++; |
2782 | |
2783 | data_src += blksize; |
2784 | data_len -= blksize; |
2785 | |
2786 | /* |
2787 | * Are we at the end of the Data segment? |
2788 | * The data segment is only used for Guard |
2789 | * tag checking. |
2790 | */ |
2791 | if (chk_guard && (data_len == 0)) { |
2792 | chk_guard = 0; |
2793 | sgde = sg_next(sgde); |
2794 | if (!sgde) |
2795 | goto out; |
2796 | |
					data_src = (uint8_t *)sg_virt(sgde);
2798 | data_len = sg_dma_len(sgde); |
2799 | if ((data_len & (blksize - 1)) == 0) |
2800 | chk_guard = 1; |
2801 | } |
2802 | } |
2803 | |
2804 | /* Goto the next Protection data segment */ |
2805 | sgpe = sg_next(sgpe); |
2806 | if (sgpe) { |
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2808 | len = sg_dma_len(sgpe); |
2809 | } else { |
2810 | src = NULL; |
2811 | } |
2812 | protsegcnt--; |
2813 | } |
2814 | } |
2815 | out: |
2816 | if (err_type == BGS_GUARD_ERR_MASK) { |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2822 | scsi_prot_ref_tag(cmd), |
2823 | sum, guard_tag); |
2824 | |
2825 | } else if (err_type == BGS_REFTAG_ERR_MASK) { |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2832 | scsi_prot_ref_tag(cmd), |
2833 | ref_tag, start_ref_tag); |
2834 | |
2835 | } else if (err_type == BGS_APPTAG_ERR_MASK) { |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2842 | scsi_prot_ref_tag(cmd), |
2843 | app_tag, start_app_tag); |
2844 | } |
2845 | } |
2846 | |
2847 | /* |
2848 | * This function checks for BlockGuard errors detected by |
2849 | * the HBA. In case of errors, the ASC/ASCQ fields in the |
2850 | * sense buffer will be set accordingly, paired with |
2851 | * ILLEGAL_REQUEST to signal to the kernel that the HBA |
2852 | * detected corruption. |
2853 | * |
2854 | * Returns: |
2855 | * 0 - No error found |
2856 | * 1 - BlockGuard error found |
2857 | * -1 - Internal error (bad profile, ...etc) |
2858 | */ |
2859 | static int |
2860 | lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, |
2861 | struct lpfc_iocbq *pIocbOut) |
2862 | { |
2863 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; |
2864 | struct sli3_bg_fields *bgf; |
2865 | int ret = 0; |
2866 | struct lpfc_wcqe_complete *wcqe; |
2867 | u32 status; |
2868 | u32 bghm = 0; |
2869 | u32 bgstat = 0; |
2870 | u64 failing_sector = 0; |
2871 | |
2872 | if (phba->sli_rev == LPFC_SLI_REV4) { |
2873 | wcqe = &pIocbOut->wcqe_cmpl; |
2874 | status = bf_get(lpfc_wcqe_c_status, wcqe); |
2875 | |
2876 | if (status == CQE_STATUS_DI_ERROR) { |
2877 | /* Guard Check failed */ |
2878 | if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) |
2879 | bgstat |= BGS_GUARD_ERR_MASK; |
2880 | |
2881 | /* AppTag Check failed */ |
2882 | if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) |
2883 | bgstat |= BGS_APPTAG_ERR_MASK; |
2884 | |
2885 | /* RefTag Check failed */ |
2886 | if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) |
2887 | bgstat |= BGS_REFTAG_ERR_MASK; |
2888 | |
2889 | /* Check to see if there was any good data before the |
2890 | * error |
2891 | */ |
2892 | if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { |
2893 | bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; |
2894 | bghm = wcqe->total_data_placed; |
2895 | } |
2896 | |
2897 | /* |
2898 | * Set ALL the error bits to indicate we don't know what |
2899 | * type of error it is. |
2900 | */ |
2901 | if (!bgstat) |
2902 | bgstat |= (BGS_REFTAG_ERR_MASK | |
2903 | BGS_APPTAG_ERR_MASK | |
2904 | BGS_GUARD_ERR_MASK); |
2905 | } |
2906 | |
2907 | } else { |
2908 | bgf = &pIocbOut->iocb.unsli3.sli3_bg; |
2909 | bghm = bgf->bghm; |
2910 | bgstat = bgf->bgstat; |
2911 | } |
2912 | |
2913 | if (lpfc_bgs_get_invalid_prof(bgstat)) { |
2914 | cmd->result = DID_ERROR << 16; |
2915 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2916 | "9072 BLKGRD: Invalid BG Profile in cmd " |
2917 | "0x%x reftag 0x%x blk cnt 0x%x " |
2918 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2919 | scsi_prot_ref_tag(cmd), |
2920 | scsi_logical_block_count(cmd), bgstat, bghm); |
2921 | ret = (-1); |
2922 | goto out; |
2923 | } |
2924 | |
2925 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { |
2926 | cmd->result = DID_ERROR << 16; |
2927 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
2928 | "9073 BLKGRD: Invalid BG PDIF Block in cmd " |
2929 | "0x%x reftag 0x%x blk cnt 0x%x " |
2930 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
2931 | scsi_prot_ref_tag(cmd), |
2932 | scsi_logical_block_count(cmd), bgstat, bghm); |
2933 | ret = (-1); |
2934 | goto out; |
2935 | } |
2936 | |
2937 | if (lpfc_bgs_get_guard_err(bgstat)) { |
2938 | ret = 1; |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2946 | scsi_prot_ref_tag(cmd), |
2947 | scsi_logical_block_count(cmd), bgstat, bghm); |
2948 | } |
2949 | |
2950 | if (lpfc_bgs_get_reftag_err(bgstat)) { |
2951 | ret = 1; |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2959 | scsi_prot_ref_tag(cmd), |
2960 | scsi_logical_block_count(cmd), bgstat, bghm); |
2961 | } |
2962 | |
2963 | if (lpfc_bgs_get_apptag_err(bgstat)) { |
2964 | ret = 1; |
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2972 | scsi_prot_ref_tag(cmd), |
2973 | scsi_logical_block_count(cmd), bgstat, bghm); |
2974 | } |
2975 | |
2976 | if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { |
2977 | /* |
2978 | * setup sense data descriptor 0 per SPC-4 as an information |
2979 | * field, and put the failing LBA in it. |
2980 | * This code assumes there was also a guard/app/ref tag error |
2981 | * indication. |
2982 | */ |
2983 | cmd->sense_buffer[7] = 0xc; /* Additional sense length */ |
2984 | cmd->sense_buffer[8] = 0; /* Information descriptor type */ |
2985 | cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ |
2986 | cmd->sense_buffer[10] = 0x80; /* Validity bit */ |
2987 | |
2988 | /* bghm is a "on the wire" FC frame based count */ |
		switch (scsi_get_prot_op(cmd)) {
2990 | case SCSI_PROT_READ_INSERT: |
2991 | case SCSI_PROT_WRITE_STRIP: |
2992 | bghm /= cmd->device->sector_size; |
2993 | break; |
2994 | case SCSI_PROT_READ_STRIP: |
2995 | case SCSI_PROT_WRITE_INSERT: |
2996 | case SCSI_PROT_READ_PASS: |
2997 | case SCSI_PROT_WRITE_PASS: |
2998 | bghm /= (cmd->device->sector_size + |
2999 | sizeof(struct scsi_dif_tuple)); |
3000 | break; |
3001 | } |
3002 | |
		failing_sector = scsi_get_lba(cmd);
3004 | failing_sector += bghm; |
3005 | |
3006 | /* Descriptor Information */ |
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3008 | } |
3009 | |
3010 | if (!ret) { |
3011 | /* No error was reported - problem in FW? */ |
3012 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
3013 | "9057 BLKGRD: Unknown error in cmd " |
3014 | "0x%x reftag 0x%x blk cnt 0x%x " |
3015 | "bgstat=x%x bghm=x%x\n" , cmd->cmnd[0], |
3016 | scsi_prot_ref_tag(cmd), |
3017 | scsi_logical_block_count(cmd), bgstat, bghm); |
3018 | |
3019 | /* Calculate what type of error it was */ |
3020 | lpfc_calc_bg_err(phba, lpfc_cmd); |
3021 | } |
3022 | out: |
3023 | return ret; |
3024 | } |
3025 | |
3026 | /** |
3027 | * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec |
3028 | * @phba: The Hba for which this call is being executed. |
3029 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3030 | * |
3031 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd |
3032 | * field of @lpfc_cmd for device with SLI-4 interface spec. |
3033 | * |
3034 | * Return codes: |
3035 | * 2 - Error - Do not retry |
3036 | * 1 - Error - Retry |
3037 | * 0 - Success |
3038 | **/ |
3039 | static int |
3040 | lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
3041 | { |
3042 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
3043 | struct scatterlist *sgel = NULL; |
3044 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
3045 | struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
3046 | struct sli4_sge *first_data_sgl; |
3047 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
3048 | struct lpfc_vport *vport = phba->pport; |
3049 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
3050 | dma_addr_t physaddr; |
3051 | uint32_t dma_len; |
3052 | uint32_t dma_offset = 0; |
3053 | int nseg, i, j; |
3054 | struct ulp_bde64 *bde; |
3055 | bool lsp_just_set = false; |
3056 | struct sli4_hybrid_sgl *sgl_xtra = NULL; |
3057 | |
3058 | /* |
3059 | * There are three possibilities here - use scatter-gather segment, use |
3060 | * the single mapping, or neither. Start the lpfc command prep by |
3061 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first |
3062 | * data bde entry. |
3063 | */ |
	if (scsi_sg_count(scsi_cmnd)) {
3065 | /* |
3066 | * The driver stores the segment count returned from dma_map_sg |
		 * because this is a count of dma-mappings used to map the use_sg
3068 | * pages. They are not guaranteed to be the same for those |
3069 | * architectures that implement an IOMMU. |
3070 | */ |
3071 | |
		nseg = scsi_dma_map(scsi_cmnd);
3073 | if (unlikely(nseg <= 0)) |
3074 | return 1; |
3075 | sgl += 1; |
3076 | /* clear the last flag in the fcp_rsp map entry */ |
3077 | sgl->word2 = le32_to_cpu(sgl->word2); |
3078 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
3079 | sgl->word2 = cpu_to_le32(sgl->word2); |
3080 | sgl += 1; |
3081 | first_data_sgl = sgl; |
3082 | lpfc_cmd->seg_cnt = nseg; |
3083 | if (!phba->cfg_xpsgl && |
3084 | lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
3085 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3086 | "9074 BLKGRD:" |
3087 | " %s: Too many sg segments from " |
3088 | "dma_map_sg. Config %d, seg_cnt %d\n" , |
3089 | __func__, phba->cfg_sg_seg_cnt, |
3090 | lpfc_cmd->seg_cnt); |
3091 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
3092 | lpfc_cmd->seg_cnt = 0; |
			scsi_dma_unmap(scsi_cmnd);
3094 | return 2; |
3095 | } |
3096 | |
3097 | /* |
3098 | * The driver established a maximum scatter-gather segment count |
3099 | * during probe that limits the number of sg elements in any |
3100 | * single scsi command. Just run through the seg_cnt and format |
3101 | * the sge's. |
3102 | * When using SLI-3 the driver will try to fit all the BDEs into |
3103 | * the IOCB. If it can't then the BDEs get added to a BPL as it |
3104 | * does for SLI-2 mode. |
3105 | */ |
3106 | |
3107 | /* for tracking segment boundaries */ |
		sgel = scsi_sglist(scsi_cmnd);
3109 | j = 2; |
3110 | for (i = 0; i < nseg; i++) { |
3111 | sgl->word2 = 0; |
3112 | if (nseg == 1) { |
3113 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3114 | bf_set(lpfc_sli4_sge_type, sgl, |
3115 | LPFC_SGE_TYPE_DATA); |
3116 | } else { |
3117 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
3118 | |
3119 | /* do we need to expand the segment */ |
3120 | if (!lsp_just_set && |
3121 | !((j + 1) % phba->border_sge_num) && |
3122 | ((nseg - 1) != i)) { |
3123 | /* set LSP type */ |
3124 | bf_set(lpfc_sli4_sge_type, sgl, |
3125 | LPFC_SGE_TYPE_LSP); |
3126 | |
					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);
3129 | |
3130 | if (unlikely(!sgl_xtra)) { |
3131 | lpfc_cmd->seg_cnt = 0; |
						scsi_dma_unmap(scsi_cmnd);
3133 | return 1; |
3134 | } |
3135 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
3136 | sgl_xtra->dma_phys_sgl)); |
3137 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
3138 | sgl_xtra->dma_phys_sgl)); |
3139 | |
3140 | } else { |
3141 | bf_set(lpfc_sli4_sge_type, sgl, |
3142 | LPFC_SGE_TYPE_DATA); |
3143 | } |
3144 | } |
3145 | |
3146 | if (!(bf_get(lpfc_sli4_sge_type, sgl) & |
3147 | LPFC_SGE_TYPE_LSP)) { |
3148 | if ((nseg - 1) == i) |
3149 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3150 | |
3151 | physaddr = sg_dma_address(sgel); |
3152 | dma_len = sg_dma_len(sgel); |
3153 | sgl->addr_lo = cpu_to_le32(putPaddrLow( |
3154 | physaddr)); |
3155 | sgl->addr_hi = cpu_to_le32(putPaddrHigh( |
3156 | physaddr)); |
3157 | |
3158 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
3159 | sgl->word2 = cpu_to_le32(sgl->word2); |
3160 | sgl->sge_len = cpu_to_le32(dma_len); |
3161 | |
3162 | dma_offset += dma_len; |
3163 | sgel = sg_next(sgel); |
3164 | |
3165 | sgl++; |
3166 | lsp_just_set = false; |
3167 | |
3168 | } else { |
3169 | sgl->word2 = cpu_to_le32(sgl->word2); |
3170 | sgl->sge_len = cpu_to_le32( |
3171 | phba->cfg_sg_dma_buf_size); |
3172 | |
3173 | sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; |
3174 | i = i - 1; |
3175 | |
3176 | lsp_just_set = true; |
3177 | } |
3178 | |
3179 | j++; |
3180 | } |
3181 | |
3182 | /* PBDE support for first data SGE only. |
3183 | * For FCoE, we key off Performance Hints. |
3184 | * For FC, we key off lpfc_enable_pbde. |
3185 | */ |
3186 | if (nseg == 1 && |
3187 | ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || |
3188 | phba->cfg_enable_pbde)) { |
3189 | /* Words 13-15 */ |
3190 | bde = (struct ulp_bde64 *) |
3191 | &wqe->words[13]; |
3192 | bde->addrLow = first_data_sgl->addr_lo; |
3193 | bde->addrHigh = first_data_sgl->addr_hi; |
3194 | bde->tus.f.bdeSize = |
3195 | le32_to_cpu(first_data_sgl->sge_len); |
3196 | bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
3197 | bde->tus.w = cpu_to_le32(bde->tus.w); |
3198 | |
3199 | /* Word 11 - set PBDE bit */ |
3200 | bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); |
3201 | } else { |
3202 | memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); |
3203 | /* Word 11 - PBDE bit disabled by default template */ |
3204 | } |
3205 | } else { |
3206 | sgl += 1; |
3207 | /* set the last flag in the fcp_rsp map entry */ |
3208 | sgl->word2 = le32_to_cpu(sgl->word2); |
3209 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3210 | sgl->word2 = cpu_to_le32(sgl->word2); |
3211 | |
3212 | if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || |
3213 | phba->cfg_enable_pbde) { |
3214 | bde = (struct ulp_bde64 *) |
3215 | &wqe->words[13]; |
3216 | memset(bde, 0, (sizeof(uint32_t) * 3)); |
3217 | } |
3218 | } |
3219 | |
3220 | /* |
3221 | * Finish initializing those IOCB fields that are dependent on the |
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized since all iocb memory resources are
	 * reused.
3225 | */ |
3226 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); |
3227 | /* Set first-burst provided it was successfully negotiated */ |
3228 | if (!(phba->hba_flag & HBA_FCOE_MODE) && |
3229 | vport->cfg_first_burst_size && |
3230 | scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { |
3231 | u32 init_len, total_len; |
3232 | |
3233 | total_len = be32_to_cpu(fcp_cmnd->fcpDl); |
3234 | init_len = min(total_len, vport->cfg_first_burst_size); |
3235 | |
3236 | /* Word 4 & 5 */ |
3237 | wqe->fcp_iwrite.initial_xfer_len = init_len; |
3238 | wqe->fcp_iwrite.total_xfer_len = total_len; |
3239 | } else { |
3240 | /* Word 4 */ |
3241 | wqe->fcp_iwrite.total_xfer_len = |
3242 | be32_to_cpu(fcp_cmnd->fcpDl); |
3243 | } |
3244 | |
3245 | /* |
3246 | * If the OAS driver feature is enabled and the lun is enabled for |
3247 | * OAS, set the oas iocb related flags. |
3248 | */ |
3249 | if ((phba->cfg_fof) && ((struct lpfc_device_data *) |
3250 | scsi_cmnd->device->hostdata)->oas_enabled) { |
3251 | lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); |
3252 | lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) |
3253 | scsi_cmnd->device->hostdata)->priority; |
3254 | |
3255 | /* Word 10 */ |
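/* Mark the WQE for OAS and enable CCP; the CCP value carries the
* IO priority (falling back to the XLane priority if none was set).
*/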
3256 | bf_set(wqe_oas, &wqe->generic.wqe_com, 1); |
3257 | bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); |
3258 | |
3259 | if (lpfc_cmd->cur_iocbq.priority) |
3260 | bf_set(wqe_ccp, &wqe->generic.wqe_com, |
3261 | (lpfc_cmd->cur_iocbq.priority << 1)); |
3262 | else |
3263 | bf_set(wqe_ccp, &wqe->generic.wqe_com, |
3264 | (phba->cfg_XLanePriority << 1)); |
3265 | } |
3266 | |
3267 | return 0; |
3268 | } |
3269 | |
3270 | /** |
3271 | * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec |
3272 | * @phba: The Hba for which this call is being executed. |
3273 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3274 | * |
3275 | * This is the protection/DIF aware version of |
3276 | * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the |
* two functions eventually, but for now they are kept separate.
3278 | * Return codes: |
3279 | * 2 - Error - Do not retry |
3280 | * 1 - Error - Retry |
3281 | * 0 - Success |
3282 | **/ |
3283 | static int |
3284 | lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, |
3285 | struct lpfc_io_buf *lpfc_cmd) |
3286 | { |
3287 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
3288 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
3289 | struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); |
3290 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
3291 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
3292 | uint32_t num_sge = 0; |
3293 | int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; |
3294 | int prot_group_type = 0; |
3295 | int fcpdl; |
3296 | int ret = 1; |
3297 | struct lpfc_vport *vport = phba->pport; |
3298 | |
3299 | /* |
3300 | * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd |
3301 | * fcp_rsp regions to the first data sge entry |
3302 | */ |
if (scsi_sg_count(scsi_cmnd)) {
/*
* The driver stores the segment count returned from dma_map_sg
* because this is a count of dma-mappings used to map the use_sg
3307 | * pages. They are not guaranteed to be the same for those |
3308 | * architectures that implement an IOMMU. |
3309 | */ |
3310 | datasegcnt = dma_map_sg(&phba->pcidev->dev, |
3311 | scsi_sglist(scsi_cmnd), |
3312 | scsi_sg_count(scsi_cmnd), datadir); |
3313 | if (unlikely(!datasegcnt)) |
3314 | return 1; |
3315 | |
3316 | sgl += 1; |
3317 | /* clear the last flag in the fcp_rsp map entry */ |
3318 | sgl->word2 = le32_to_cpu(sgl->word2); |
3319 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
3320 | sgl->word2 = cpu_to_le32(sgl->word2); |
3321 | |
3322 | sgl += 1; |
3323 | lpfc_cmd->seg_cnt = datasegcnt; |
3324 | |
3325 | /* First check if data segment count from SCSI Layer is good */ |
3326 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && |
3327 | !phba->cfg_xpsgl) { |
3328 | WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); |
3329 | ret = 2; |
3330 | goto err; |
3331 | } |
3332 | |
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3334 | |
3335 | switch (prot_group_type) { |
3336 | case LPFC_PG_TYPE_NO_DIF: |
3337 | /* Here we need to add a DISEED to the count */ |
3338 | if (((lpfc_cmd->seg_cnt + 1) > |
3339 | phba->cfg_total_seg_cnt) && |
3340 | !phba->cfg_xpsgl) { |
3341 | ret = 2; |
3342 | goto err; |
3343 | } |
3344 | |
num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3346 | datasegcnt, lpfc_cmd); |
3347 | |
3348 | /* we should have 2 or more entries in buffer list */ |
3349 | if (num_sge < 2) { |
3350 | ret = 2; |
3351 | goto err; |
3352 | } |
3353 | break; |
3354 | |
3355 | case LPFC_PG_TYPE_DIF_BUF: |
3356 | /* |
3357 | * This type indicates that protection buffers are |
3358 | * passed to the driver, so that needs to be prepared |
3359 | * for DMA |
3360 | */ |
3361 | protsegcnt = dma_map_sg(&phba->pcidev->dev, |
3362 | scsi_prot_sglist(scsi_cmnd), |
3363 | scsi_prot_sg_count(scsi_cmnd), datadir); |
3364 | if (unlikely(!protsegcnt)) { |
scsi_dma_unmap(scsi_cmnd);
3366 | return 1; |
3367 | } |
3368 | |
3369 | lpfc_cmd->prot_seg_cnt = protsegcnt; |
3370 | /* |
* There is a minimum of 3 SGEs used for every
3372 | * protection data segment. |
3373 | */ |
3374 | if (((lpfc_cmd->prot_seg_cnt * 3) > |
3375 | (phba->cfg_total_seg_cnt - 2)) && |
3376 | !phba->cfg_xpsgl) { |
3377 | ret = 2; |
3378 | goto err; |
3379 | } |
3380 | |
num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
datasegcnt, protsegcnt, lpfc_cmd);
3383 | |
3384 | /* we should have 3 or more entries in buffer list */ |
3385 | if (num_sge < 3 || |
3386 | (num_sge > phba->cfg_total_seg_cnt && |
3387 | !phba->cfg_xpsgl)) { |
3388 | ret = 2; |
3389 | goto err; |
3390 | } |
3391 | break; |
3392 | |
3393 | case LPFC_PG_TYPE_INVALID: |
3394 | default: |
scsi_dma_unmap(scsi_cmnd);
lpfc_cmd->seg_cnt = 0;

lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9083 Unexpected protection group %i\n",
3400 | prot_group_type); |
3401 | return 2; |
3402 | } |
3403 | } |
3404 | |
switch (scsi_get_prot_op(scsi_cmnd)) {
3406 | case SCSI_PROT_WRITE_STRIP: |
3407 | case SCSI_PROT_READ_STRIP: |
3408 | lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP; |
3409 | break; |
3410 | case SCSI_PROT_WRITE_INSERT: |
3411 | case SCSI_PROT_READ_INSERT: |
3412 | lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT; |
3413 | break; |
3414 | case SCSI_PROT_WRITE_PASS: |
3415 | case SCSI_PROT_READ_PASS: |
3416 | lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS; |
3417 | break; |
3418 | } |
3419 | |
3420 | fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); |
fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3422 | |
3423 | /* Set first-burst provided it was successfully negotiated */ |
3424 | if (!(phba->hba_flag & HBA_FCOE_MODE) && |
3425 | vport->cfg_first_burst_size && |
3426 | scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { |
3427 | u32 init_len, total_len; |
3428 | |
3429 | total_len = be32_to_cpu(fcp_cmnd->fcpDl); |
3430 | init_len = min(total_len, vport->cfg_first_burst_size); |
3431 | |
3432 | /* Word 4 & 5 */ |
3433 | wqe->fcp_iwrite.initial_xfer_len = init_len; |
3434 | wqe->fcp_iwrite.total_xfer_len = total_len; |
3435 | } else { |
3436 | /* Word 4 */ |
3437 | wqe->fcp_iwrite.total_xfer_len = |
3438 | be32_to_cpu(fcp_cmnd->fcpDl); |
3439 | } |
3440 | |
3441 | /* |
3442 | * If the OAS driver feature is enabled and the lun is enabled for |
3443 | * OAS, set the oas iocb related flags. |
3444 | */ |
3445 | if ((phba->cfg_fof) && ((struct lpfc_device_data *) |
3446 | scsi_cmnd->device->hostdata)->oas_enabled) { |
3447 | lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); |
3448 | |
3449 | /* Word 10 */ |
3450 | bf_set(wqe_oas, &wqe->generic.wqe_com, 1); |
3451 | bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); |
3452 | bf_set(wqe_ccp, &wqe->generic.wqe_com, |
3453 | (phba->cfg_XLanePriority << 1)); |
3454 | } |
3455 | |
3456 | /* Word 7. DIF Flags */ |
3457 | if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS) |
3458 | bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); |
3459 | else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP) |
3460 | bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); |
3461 | else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT) |
3462 | bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); |
3463 | |
3464 | lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS | |
3465 | LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); |
3466 | |
3467 | return 0; |
3468 | err: |
3469 | if (lpfc_cmd->seg_cnt) |
scsi_dma_unmap(scsi_cmnd);
3471 | if (lpfc_cmd->prot_seg_cnt) |
3472 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), |
3473 | scsi_prot_sg_count(scsi_cmnd), |
3474 | scsi_cmnd->sc_data_direction); |
3475 | |
3476 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
3477 | "9084 Cannot setup S/G List for HBA " |
3478 | "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n" , |
3479 | lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, |
3480 | phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, |
3481 | prot_group_type, num_sge, ret); |
3482 | |
3483 | lpfc_cmd->seg_cnt = 0; |
3484 | lpfc_cmd->prot_seg_cnt = 0; |
3485 | return ret; |
3486 | } |
3487 | |
3488 | /** |
3489 | * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer |
3490 | * @phba: The Hba for which this call is being executed. |
3491 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3492 | * |
3493 | * This routine wraps the actual DMA mapping function pointer from the |
3494 | * lpfc_hba struct. |
3495 | * |
3496 | * Return codes: |
3497 | * 1 - Error |
3498 | * 0 - Success |
3499 | **/ |
3500 | static inline int |
3501 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
3502 | { |
3503 | return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); |
3504 | } |
3505 | |
3506 | /** |
3507 | * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer |
3508 | * using BlockGuard. |
3509 | * @phba: The Hba for which this call is being executed. |
3510 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3511 | * |
3512 | * This routine wraps the actual DMA mapping function pointer from the |
3513 | * lpfc_hba struct. |
3514 | * |
3515 | * Return codes: |
3516 | * 1 - Error |
3517 | * 0 - Success |
3518 | **/ |
3519 | static inline int |
3520 | lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) |
3521 | { |
3522 | return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); |
3523 | } |
3524 | |
3525 | /** |
3526 | * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi |
3527 | * buffer |
3528 | * @vport: Pointer to vport object. |
3529 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
3530 | * @tmo: Timeout value for IO |
3531 | * |
3532 | * This routine initializes IOCB/WQE data structure from scsi command |
3533 | * |
3534 | * Return codes: |
3535 | * 1 - Error |
3536 | * 0 - Success |
3537 | **/ |
3538 | static inline int |
3539 | lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, |
3540 | uint8_t tmo) |
3541 | { |
3542 | return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); |
3543 | } |
3544 | |
3545 | /** |
3546 | * lpfc_send_scsi_error_event - Posts an event when there is SCSI error |
3547 | * @phba: Pointer to hba context object. |
3548 | * @vport: Pointer to vport object. |
3549 | * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. |
3550 | * @fcpi_parm: FCP Initiator parameter. |
3551 | * |
3552 | * This function posts an event when there is a SCSI command reporting |
3553 | * error from the scsi device. |
3554 | **/ |
3555 | static void |
3556 | lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, |
3557 | struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { |
3558 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; |
3559 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; |
3560 | uint32_t resp_info = fcprsp->rspStatus2; |
3561 | uint32_t scsi_status = fcprsp->rspStatus3; |
3562 | struct lpfc_fast_path_event *fast_path_evt = NULL; |
3563 | struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; |
3564 | unsigned long flags; |
3565 | |
3566 | if (!pnode) |
3567 | return; |
3568 | |
3569 | /* If there is queuefull or busy condition send a scsi event */ |
3570 | if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || |
3571 | (cmnd->result == SAM_STAT_BUSY)) { |
3572 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
3573 | if (!fast_path_evt) |
3574 | return; |
3575 | fast_path_evt->un.scsi_evt.event_type = |
3576 | FC_REG_SCSI_EVENT; |
3577 | fast_path_evt->un.scsi_evt.subcategory = |
3578 | (cmnd->result == SAM_STAT_TASK_SET_FULL) ? |
3579 | LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; |
3580 | fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; |
3581 | memcpy(&fast_path_evt->un.scsi_evt.wwpn, |
3582 | &pnode->nlp_portname, sizeof(struct lpfc_name)); |
3583 | memcpy(&fast_path_evt->un.scsi_evt.wwnn, |
3584 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
3585 | } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && |
3586 | ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { |
3587 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
3588 | if (!fast_path_evt) |
3589 | return; |
3590 | fast_path_evt->un.check_cond_evt.scsi_event.event_type = |
3591 | FC_REG_SCSI_EVENT; |
3592 | fast_path_evt->un.check_cond_evt.scsi_event.subcategory = |
3593 | LPFC_EVENT_CHECK_COND; |
3594 | fast_path_evt->un.check_cond_evt.scsi_event.lun = |
3595 | cmnd->device->lun; |
3596 | memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, |
3597 | &pnode->nlp_portname, sizeof(struct lpfc_name)); |
3598 | memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, |
3599 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
3600 | fast_path_evt->un.check_cond_evt.sense_key = |
3601 | cmnd->sense_buffer[2] & 0xf; |
3602 | fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; |
3603 | fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; |
3604 | } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && |
3605 | fcpi_parm && |
3606 | ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || |
3607 | ((scsi_status == SAM_STAT_GOOD) && |
3608 | !(resp_info & (RESID_UNDER | RESID_OVER))))) { |
3609 | /* |
3610 | * If status is good or resid does not match with fcp_param and |
3611 | * there is valid fcpi_parm, then there is a read_check error |
3612 | */ |
3613 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
3614 | if (!fast_path_evt) |
3615 | return; |
3616 | fast_path_evt->un.read_check_error.header.event_type = |
3617 | FC_REG_FABRIC_EVENT; |
3618 | fast_path_evt->un.read_check_error.header.subcategory = |
3619 | LPFC_EVENT_FCPRDCHKERR; |
3620 | memcpy(&fast_path_evt->un.read_check_error.header.wwpn, |
3621 | &pnode->nlp_portname, sizeof(struct lpfc_name)); |
3622 | memcpy(&fast_path_evt->un.read_check_error.header.wwnn, |
3623 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
3624 | fast_path_evt->un.read_check_error.lun = cmnd->device->lun; |
3625 | fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; |
3626 | fast_path_evt->un.read_check_error.fcpiparam = |
3627 | fcpi_parm; |
3628 | } else |
3629 | return; |
3630 | |
3631 | fast_path_evt->vport = vport; |
3632 | spin_lock_irqsave(&phba->hbalock, flags); |
3633 | list_add_tail(new: &fast_path_evt->work_evt.evt_listp, head: &phba->work_list); |
3634 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
3635 | lpfc_worker_wake_up(phba); |
3636 | return; |
3637 | } |
3638 | |
3639 | /** |
3640 | * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev |
3641 | * @phba: The HBA for which this call is being executed. |
3642 | * @psb: The scsi buffer which is going to be un-mapped. |
3643 | * |
3644 | * This routine does DMA un-mapping of scatter gather list of scsi command |
3645 | * field of @lpfc_cmd for device with SLI-3 interface spec. |
3646 | **/ |
3647 | static void |
3648 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) |
3649 | { |
3650 | /* |
3651 | * There are only two special cases to consider. (1) the scsi command |
3652 | * requested scatter-gather usage or (2) the scsi command allocated |
3653 | * a request buffer, but did not request use_sg. There is a third |
3654 | * case, but it does not require resource deallocation. |
3655 | */ |
3656 | if (psb->seg_cnt > 0) |
scsi_dma_unmap(psb->pCmd);
3658 | if (psb->prot_seg_cnt > 0) |
3659 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), |
3660 | scsi_prot_sg_count(psb->pCmd), |
3661 | psb->pCmd->sc_data_direction); |
3662 | } |
3663 | |
3664 | /** |
3665 | * lpfc_unblock_requests - allow further commands to be queued. |
3666 | * @phba: pointer to phba object |
3667 | * |
3668 | * For single vport, just call scsi_unblock_requests on physical port. |
3669 | * For multiple vports, send scsi_unblock_requests for all the vports. |
3670 | */ |
3671 | void |
3672 | lpfc_unblock_requests(struct lpfc_hba *phba) |
3673 | { |
3674 | struct lpfc_vport **vports; |
3675 | struct Scsi_Host *shost; |
3676 | int i; |
3677 | |
3678 | if (phba->sli_rev == LPFC_SLI_REV4 && |
3679 | !phba->sli4_hba.max_cfg_param.vpi_used) { |
shost = lpfc_shost_from_vport(phba->pport);
3681 | scsi_unblock_requests(shost); |
3682 | return; |
3683 | } |
3684 | |
3685 | vports = lpfc_create_vport_work_array(phba); |
3686 | if (vports != NULL) |
3687 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
3689 | scsi_unblock_requests(shost); |
3690 | } |
3691 | lpfc_destroy_vport_work_array(phba, vports); |
3692 | } |
3693 | |
3694 | /** |
3695 | * lpfc_block_requests - prevent further commands from being queued. |
3696 | * @phba: pointer to phba object |
3697 | * |
3698 | * For single vport, just call scsi_block_requests on physical port. |
3699 | * For multiple vports, send scsi_block_requests for all the vports. |
3700 | */ |
3701 | void |
3702 | lpfc_block_requests(struct lpfc_hba *phba) |
3703 | { |
3704 | struct lpfc_vport **vports; |
3705 | struct Scsi_Host *shost; |
3706 | int i; |
3707 | |
3708 | if (atomic_read(v: &phba->cmf_stop_io)) |
3709 | return; |
3710 | |
3711 | if (phba->sli_rev == LPFC_SLI_REV4 && |
3712 | !phba->sli4_hba.max_cfg_param.vpi_used) { |
shost = lpfc_shost_from_vport(phba->pport);
3714 | scsi_block_requests(shost); |
3715 | return; |
3716 | } |
3717 | |
3718 | vports = lpfc_create_vport_work_array(phba); |
3719 | if (vports != NULL) |
3720 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
shost = lpfc_shost_from_vport(vports[i]);
3722 | scsi_block_requests(shost); |
3723 | } |
3724 | lpfc_destroy_vport_work_array(phba, vports); |
3725 | } |
3726 | |
3727 | /** |
3728 | * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion |
3729 | * @phba: The HBA for which this call is being executed. |
3730 | * @time: The latency of the IO that completed (in ns) |
3731 | * @size: The size of the IO that completed |
3732 | * @shost: SCSI host the IO completed on (NULL for a NVME IO) |
3733 | * |
3734 | * The routine adjusts the various Burst and Bandwidth counters used in |
3735 | * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, |
3736 | * that means the IO was never issued to the HBA, so this routine is |
3737 | * just being called to cleanup the counter from a previous |
3738 | * lpfc_update_cmf_cmd call. |
3739 | */ |
3740 | int |
3741 | lpfc_update_cmf_cmpl(struct lpfc_hba *phba, |
3742 | uint64_t time, uint32_t size, struct Scsi_Host *shost) |
3743 | { |
3744 | struct lpfc_cgn_stat *cgs; |
3745 | |
3746 | if (time != LPFC_CGN_NOT_SENT) { |
3747 | /* lat is ns coming in, save latency in us */ |
3748 | if (time < 1000) |
3749 | time = 1; |
3750 | else |
time = div_u64(time + 500, 1000); /* round it */
3752 | |
3753 | cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); |
3754 | atomic64_add(i: size, v: &cgs->rcv_bytes); |
3755 | atomic64_add(i: time, v: &cgs->rx_latency); |
3756 | atomic_inc(v: &cgs->rx_io_cnt); |
3757 | } |
3758 | return 0; |
3759 | } |
3760 | |
3761 | /** |
3762 | * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission |
3763 | * @phba: The HBA for which this call is being executed. |
3764 | * @size: The size of the IO that will be issued |
3765 | * |
3766 | * The routine adjusts the various Burst and Bandwidth counters used in |
3767 | * Congestion management and E2E. |
3768 | */ |
3769 | int |
3770 | lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) |
3771 | { |
3772 | uint64_t total; |
3773 | struct lpfc_cgn_stat *cgs; |
3774 | int cpu; |
3775 | |
3776 | /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ |
3777 | if (phba->cmf_active_mode == LPFC_CFG_MANAGED && |
3778 | phba->cmf_max_bytes_per_interval) { |
3779 | total = 0; |
3780 | for_each_present_cpu(cpu) { |
3781 | cgs = per_cpu_ptr(phba->cmf_stat, cpu); |
3782 | total += atomic64_read(v: &cgs->total_bytes); |
3783 | } |
3784 | if (total >= phba->cmf_max_bytes_per_interval) { |
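/*
* Only the first submitter to see the interval limit blocks
* the SCSI hosts and records the timestamp; every over-limit
* IO is counted as busy and returned with -EBUSY.
*/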
3785 | if (!atomic_xchg(v: &phba->cmf_bw_wait, new: 1)) { |
3786 | lpfc_block_requests(phba); |
3787 | phba->cmf_last_ts = |
3788 | lpfc_calc_cmf_latency(phba); |
3789 | } |
3790 | atomic_inc(v: &phba->cmf_busy); |
3791 | return -EBUSY; |
3792 | } |
3793 | if (size > atomic_read(v: &phba->rx_max_read_cnt)) |
3794 | atomic_set(v: &phba->rx_max_read_cnt, i: size); |
3795 | } |
3796 | |
3797 | cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); |
3798 | atomic64_add(i: size, v: &cgs->total_bytes); |
3799 | return 0; |
3800 | } |
3801 | |
3802 | /** |
3803 | * lpfc_handle_fcp_err - FCP response handler |
3804 | * @vport: The virtual port for which this call is being executed. |
3805 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
3806 | * @fcpi_parm: FCP Initiator parameter. |
3807 | * |
3808 | * This routine is called to process response IOCB with status field |
3809 | * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command |
3810 | * based upon SCSI and FCP error. |
3811 | **/ |
3812 | static void |
3813 | lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, |
3814 | uint32_t fcpi_parm) |
3815 | { |
3816 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; |
3817 | struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; |
3818 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; |
3819 | uint32_t resp_info = fcprsp->rspStatus2; |
3820 | uint32_t scsi_status = fcprsp->rspStatus3; |
3821 | uint32_t *lp; |
3822 | uint32_t host_status = DID_OK; |
3823 | uint32_t rsplen = 0; |
3824 | uint32_t fcpDl; |
3825 | uint32_t logit = LOG_FCP | LOG_FCP_ERROR; |
3826 | |
3827 | |
3828 | /* |
3829 | * If this is a task management command, there is no |
3830 | * scsi packet associated with this lpfc_cmd. The driver |
3831 | * consumes it. |
3832 | */ |
3833 | if (fcpcmd->fcpCntl2) { |
3834 | scsi_status = 0; |
3835 | goto out; |
3836 | } |
3837 | |
3838 | if (resp_info & RSP_LEN_VALID) { |
3839 | rsplen = be32_to_cpu(fcprsp->rspRspLen); |
3840 | if (rsplen != 0 && rsplen != 4 && rsplen != 8) { |
3841 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
3842 | "2719 Invalid response length: " |
3843 | "tgt x%x lun x%llx cmnd x%x rsplen " |
3844 | "x%x\n" , cmnd->device->id, |
3845 | cmnd->device->lun, cmnd->cmnd[0], |
3846 | rsplen); |
3847 | host_status = DID_ERROR; |
3848 | goto out; |
3849 | } |
3850 | if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { |
3851 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
3852 | "2757 Protocol failure detected during " |
3853 | "processing of FCP I/O op: " |
3854 | "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n" , |
3855 | cmnd->device->id, |
3856 | cmnd->device->lun, cmnd->cmnd[0], |
3857 | fcprsp->rspInfo3); |
3858 | host_status = DID_ERROR; |
3859 | goto out; |
3860 | } |
3861 | } |
3862 | |
3863 | if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { |
3864 | uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); |
3865 | if (snslen > SCSI_SENSE_BUFFERSIZE) |
3866 | snslen = SCSI_SENSE_BUFFERSIZE; |
3867 | |
3868 | if (resp_info & RSP_LEN_VALID) |
3869 | rsplen = be32_to_cpu(fcprsp->rspRspLen); |
3870 | memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); |
3871 | } |
3872 | lp = (uint32_t *)cmnd->sense_buffer; |
3873 | |
3874 | /* special handling for under run conditions */ |
3875 | if (!scsi_status && (resp_info & RESID_UNDER)) { |
3876 | /* don't log under runs if fcp set... */ |
3877 | if (vport->cfg_log_verbose & LOG_FCP) |
3878 | logit = LOG_FCP_ERROR; |
3879 | /* unless operator says so */ |
3880 | if (vport->cfg_log_verbose & LOG_FCP_UNDER) |
3881 | logit = LOG_FCP_UNDER; |
3882 | } |
3883 | |
3884 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
3885 | "9024 FCP command x%x failed: x%x SNS x%x x%x " |
3886 | "Data: x%x x%x x%x x%x x%x\n" , |
3887 | cmnd->cmnd[0], scsi_status, |
3888 | be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, |
3889 | be32_to_cpu(fcprsp->rspResId), |
3890 | be32_to_cpu(fcprsp->rspSnsLen), |
3891 | be32_to_cpu(fcprsp->rspRspLen), |
3892 | fcprsp->rspInfo3); |
3893 | |
scsi_set_resid(cmnd, 0);
fcpDl = be32_to_cpu(fcpcmd->fcpDl);
if (resp_info & RESID_UNDER) {
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3898 | |
3899 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, |
3900 | "9025 FCP Underrun, expected %d, " |
3901 | "residual %d Data: x%x x%x x%x\n" , |
3902 | fcpDl, |
3903 | scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], |
3904 | cmnd->underflow); |
3905 | |
3906 | /* |
3907 | * If there is an under run, check if under run reported by |
3908 | * storage array is same as the under run reported by HBA. |
3909 | * If this is not same, there is a dropped frame. |
3910 | */ |
3911 | if (fcpi_parm && (scsi_get_resid(cmd: cmnd) != fcpi_parm)) { |
3912 | lpfc_printf_vlog(vport, KERN_WARNING, |
3913 | LOG_FCP | LOG_FCP_ERROR, |
3914 | "9026 FCP Read Check Error " |
3915 | "and Underrun Data: x%x x%x x%x x%x\n" , |
3916 | fcpDl, |
3917 | scsi_get_resid(cmnd), fcpi_parm, |
3918 | cmnd->cmnd[0]); |
3919 | scsi_set_resid(cmd: cmnd, resid: scsi_bufflen(cmd: cmnd)); |
3920 | host_status = DID_ERROR; |
3921 | } |
3922 | /* |
3923 | * The cmnd->underflow is the minimum number of bytes that must |
3924 | * be transferred for this command. Provided a sense condition |
3925 | * is not present, make sure the actual amount transferred is at |
3926 | * least the underflow value or fail. |
3927 | */ |
3928 | if (!(resp_info & SNS_LEN_VALID) && |
3929 | (scsi_status == SAM_STAT_GOOD) && |
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
< cmnd->underflow)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9027 FCP command x%x residual "
"underrun converted to error "
"Data: x%x x%x x%x\n",
3936 | cmnd->cmnd[0], scsi_bufflen(cmnd), |
3937 | scsi_get_resid(cmnd), cmnd->underflow); |
3938 | host_status = DID_ERROR; |
3939 | } |
3940 | } else if (resp_info & RESID_OVER) { |
3941 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
3942 | "9028 FCP command x%x residual overrun error. " |
3943 | "Data: x%x x%x\n" , cmnd->cmnd[0], |
3944 | scsi_bufflen(cmnd), scsi_get_resid(cmnd)); |
3945 | host_status = DID_ERROR; |
3946 | |
3947 | /* |
3948 | * Check SLI validation that all the transfer was actually done |
3949 | * (fcpi_parm should be zero). Apply check only to reads. |
3950 | */ |
3951 | } else if (fcpi_parm) { |
3952 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, |
3953 | "9029 FCP %s Check Error Data: " |
3954 | "x%x x%x x%x x%x x%x\n" , |
3955 | ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? |
3956 | "Read" : "Write" ), |
3957 | fcpDl, be32_to_cpu(fcprsp->rspResId), |
3958 | fcpi_parm, cmnd->cmnd[0], scsi_status); |
3959 | |
3960 | /* There is some issue with the LPe12000 that causes it |
3961 | * to miscalculate the fcpi_parm and falsely trip this |
3962 | * recovery logic. Detect this case and don't error when true. |
3963 | */ |
3964 | if (fcpi_parm > fcpDl) |
3965 | goto out; |
3966 | |
3967 | switch (scsi_status) { |
3968 | case SAM_STAT_GOOD: |
3969 | case SAM_STAT_CHECK_CONDITION: |
3970 | /* Fabric dropped a data frame. Fail any successful |
3971 | * command in which we detected dropped frames. |
3972 | * A status of good or some check conditions could |
3973 | * be considered a successful command. |
3974 | */ |
3975 | host_status = DID_ERROR; |
3976 | break; |
3977 | } |
scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3979 | } |
3980 | |
3981 | out: |
3982 | cmnd->result = host_status << 16 | scsi_status; |
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
3984 | } |
3985 | |
3986 | /** |
3987 | * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO |
3988 | * @phba: The hba for which this call is being executed. |
3989 | * @pwqeIn: The command WQE for the scsi cmnd. |
3990 | * @pwqeOut: Pointer to driver response WQE object. |
3991 | * |
3992 | * This routine assigns scsi command result by looking into response WQE |
3993 | * status field appropriately. This routine handles QUEUE FULL condition as |
3994 | * well by ramping down device queue depth. |
3995 | **/ |
3996 | static void |
3997 | lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, |
3998 | struct lpfc_iocbq *pwqeOut) |
3999 | { |
4000 | struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf; |
4001 | struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; |
4002 | struct lpfc_vport *vport = pwqeIn->vport; |
4003 | struct lpfc_rport_data *rdata; |
4004 | struct lpfc_nodelist *ndlp; |
4005 | struct scsi_cmnd *cmd; |
4006 | unsigned long flags; |
4007 | struct lpfc_fast_path_event *fast_path_evt; |
4008 | struct Scsi_Host *shost; |
4009 | u32 logit = LOG_FCP; |
4010 | u32 idx; |
4011 | u32 lat; |
4012 | u8 wait_xb_clr = 0; |
4013 | |
4014 | /* Sanity check on return of outstanding command */ |
4015 | if (!lpfc_cmd) { |
4016 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4017 | "9032 Null lpfc_cmd pointer. No " |
4018 | "release, skip completion\n" ); |
4019 | return; |
4020 | } |
4021 | |
4022 | rdata = lpfc_cmd->rdata; |
4023 | ndlp = rdata->pnode; |
4024 | |
4025 | /* Sanity check on return of outstanding command */ |
4026 | cmd = lpfc_cmd->pCmd; |
4027 | if (!cmd) { |
4028 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4029 | "9042 I/O completion: Not an active IO\n" ); |
4030 | lpfc_release_scsi_buf(phba, psb: lpfc_cmd); |
4031 | return; |
4032 | } |
4033 | /* Guard against abort handler being called at same time */ |
4034 | spin_lock(lock: &lpfc_cmd->buf_lock); |
4035 | idx = lpfc_cmd->cur_iocbq.hba_wqidx; |
4036 | if (phba->sli4_hba.hdwq) |
4037 | phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; |
4038 | |
4039 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4040 | if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) |
4041 | this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); |
4042 | #endif |
4043 | shost = cmd->device->host; |
4044 | |
4045 | lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); |
4046 | lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); |
4047 | |
4048 | lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; |
4049 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) { |
4050 | lpfc_cmd->flags |= LPFC_SBUF_XBUSY; |
4051 | if (phba->cfg_fcp_wait_abts_rsp) |
4052 | wait_xb_clr = 1; |
4053 | } |
4054 | |
4055 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4056 | if (lpfc_cmd->prot_data_type) { |
4057 | struct scsi_dif_tuple *src = NULL; |
4058 | |
4059 | src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; |
4060 | /* |
4061 | * Used to restore any changes to protection |
4062 | * data for error injection. |
4063 | */ |
4064 | switch (lpfc_cmd->prot_data_type) { |
4065 | case LPFC_INJERR_REFTAG: |
4066 | src->ref_tag = |
4067 | lpfc_cmd->prot_data; |
4068 | break; |
4069 | case LPFC_INJERR_APPTAG: |
4070 | src->app_tag = |
4071 | (uint16_t)lpfc_cmd->prot_data; |
4072 | break; |
4073 | case LPFC_INJERR_GUARD: |
4074 | src->guard_tag = |
4075 | (uint16_t)lpfc_cmd->prot_data; |
4076 | break; |
4077 | default: |
4078 | break; |
4079 | } |
4080 | |
4081 | lpfc_cmd->prot_data = 0; |
4082 | lpfc_cmd->prot_data_type = 0; |
4083 | lpfc_cmd->prot_data_segment = NULL; |
4084 | } |
4085 | #endif |
4086 | if (unlikely(lpfc_cmd->status)) { |
4087 | if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && |
4088 | !lpfc_cmd->fcp_rsp->rspStatus3 && |
4089 | (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && |
4090 | !(vport->cfg_log_verbose & LOG_FCP_UNDER)) |
4091 | logit = 0; |
4092 | else |
4093 | logit = LOG_FCP | LOG_FCP_UNDER; |
4094 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4095 | "9034 FCP cmd x%x failed <%d/%lld> " |
4096 | "status: x%x result: x%x " |
4097 | "sid: x%x did: x%x oxid: x%x " |
4098 | "Data: x%x x%x x%x\n" , |
4099 | cmd->cmnd[0], |
4100 | cmd->device ? cmd->device->id : 0xffff, |
4101 | cmd->device ? cmd->device->lun : 0xffff, |
4102 | lpfc_cmd->status, lpfc_cmd->result, |
4103 | vport->fc_myDID, |
4104 | (ndlp) ? ndlp->nlp_DID : 0, |
4105 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4106 | wcqe->parameter, wcqe->total_data_placed, |
4107 | lpfc_cmd->cur_iocbq.iotag); |
4108 | } |
4109 | |
4110 | switch (lpfc_cmd->status) { |
4111 | case CQE_STATUS_SUCCESS: |
4112 | cmd->result = DID_OK << 16; |
4113 | break; |
4114 | case CQE_STATUS_FCP_RSP_FAILURE: |
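/* Pass the adapter-observed residual: requested transfer
* length minus the data actually placed.
*/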
4115 | lpfc_handle_fcp_err(vport, lpfc_cmd, |
pwqeIn->wqe.fcp_iread.total_xfer_len -
4117 | wcqe->total_data_placed); |
4118 | break; |
4119 | case CQE_STATUS_NPORT_BSY: |
4120 | case CQE_STATUS_FABRIC_BSY: |
4121 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4122 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
4123 | if (!fast_path_evt) |
4124 | break; |
4125 | fast_path_evt->un.fabric_evt.event_type = |
4126 | FC_REG_FABRIC_EVENT; |
4127 | fast_path_evt->un.fabric_evt.subcategory = |
4128 | (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? |
4129 | LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; |
4130 | if (ndlp) { |
4131 | memcpy(&fast_path_evt->un.fabric_evt.wwpn, |
4132 | &ndlp->nlp_portname, |
4133 | sizeof(struct lpfc_name)); |
4134 | memcpy(&fast_path_evt->un.fabric_evt.wwnn, |
4135 | &ndlp->nlp_nodename, |
4136 | sizeof(struct lpfc_name)); |
4137 | } |
4138 | fast_path_evt->vport = vport; |
4139 | fast_path_evt->work_evt.evt = |
4140 | LPFC_EVT_FASTPATH_MGMT_EVT; |
4141 | spin_lock_irqsave(&phba->hbalock, flags); |
4142 | list_add_tail(new: &fast_path_evt->work_evt.evt_listp, |
4143 | head: &phba->work_list); |
4144 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
4145 | lpfc_worker_wake_up(phba); |
4146 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4147 | "9035 Fabric/Node busy FCP cmd x%x failed" |
4148 | " <%d/%lld> " |
4149 | "status: x%x result: x%x " |
4150 | "sid: x%x did: x%x oxid: x%x " |
4151 | "Data: x%x x%x x%x\n" , |
4152 | cmd->cmnd[0], |
4153 | cmd->device ? cmd->device->id : 0xffff, |
4154 | cmd->device ? cmd->device->lun : 0xffff, |
4155 | lpfc_cmd->status, lpfc_cmd->result, |
4156 | vport->fc_myDID, |
4157 | (ndlp) ? ndlp->nlp_DID : 0, |
4158 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4159 | wcqe->parameter, |
4160 | wcqe->total_data_placed, |
4161 | lpfc_cmd->cur_iocbq.iocb.ulpIoTag); |
4162 | break; |
4163 | case CQE_STATUS_DI_ERROR: |
4164 | if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) |
4165 | lpfc_cmd->result = IOERR_RX_DMA_FAILED; |
4166 | else |
4167 | lpfc_cmd->result = IOERR_TX_DMA_FAILED; |
4168 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, |
4169 | "9048 DI Error xri x%x status x%x DI ext " |
4170 | "status x%x data placed x%x\n" , |
4171 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4172 | lpfc_cmd->status, wcqe->parameter, |
4173 | wcqe->total_data_placed); |
if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
/* BG enabled cmd. Parse BG error */
lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4177 | break; |
4178 | } |
4179 | cmd->result = DID_ERROR << 16; |
4180 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4181 | "9040 DI Error on unprotected cmd\n" ); |
4182 | break; |
4183 | case CQE_STATUS_REMOTE_STOP: |
4184 | if (ndlp) { |
4185 | /* This I/O was aborted by the target, we don't |
4186 | * know the rxid and because we did not send the |
* ABTS we cannot generate an RRQ.
4188 | */ |
4189 | lpfc_set_rrq_active(phba, ndlp, |
4190 | lpfc_cmd->cur_iocbq.sli4_lxritag, |
4191 | 0, 0); |
4192 | } |
4193 | fallthrough; |
4194 | case CQE_STATUS_LOCAL_REJECT: |
4195 | if (lpfc_cmd->result & IOERR_DRVR_MASK) |
4196 | lpfc_cmd->status = IOSTAT_DRIVER_REJECT; |
4197 | if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || |
4198 | lpfc_cmd->result == |
4199 | IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || |
4200 | lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || |
4201 | lpfc_cmd->result == |
4202 | IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { |
4203 | cmd->result = DID_NO_CONNECT << 16; |
4204 | break; |
4205 | } |
4206 | if (lpfc_cmd->result == IOERR_INVALID_RPI || |
4207 | lpfc_cmd->result == IOERR_LINK_DOWN || |
4208 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
4209 | lpfc_cmd->result == IOERR_ABORT_REQUESTED || |
4210 | lpfc_cmd->result == IOERR_RPI_SUSPENDED || |
4211 | lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { |
4212 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4213 | break; |
4214 | } |
4215 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4216 | "9036 Local Reject FCP cmd x%x failed" |
4217 | " <%d/%lld> " |
4218 | "status: x%x result: x%x " |
4219 | "sid: x%x did: x%x oxid: x%x " |
4220 | "Data: x%x x%x x%x\n" , |
4221 | cmd->cmnd[0], |
4222 | cmd->device ? cmd->device->id : 0xffff, |
4223 | cmd->device ? cmd->device->lun : 0xffff, |
4224 | lpfc_cmd->status, lpfc_cmd->result, |
4225 | vport->fc_myDID, |
4226 | (ndlp) ? ndlp->nlp_DID : 0, |
4227 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4228 | wcqe->parameter, |
4229 | wcqe->total_data_placed, |
4230 | lpfc_cmd->cur_iocbq.iocb.ulpIoTag); |
4231 | fallthrough; |
4232 | default: |
4233 | cmd->result = DID_ERROR << 16; |
4234 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
4235 | "9037 FCP Completion Error: xri %x " |
4236 | "status x%x result x%x [x%x] " |
4237 | "placed x%x\n" , |
4238 | lpfc_cmd->cur_iocbq.sli4_xritag, |
4239 | lpfc_cmd->status, lpfc_cmd->result, |
4240 | wcqe->parameter, |
4241 | wcqe->total_data_placed); |
4242 | } |
4243 | if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { |
4244 | u32 *lp = (u32 *)cmd->sense_buffer; |
4245 | |
4246 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
4247 | "9039 Iodone <%d/%llu> cmd x%px, error " |
4248 | "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n" , |
4249 | cmd->device->id, cmd->device->lun, cmd, |
4250 | cmd->result, *lp, *(lp + 3), |
4251 | (cmd->device->sector_size) ? |
4252 | (u64)scsi_get_lba(cmd) : 0, |
4253 | cmd->retries, scsi_get_resid(cmd)); |
4254 | } |
4255 | |
4256 | if (vport->cfg_max_scsicmpl_time && |
4257 | time_after(jiffies, lpfc_cmd->start_time + |
4258 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { |
4259 | spin_lock_irqsave(shost->host_lock, flags); |
4260 | if (ndlp) { |
4261 | if (ndlp->cmd_qdepth > |
4262 | atomic_read(v: &ndlp->cmd_pending) && |
4263 | (atomic_read(v: &ndlp->cmd_pending) > |
4264 | LPFC_MIN_TGT_QDEPTH) && |
4265 | (cmd->cmnd[0] == READ_10 || |
4266 | cmd->cmnd[0] == WRITE_10)) |
4267 | ndlp->cmd_qdepth = |
4268 | atomic_read(v: &ndlp->cmd_pending); |
4269 | |
4270 | ndlp->last_change_time = jiffies; |
4271 | } |
spin_unlock_irqrestore(shost->host_lock, flags);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4275 | |
4276 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4277 | if (lpfc_cmd->ts_cmd_start) { |
4278 | lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; |
4279 | lpfc_cmd->ts_data_io = ktime_get_ns(); |
4280 | phba->ktime_last_cmd = lpfc_cmd->ts_data_io; |
lpfc_io_ktime(phba, lpfc_cmd);
4282 | } |
4283 | #endif |
4284 | if (likely(!wait_xb_clr)) |
4285 | lpfc_cmd->pCmd = NULL; |
4286 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
4287 | |
4288 | /* Check if IO qualified for CMF */ |
4289 | if (phba->cmf_active_mode != LPFC_CFG_OFF && |
4290 | cmd->sc_data_direction == DMA_FROM_DEVICE && |
4291 | (scsi_sg_count(cmd))) { |
4292 | /* Used when calculating average latency */ |
4293 | lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; |
lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4295 | } |
4296 | |
4297 | if (wait_xb_clr) |
4298 | goto out; |
4299 | |
4300 | /* The sdev is not guaranteed to be valid post scsi_done upcall. */ |
4301 | scsi_done(cmd); |
4302 | |
4303 | /* |
4304 | * If there is an abort thread waiting for command completion |
4305 | * wake up the thread. |
4306 | */ |
4307 | spin_lock(lock: &lpfc_cmd->buf_lock); |
4308 | lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; |
4309 | if (lpfc_cmd->waitq) |
4310 | wake_up(lpfc_cmd->waitq); |
4311 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
4312 | out: |
4313 | lpfc_release_scsi_buf(phba, psb: lpfc_cmd); |
4314 | } |
4315 | |
4316 | /** |
4317 | * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine |
4318 | * @phba: The Hba for which this call is being executed. |
4319 | * @pIocbIn: The command IOCBQ for the scsi cmnd. |
4320 | * @pIocbOut: The response IOCBQ for the scsi cmnd. |
4321 | * |
4322 | * This routine assigns scsi command result by looking into response IOCB |
4323 | * status field appropriately. This routine handles QUEUE FULL condition as |
4324 | * well by ramping down device queue depth. |
4325 | **/ |
4326 | static void |
4327 | lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, |
4328 | struct lpfc_iocbq *pIocbOut) |
4329 | { |
4330 | struct lpfc_io_buf *lpfc_cmd = |
4331 | (struct lpfc_io_buf *) pIocbIn->io_buf; |
4332 | struct lpfc_vport *vport = pIocbIn->vport; |
4333 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
4334 | struct lpfc_nodelist *pnode = rdata->pnode; |
4335 | struct scsi_cmnd *cmd; |
4336 | unsigned long flags; |
4337 | struct lpfc_fast_path_event *fast_path_evt; |
4338 | struct Scsi_Host *shost; |
4339 | int idx; |
4340 | uint32_t logit = LOG_FCP; |
4341 | |
4342 | /* Guard against abort handler being called at same time */ |
4343 | spin_lock(lock: &lpfc_cmd->buf_lock); |
4344 | |
4345 | /* Sanity check on return of outstanding command */ |
4346 | cmd = lpfc_cmd->pCmd; |
4347 | if (!cmd || !phba) { |
4348 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
4349 | "2621 IO completion: Not an active IO\n" ); |
4350 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
4351 | return; |
4352 | } |
4353 | |
4354 | idx = lpfc_cmd->cur_iocbq.hba_wqidx; |
4355 | if (phba->sli4_hba.hdwq) |
4356 | phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; |
4357 | |
4358 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4359 | if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) |
4360 | this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); |
4361 | #endif |
4362 | shost = cmd->device->host; |
4363 | |
4364 | lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); |
4365 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; |
4366 | /* pick up SLI4 exchange busy status from HBA */ |
4367 | lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; |
4368 | if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY) |
4369 | lpfc_cmd->flags |= LPFC_SBUF_XBUSY; |
4370 | |
4371 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4372 | if (lpfc_cmd->prot_data_type) { |
4373 | struct scsi_dif_tuple *src = NULL; |
4374 | |
4375 | src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; |
4376 | /* |
4377 | * Used to restore any changes to protection |
4378 | * data for error injection. |
4379 | */ |
4380 | switch (lpfc_cmd->prot_data_type) { |
4381 | case LPFC_INJERR_REFTAG: |
4382 | src->ref_tag = |
4383 | lpfc_cmd->prot_data; |
4384 | break; |
4385 | case LPFC_INJERR_APPTAG: |
4386 | src->app_tag = |
4387 | (uint16_t)lpfc_cmd->prot_data; |
4388 | break; |
4389 | case LPFC_INJERR_GUARD: |
4390 | src->guard_tag = |
4391 | (uint16_t)lpfc_cmd->prot_data; |
4392 | break; |
4393 | default: |
4394 | break; |
4395 | } |
4396 | |
4397 | lpfc_cmd->prot_data = 0; |
4398 | lpfc_cmd->prot_data_type = 0; |
4399 | lpfc_cmd->prot_data_segment = NULL; |
4400 | } |
4401 | #endif |
4402 | |
4403 | if (unlikely(lpfc_cmd->status)) { |
4404 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && |
4405 | (lpfc_cmd->result & IOERR_DRVR_MASK)) |
4406 | lpfc_cmd->status = IOSTAT_DRIVER_REJECT; |
4407 | else if (lpfc_cmd->status >= IOSTAT_CNT) |
4408 | lpfc_cmd->status = IOSTAT_DEFAULT; |
4409 | if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && |
4410 | !lpfc_cmd->fcp_rsp->rspStatus3 && |
4411 | (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && |
4412 | !(vport->cfg_log_verbose & LOG_FCP_UNDER)) |
4413 | logit = 0; |
4414 | else |
4415 | logit = LOG_FCP | LOG_FCP_UNDER; |
4416 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
4417 | "9030 FCP cmd x%x failed <%d/%lld> " |
4418 | "status: x%x result: x%x " |
4419 | "sid: x%x did: x%x oxid: x%x " |
4420 | "Data: x%x x%x\n" , |
4421 | cmd->cmnd[0], |
4422 | cmd->device ? cmd->device->id : 0xffff, |
4423 | cmd->device ? cmd->device->lun : 0xffff, |
4424 | lpfc_cmd->status, lpfc_cmd->result, |
4425 | vport->fc_myDID, |
4426 | (pnode) ? pnode->nlp_DID : 0, |
4427 | phba->sli_rev == LPFC_SLI_REV4 ? |
4428 | lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, |
4429 | pIocbOut->iocb.ulpContext, |
4430 | lpfc_cmd->cur_iocbq.iocb.ulpIoTag); |
4431 | |
4432 | switch (lpfc_cmd->status) { |
4433 | case IOSTAT_FCP_RSP_ERROR: |
4434 | /* Call FCP RSP handler to determine result */ |
4435 | lpfc_handle_fcp_err(vport, lpfc_cmd, |
pIocbOut->iocb.un.fcpi.fcpi_parm);
4437 | break; |
4438 | case IOSTAT_NPORT_BSY: |
4439 | case IOSTAT_FABRIC_BSY: |
4440 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4441 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
4442 | if (!fast_path_evt) |
4443 | break; |
4444 | fast_path_evt->un.fabric_evt.event_type = |
4445 | FC_REG_FABRIC_EVENT; |
4446 | fast_path_evt->un.fabric_evt.subcategory = |
4447 | (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? |
4448 | LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; |
4449 | if (pnode) { |
4450 | memcpy(&fast_path_evt->un.fabric_evt.wwpn, |
4451 | &pnode->nlp_portname, |
4452 | sizeof(struct lpfc_name)); |
4453 | memcpy(&fast_path_evt->un.fabric_evt.wwnn, |
4454 | &pnode->nlp_nodename, |
4455 | sizeof(struct lpfc_name)); |
4456 | } |
4457 | fast_path_evt->vport = vport; |
4458 | fast_path_evt->work_evt.evt = |
4459 | LPFC_EVT_FASTPATH_MGMT_EVT; |
4460 | spin_lock_irqsave(&phba->hbalock, flags); |
4461 | list_add_tail(new: &fast_path_evt->work_evt.evt_listp, |
4462 | head: &phba->work_list); |
4463 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
4464 | lpfc_worker_wake_up(phba); |
4465 | break; |
4466 | case IOSTAT_LOCAL_REJECT: |
4467 | case IOSTAT_REMOTE_STOP: |
4468 | if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || |
4469 | lpfc_cmd->result == |
4470 | IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || |
4471 | lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || |
4472 | lpfc_cmd->result == |
4473 | IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { |
4474 | cmd->result = DID_NO_CONNECT << 16; |
4475 | break; |
4476 | } |
4477 | if (lpfc_cmd->result == IOERR_INVALID_RPI || |
4478 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
4479 | lpfc_cmd->result == IOERR_ABORT_REQUESTED || |
4480 | lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { |
4481 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4482 | break; |
4483 | } |
4484 | if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || |
4485 | lpfc_cmd->result == IOERR_TX_DMA_FAILED) && |
4486 | pIocbOut->iocb.unsli3.sli3_bg.bgstat) { |
if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4488 | /* |
4489 | * This is a response for a BG enabled |
4490 | * cmd. Parse BG error |
4491 | */ |
4492 | lpfc_parse_bg_err(phba, lpfc_cmd, |
4493 | pIocbOut); |
4494 | break; |
4495 | } else { |
4496 | lpfc_printf_vlog(vport, KERN_WARNING, |
4497 | LOG_BG, |
4498 | "9031 non-zero BGSTAT " |
4499 | "on unprotected cmd\n" ); |
4500 | } |
4501 | } |
4502 | if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) |
4503 | && (phba->sli_rev == LPFC_SLI_REV4) |
4504 | && pnode) { |
4505 | /* This IO was aborted by the target, we don't |
4506 | * know the rxid and because we did not send the |
* ABTS we cannot generate an RRQ.
4508 | */ |
4509 | lpfc_set_rrq_active(phba, pnode, |
4510 | lpfc_cmd->cur_iocbq.sli4_lxritag, |
4511 | 0, 0); |
4512 | } |
4513 | fallthrough; |
4514 | default: |
4515 | cmd->result = DID_ERROR << 16; |
4516 | break; |
4517 | } |
4518 | |
4519 | if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
4520 | cmd->result = DID_TRANSPORT_DISRUPTED << 16 | |
4521 | SAM_STAT_BUSY; |
4522 | } else |
4523 | cmd->result = DID_OK << 16; |
4524 | |
4525 | if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { |
4526 | uint32_t *lp = (uint32_t *)cmd->sense_buffer; |
4527 | |
4528 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
4529 | "0710 Iodone <%d/%llu> cmd x%px, error " |
4530 | "x%x SNS x%x x%x Data: x%x x%x\n" , |
4531 | cmd->device->id, cmd->device->lun, cmd, |
4532 | cmd->result, *lp, *(lp + 3), cmd->retries, |
4533 | scsi_get_resid(cmd)); |
4534 | } |
4535 | |
4536 | if (vport->cfg_max_scsicmpl_time && |
4537 | time_after(jiffies, lpfc_cmd->start_time + |
4538 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { |
4539 | spin_lock_irqsave(shost->host_lock, flags); |
4540 | if (pnode) { |
4541 | if (pnode->cmd_qdepth > |
4542 | atomic_read(v: &pnode->cmd_pending) && |
4543 | (atomic_read(v: &pnode->cmd_pending) > |
4544 | LPFC_MIN_TGT_QDEPTH) && |
4545 | ((cmd->cmnd[0] == READ_10) || |
4546 | (cmd->cmnd[0] == WRITE_10))) |
4547 | pnode->cmd_qdepth = |
4548 | atomic_read(v: &pnode->cmd_pending); |
4549 | |
4550 | pnode->last_change_time = jiffies; |
4551 | } |
spin_unlock_irqrestore(shost->host_lock, flags);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
4558 | |
4559 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
4560 | if (lpfc_cmd->ts_cmd_start) { |
4561 | lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; |
4562 | lpfc_cmd->ts_data_io = ktime_get_ns(); |
4563 | phba->ktime_last_cmd = lpfc_cmd->ts_data_io; |
lpfc_io_ktime(phba, lpfc_cmd);
4565 | } |
4566 | #endif |
4567 | |
4568 | /* The sdev is not guaranteed to be valid post scsi_done upcall. */ |
4569 | scsi_done(cmd); |
4570 | |
4571 | /* |
4572 | * If there is an abort thread waiting for command completion |
4573 | * wake up the thread. |
4574 | */ |
4575 | spin_lock(lock: &lpfc_cmd->buf_lock); |
4576 | lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; |
4577 | if (lpfc_cmd->waitq) |
4578 | wake_up(lpfc_cmd->waitq); |
4579 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
4580 | |
4581 | lpfc_release_scsi_buf(phba, psb: lpfc_cmd); |
4582 | } |
4583 | |
4584 | /** |
4585 | * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO |
4586 | * @vport: Pointer to vport object. |
4587 | * @lpfc_cmd: The scsi buffer which is going to be prep'ed. |
4588 | * @tmo: timeout value for the IO |
4589 | * |
4590 | * Based on the data-direction of the command, initialize IOCB |
4591 | * in the I/O buffer. Fill in the IOCB fields which are independent |
4592 | * of the scsi buffer |
4593 | * |
4594 | * RETURNS 0 - SUCCESS, |
4595 | **/ |
4596 | static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, |
4597 | struct lpfc_io_buf *lpfc_cmd, |
4598 | uint8_t tmo) |
4599 | { |
4600 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; |
4601 | struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; |
4602 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
4603 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4604 | struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; |
4605 | int datadir = scsi_cmnd->sc_data_direction; |
4606 | u32 fcpdl; |
4607 | |
4608 | piocbq->iocb.un.fcpi.fcpi_XRdy = 0; |
4609 | |
4610 | /* |
4611 | * There are three possibilities here - use scatter-gather segment, use |
4612 | * the single mapping, or neither. Start the lpfc command prep by |
4613 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first |
4614 | * data bde entry. |
4615 | */ |
if (scsi_sg_count(scsi_cmnd)) {
4617 | if (datadir == DMA_TO_DEVICE) { |
4618 | iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; |
4619 | iocb_cmd->ulpPU = PARM_READ_CHECK; |
4620 | if (vport->cfg_first_burst_size && |
4621 | (pnode->nlp_flag & NLP_FIRSTBURST)) { |
4622 | u32 xrdy_len; |
4623 | |
fcpdl = scsi_bufflen(scsi_cmnd);
4625 | xrdy_len = min(fcpdl, |
4626 | vport->cfg_first_burst_size); |
4627 | piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; |
4628 | } |
4629 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
4630 | } else { |
4631 | iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; |
4632 | iocb_cmd->ulpPU = PARM_READ_CHECK; |
4633 | fcp_cmnd->fcpCntl3 = READ_DATA; |
4634 | } |
4635 | } else { |
4636 | iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; |
4637 | iocb_cmd->un.fcpi.fcpi_parm = 0; |
4638 | iocb_cmd->ulpPU = 0; |
4639 | fcp_cmnd->fcpCntl3 = 0; |
4640 | } |
4641 | |
4642 | /* |
4643 | * Finish initializing those IOCB fields that are independent |
4644 | * of the scsi_cmnd request_buffer |
4645 | */ |
4646 | piocbq->iocb.ulpContext = pnode->nlp_rpi; |
4647 | if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) |
4648 | piocbq->iocb.ulpFCP2Rcvy = 1; |
4649 | else |
4650 | piocbq->iocb.ulpFCP2Rcvy = 0; |
4651 | |
4652 | piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); |
4653 | piocbq->io_buf = lpfc_cmd; |
4654 | if (!piocbq->cmd_cmpl) |
4655 | piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; |
4656 | piocbq->iocb.ulpTimeout = tmo; |
4657 | piocbq->vport = vport; |
4658 | return 0; |
4659 | } |
4660 | |
4661 | /** |
4662 | * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO |
4663 | * @vport: Pointer to vport object. |
4664 | * @lpfc_cmd: The scsi buffer which is going to be prep'ed. |
4665 | * @tmo: timeout value for the IO |
4666 | * |
4667 | * Based on the data-direction of the command copy WQE template |
4668 | * to I/O buffer WQE. Fill in the WQE fields which are independent |
4669 | * of the scsi buffer |
4670 | * |
4671 | * RETURNS 0 - SUCCESS, |
4672 | **/ |
4673 | static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, |
4674 | struct lpfc_io_buf *lpfc_cmd, |
4675 | uint8_t tmo) |
4676 | { |
4677 | struct lpfc_hba *phba = vport->phba; |
4678 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
4679 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4680 | struct lpfc_sli4_hdw_queue *hdwq = NULL; |
4681 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
4682 | struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; |
4683 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
4684 | u16 idx = lpfc_cmd->hdwq_no; |
4685 | int datadir = scsi_cmnd->sc_data_direction; |
4686 | |
4687 | hdwq = &phba->sli4_hba.hdwq[idx]; |
4688 | |
4689 | /* Initialize 64 bytes only */ |
4690 | memset(wqe, 0, sizeof(union lpfc_wqe128)); |
4691 | |
4692 | /* |
4693 | * There are three possibilities here - use scatter-gather segment, use |
4694 | * the single mapping, or neither. |
4695 | */ |
if (scsi_sg_count(scsi_cmnd)) {
4697 | if (datadir == DMA_TO_DEVICE) { |
4698 | /* From the iwrite template, initialize words 7 - 11 */ |
4699 | memcpy(&wqe->words[7], |
4700 | &lpfc_iwrite_cmd_template.words[7], |
4701 | sizeof(uint32_t) * 5); |
4702 | |
4703 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
4704 | if (hdwq) |
4705 | hdwq->scsi_cstat.output_requests++; |
4706 | } else { |
4707 | /* From the iread template, initialize words 7 - 11 */ |
4708 | memcpy(&wqe->words[7], |
4709 | &lpfc_iread_cmd_template.words[7], |
4710 | sizeof(uint32_t) * 5); |
4711 | |
4712 | /* Word 7 */ |
4713 | bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); |
4714 | |
4715 | fcp_cmnd->fcpCntl3 = READ_DATA; |
4716 | if (hdwq) |
4717 | hdwq->scsi_cstat.input_requests++; |
4718 | |
4719 | /* For a CMF Managed port, iod must be zero'ed */ |
4720 | if (phba->cmf_active_mode == LPFC_CFG_MANAGED) |
4721 | bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, |
4722 | LPFC_WQE_IOD_NONE); |
4723 | } |
4724 | } else { |
4725 | /* From the icmnd template, initialize words 4 - 11 */ |
4726 | memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], |
4727 | sizeof(uint32_t) * 8); |
4728 | |
4729 | /* Word 7 */ |
4730 | bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); |
4731 | |
4732 | fcp_cmnd->fcpCntl3 = 0; |
4733 | if (hdwq) |
4734 | hdwq->scsi_cstat.control_requests++; |
4735 | } |
4736 | |
4737 | /* |
4738 | * Finish initializing those WQE fields that are independent |
4739 | * of the request_buffer |
4740 | */ |
4741 | |
4742 | /* Word 3 */ |
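/* Payload length covers the embedded FCP_CMND plus FCP_RSP */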
4743 | bf_set(payload_offset_len, &wqe->fcp_icmd, |
4744 | sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); |
4745 | |
4746 | /* Word 6 */ |
4747 | bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, |
4748 | phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); |
4749 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); |
4750 | |
/* Word 7 */
4752 | if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) |
4753 | bf_set(wqe_erp, &wqe->generic.wqe_com, 1); |
4754 | |
4755 | bf_set(wqe_class, &wqe->generic.wqe_com, |
4756 | (pnode->nlp_fcp_info & 0x0f)); |
4757 | |
4758 | /* Word 8 */ |
4759 | wqe->generic.wqe_com.abort_tag = pwqeq->iotag; |
4760 | |
4761 | /* Word 9 */ |
4762 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); |
4763 | |
4764 | pwqeq->vport = vport; |
4765 | pwqeq->io_buf = lpfc_cmd; |
4766 | pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; |
4767 | pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; |
4768 | |
4769 | return 0; |
4770 | } |
4771 | |
4772 | /** |
* lpfc_scsi_prep_cmnd - Wrapper routine to convert a scsi cmnd to an FCP info unit
4774 | * @vport: The virtual port for which this call is being executed. |
4775 | * @lpfc_cmd: The scsi command which needs to send. |
4776 | * @pnode: Pointer to lpfc_nodelist. |
4777 | * |
* This routine initializes the fcp_cmnd and iocb data structures from the
* scsi command and then calls the SLI-3 or SLI-4 specific buffer prep routine.
4780 | **/ |
4781 | static int |
4782 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, |
4783 | struct lpfc_nodelist *pnode) |
4784 | { |
4785 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
4786 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4787 | u8 *ptr; |
4788 | |
4789 | if (!pnode) |
4790 | return 0; |
4791 | |
4792 | lpfc_cmd->fcp_rsp->rspSnsLen = 0; |
4793 | /* clear task management bits */ |
4794 | lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; |
4795 | |
4796 | int_to_scsilun(lpfc_cmd->pCmd->device->lun, |
4797 | &lpfc_cmd->fcp_cmnd->fcp_lun); |
4798 | |
4799 | ptr = &fcp_cmnd->fcpCdb[0]; |
4800 | memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); |
4801 | if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { |
4802 | ptr += scsi_cmnd->cmd_len; |
4803 | memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); |
4804 | } |
4805 | |
4806 | fcp_cmnd->fcpCntl1 = SIMPLE_Q; |
4807 | |
lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4809 | |
4810 | return 0; |
4811 | } |
4812 | |
4813 | /** |
4814 | * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit |
4815 | * @vport: The virtual port for which this call is being executed. |
4816 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
4817 | * @lun: Logical unit number. |
4818 | * @task_mgmt_cmd: SCSI task management command. |
4819 | * |
4820 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd |
4821 | * for device with SLI-3 interface spec. |
4822 | * |
4823 | * Return codes: |
4824 | * 0 - Error |
4825 | * 1 - Success |
4826 | **/ |
4827 | static int |
4828 | lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, |
4829 | struct lpfc_io_buf *lpfc_cmd, |
4830 | u64 lun, u8 task_mgmt_cmd) |
4831 | { |
4832 | struct lpfc_iocbq *piocbq; |
4833 | IOCB_t *piocb; |
4834 | struct fcp_cmnd *fcp_cmnd; |
4835 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
4836 | struct lpfc_nodelist *ndlp = rdata->pnode; |
4837 | |
4838 | if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) |
4839 | return 0; |
4840 | |
4841 | piocbq = &(lpfc_cmd->cur_iocbq); |
4842 | piocbq->vport = vport; |
4843 | |
4844 | piocb = &piocbq->iocb; |
4845 | |
4846 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4847 | /* Clear out any old data in the FCP command area */ |
4848 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); |
4849 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); |
4850 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
4851 | if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) |
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4853 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; |
4854 | piocb->ulpContext = ndlp->nlp_rpi; |
4855 | piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; |
4856 | piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); |
4857 | piocb->ulpPU = 0; |
4858 | piocb->un.fcpi.fcpi_parm = 0; |
4859 | |
4860 | /* ulpTimeout is only one byte */ |
4861 | if (lpfc_cmd->timeout > 0xff) { |
4862 | /* |
4863 | * Do not timeout the command at the firmware level. |
4864 | * The driver will provide the timeout mechanism. |
4865 | */ |
4866 | piocb->ulpTimeout = 0; |
4867 | } else |
4868 | piocb->ulpTimeout = lpfc_cmd->timeout; |
4869 | |
4870 | return 1; |
4871 | } |
4872 | |
4873 | /** |
4874 | * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit |
4875 | * @vport: The virtual port for which this call is being executed. |
4876 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
4877 | * @lun: Logical unit number. |
4878 | * @task_mgmt_cmd: SCSI task management command. |
4879 | * |
4880 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd |
4881 | * for device with SLI-4 interface spec. |
4882 | * |
4883 | * Return codes: |
4884 | * 0 - Error |
4885 | * 1 - Success |
4886 | **/ |
4887 | static int |
4888 | lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, |
4889 | struct lpfc_io_buf *lpfc_cmd, |
4890 | u64 lun, u8 task_mgmt_cmd) |
4891 | { |
4892 | struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; |
4893 | union lpfc_wqe128 *wqe = &pwqeq->wqe; |
4894 | struct fcp_cmnd *fcp_cmnd; |
4895 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; |
4896 | struct lpfc_nodelist *ndlp = rdata->pnode; |
4897 | |
4898 | if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) |
4899 | return 0; |
4900 | |
4901 | pwqeq->vport = vport; |
4902 | /* Initialize 64 bytes only */ |
4903 | memset(wqe, 0, sizeof(union lpfc_wqe128)); |
4904 | |
4905 | /* From the icmnd template, initialize words 4 - 11 */ |
4906 | memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], |
4907 | sizeof(uint32_t) * 8); |
4908 | |
4909 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
4910 | /* Clear out any old data in the FCP command area */ |
4911 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); |
4912 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); |
4913 | fcp_cmnd->fcpCntl3 = 0; |
4914 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
4915 | |
4916 | bf_set(payload_offset_len, &wqe->fcp_icmd, |
4917 | sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); |
4918 | bf_set(cmd_buff_len, &wqe->fcp_icmd, 0); |
4919 | bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */ |
4920 | vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
4921 | bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, |
4922 | ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); |
4923 | bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, |
4924 | (ndlp->nlp_fcp_info & 0x0f)); |
4925 | |
4926 | /* ulpTimeout is only one byte */ |
4927 | if (lpfc_cmd->timeout > 0xff) { |
4928 | /* |
4929 | * Do not timeout the command at the firmware level. |
4930 | * The driver will provide the timeout mechanism. |
4931 | */ |
4932 | bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0); |
4933 | } else { |
4934 | bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout); |
4935 | } |
4936 | |
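/* Set up the FCP command payload portion of the WQE, then fill in the
* XRI, abort tag and request tag for this IO.
*/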
lpfc_prep_embed_io(vport->phba, lpfc_cmd);
4938 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); |
4939 | wqe->generic.wqe_com.abort_tag = pwqeq->iotag; |
4940 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); |
4941 | |
lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4943 | |
4944 | return 1; |
4945 | } |
4946 | |
4947 | /** |
4948 | * lpfc_scsi_api_table_setup - Set up scsi api function jump table |
4949 | * @phba: The hba struct for which this call is being executed. |
4950 | * @dev_grp: The HBA PCI-Device group number. |
4951 | * |
4952 | * This routine sets up the SCSI interface API function jump table in @phba |
4953 | * struct. |
4954 | * Returns: 0 - success, -ENODEV - failure. |
4955 | **/ |
4956 | int |
4957 | lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) |
4958 | { |
4959 | |
4960 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; |
4961 | |
4962 | switch (dev_grp) { |
4963 | case LPFC_PCI_DEV_LP: |
4964 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; |
4965 | phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; |
4966 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; |
4967 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; |
4968 | phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; |
4969 | phba->lpfc_scsi_prep_task_mgmt_cmd = |
4970 | lpfc_scsi_prep_task_mgmt_cmd_s3; |
4971 | break; |
4972 | case LPFC_PCI_DEV_OC: |
4973 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; |
4974 | phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; |
4975 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; |
4976 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; |
4977 | phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; |
4978 | phba->lpfc_scsi_prep_task_mgmt_cmd = |
4979 | lpfc_scsi_prep_task_mgmt_cmd_s4; |
4980 | break; |
4981 | default: |
4982 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
"1418 Invalid HBA PCI-device group: 0x%x\n",
4984 | dev_grp); |
4985 | return -ENODEV; |
4986 | } |
4987 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; |
4988 | return 0; |
4989 | } |
4990 | |
4991 | /** |
4992 | * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command |
4993 | * @phba: The Hba for which this call is being executed. |
4994 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. |
4995 | * @rspiocbq: Pointer to lpfc_iocbq data structure. |
4996 | * |
* This routine is the IOCB completion routine for the device reset and
* target reset routines. It releases the scsi buffer associated with lpfc_cmd.
4999 | **/ |
5000 | static void |
5001 | lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, |
5002 | struct lpfc_iocbq *cmdiocbq, |
5003 | struct lpfc_iocbq *rspiocbq) |
5004 | { |
5005 | struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf; |
5006 | if (lpfc_cmd) |
lpfc_release_scsi_buf(phba, lpfc_cmd);
5008 | return; |
5009 | } |
5010 | |
5011 | /** |
5012 | * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check |
5013 | * if issuing a pci_bus_reset is possibly unsafe |
5014 | * @phba: lpfc_hba pointer. |
5015 | * |
5016 | * Description: |
* Walks the bus_list to ensure that only PCI devices with the Emulex
* vendor id, device ids that support hot reset, and a single occurrence
* of function 0 are present.
5020 | * |
5021 | * Returns: |
5022 | * -EBADSLT, detected invalid device |
5023 | * 0, successful |
5024 | */ |
5025 | int |
5026 | lpfc_check_pci_resettable(struct lpfc_hba *phba) |
5027 | { |
5028 | const struct pci_dev *pdev = phba->pcidev; |
5029 | struct pci_dev *ptr = NULL; |
5030 | u8 counter = 0; |
5031 | |
5032 | /* Walk the list of devices on the pci_dev's bus */ |
5033 | list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { |
5034 | /* Check for Emulex Vendor ID */ |
5035 | if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { |
5036 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5037 | "8346 Non-Emulex vendor found: " |
"0x%04x\n", ptr->vendor);
5039 | return -EBADSLT; |
5040 | } |
5041 | |
5042 | /* Check for valid Emulex Device ID */ |
5043 | if (phba->sli_rev != LPFC_SLI_REV4 || |
5044 | phba->hba_flag & HBA_FCOE_MODE) { |
5045 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5046 | "8347 Incapable PCI reset device: " |
"0x%04x\n", ptr->device);
5048 | return -EBADSLT; |
5049 | } |
5050 | |
5051 | /* Check for only one function 0 ID to ensure only one HBA on |
5052 | * secondary bus |
5053 | */ |
5054 | if (ptr->devfn == 0) { |
5055 | if (++counter > 1) { |
5056 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5057 | "8348 More than one device on " |
"secondary bus found\n");
5059 | return -EBADSLT; |
5060 | } |
5061 | } |
5062 | } |
5063 | |
5064 | return 0; |
5065 | } |
5066 | |
5067 | /** |
5068 | * lpfc_info - Info entry point of scsi_host_template data structure |
5069 | * @host: The scsi host for which this call is being executed. |
5070 | * |
5071 | * This routine provides module information about hba. |
5072 | * |
* Return code:
5074 | * Pointer to char - Success. |
5075 | **/ |
5076 | const char * |
5077 | lpfc_info(struct Scsi_Host *host) |
5078 | { |
5079 | struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; |
5080 | struct lpfc_hba *phba = vport->phba; |
5081 | int link_speed = 0; |
5082 | static char lpfcinfobuf[384]; |
5083 | char tmp[384] = {0}; |
5084 | |
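/* Build the info string piece by piece; stop appending as soon as a
* strlcat() call reports truncation.
*/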
5085 | memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); |
if (phba && phba->pcidev) {
/* Model Description */
scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
sizeof(lpfcinfobuf))
goto buffer_done;

/* PCI Info */
scnprintf(tmp, sizeof(tmp),
" on PCI bus %02x device %02x irq %d",
phba->pcidev->bus->number, phba->pcidev->devfn,
phba->pcidev->irq);
if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
sizeof(lpfcinfobuf))
goto buffer_done;

/* Port Number */
if (phba->Port[0]) {
scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
sizeof(lpfcinfobuf))
goto buffer_done;
}

/* Link Speed */
link_speed = lpfc_sli_port_speed_get(phba);
if (link_speed != 0) {
scnprintf(tmp, sizeof(tmp),
" Logical Link Speed: %d Mbps", link_speed);
if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
sizeof(lpfcinfobuf))
goto buffer_done;
}

/* PCI resettable */
if (!lpfc_check_pci_resettable(phba)) {
scnprintf(tmp, sizeof(tmp), " PCI resettable");
strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
}
}
5126 | |
5127 | buffer_done: |
5128 | return lpfcinfobuf; |
5129 | } |
5130 | |
5131 | /** |
5132 | * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba |
5133 | * @phba: The Hba for which this call is being executed. |
5134 | * |
5135 | * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. |
5136 | * The default value of cfg_poll_tmo is 10 milliseconds. |
5137 | **/ |
5138 | static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) |
5139 | { |
5140 | unsigned long poll_tmo_expires = |
(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
mod_timer(&phba->fcp_poll_timer,
poll_tmo_expires);
5146 | } |
5147 | |
5148 | /** |
5149 | * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA |
5150 | * @phba: The Hba for which this call is being executed. |
5151 | * |
5152 | * This routine starts the fcp_poll_timer of @phba. |
5153 | **/ |
5154 | void lpfc_poll_start_timer(struct lpfc_hba * phba) |
5155 | { |
5156 | lpfc_poll_rearm_timer(phba); |
5157 | } |
5158 | |
5159 | /** |
5160 | * lpfc_poll_timeout - Restart polling timer |
5161 | * @t: Timer construct where lpfc_hba data structure pointer is obtained. |
5162 | * |
* This routine restarts the fcp_poll timer when FCP ring polling is enabled
* and the FCP ring interrupt is disabled.
5165 | **/ |
5166 | void lpfc_poll_timeout(struct timer_list *t) |
5167 | { |
5168 | struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); |
5169 | |
5170 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
5171 | lpfc_sli_handle_fast_ring_event(phba, |
5172 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
5173 | |
5174 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
5175 | lpfc_poll_rearm_timer(phba); |
5176 | } |
5177 | } |
5178 | |
5179 | /* |
5180 | * lpfc_is_command_vm_io - get the UUID from blk cgroup |
5181 | * @cmd: Pointer to scsi_cmnd data structure |
5182 | * Returns UUID if present, otherwise NULL |
5183 | */ |
5184 | static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) |
5185 | { |
struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5187 | |
5188 | if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio) |
5189 | return NULL; |
5190 | return blkcg_get_fc_appid(bio); |
5191 | } |
5192 | |
5193 | /** |
5194 | * lpfc_queuecommand - scsi_host_template queuecommand entry point |
5195 | * @shost: kernel scsi host pointer. |
5196 | * @cmnd: Pointer to scsi_cmnd data structure. |
5197 | * |
* The driver registers this routine with the scsi midlayer to submit a @cmnd for processing.
5199 | * This routine prepares an IOCB from scsi command and provides to firmware. |
5200 | * The @done callback is invoked after driver finished processing the command. |
5201 | * |
5202 | * Return value : |
5203 | * 0 - Success |
5204 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. |
5205 | **/ |
5206 | static int |
5207 | lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) |
5208 | { |
5209 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
5210 | struct lpfc_hba *phba = vport->phba; |
5211 | struct lpfc_iocbq *cur_iocbq = NULL; |
5212 | struct lpfc_rport_data *rdata; |
5213 | struct lpfc_nodelist *ndlp; |
5214 | struct lpfc_io_buf *lpfc_cmd; |
5215 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
5216 | int err, idx; |
5217 | u8 *uuid = NULL; |
5218 | uint64_t start; |
5219 | |
5220 | start = ktime_get_ns(); |
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5222 | |
5223 | /* sanity check on references */ |
5224 | if (unlikely(!rdata) || unlikely(!rport)) |
5225 | goto out_fail_command; |
5226 | |
5227 | err = fc_remote_port_chkready(rport); |
5228 | if (err) { |
5229 | cmnd->result = err; |
5230 | goto out_fail_command; |
5231 | } |
5232 | ndlp = rdata->pnode; |
5233 | |
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
" op:%02x str=%s without registering for"
" BlockGuard - Rejecting command\n",
cmnd->cmnd[0], scsi_get_prot_op(cmnd),
dif_op_str[scsi_get_prot_op(cmnd)]);
5243 | goto out_fail_command; |
5244 | } |
5245 | |
5246 | /* |
5247 | * Catch race where our node has transitioned, but the |
5248 | * transport is still transitioning. |
5249 | */ |
5250 | if (!ndlp) |
5251 | goto out_tgt_busy1; |
5252 | |
5253 | /* Check if IO qualifies for CMF */ |
5254 | if (phba->cmf_active_mode != LPFC_CFG_OFF && |
5255 | cmnd->sc_data_direction == DMA_FROM_DEVICE && |
(scsi_sg_count(cmnd))) {
/* Latency start time saved in rx_cmd_start later in routine */
err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5259 | if (err) |
5260 | goto out_tgt_busy1; |
5261 | } |
5262 | |
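/* Enforce the per-node command queue depth before consuming an IO buffer */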
5263 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5265 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, |
5266 | "3377 Target Queue Full, scsi Id:%d " |
5267 | "Qdepth:%d Pending command:%d" |
5268 | " WWNN:%02x:%02x:%02x:%02x:" |
5269 | "%02x:%02x:%02x:%02x, " |
5270 | " WWPN:%02x:%02x:%02x:%02x:" |
"%02x:%02x:%02x:%02x",
5272 | ndlp->nlp_sid, ndlp->cmd_qdepth, |
5273 | atomic_read(&ndlp->cmd_pending), |
5274 | ndlp->nlp_nodename.u.wwn[0], |
5275 | ndlp->nlp_nodename.u.wwn[1], |
5276 | ndlp->nlp_nodename.u.wwn[2], |
5277 | ndlp->nlp_nodename.u.wwn[3], |
5278 | ndlp->nlp_nodename.u.wwn[4], |
5279 | ndlp->nlp_nodename.u.wwn[5], |
5280 | ndlp->nlp_nodename.u.wwn[6], |
5281 | ndlp->nlp_nodename.u.wwn[7], |
5282 | ndlp->nlp_portname.u.wwn[0], |
5283 | ndlp->nlp_portname.u.wwn[1], |
5284 | ndlp->nlp_portname.u.wwn[2], |
5285 | ndlp->nlp_portname.u.wwn[3], |
5286 | ndlp->nlp_portname.u.wwn[4], |
5287 | ndlp->nlp_portname.u.wwn[5], |
5288 | ndlp->nlp_portname.u.wwn[6], |
5289 | ndlp->nlp_portname.u.wwn[7]); |
5290 | goto out_tgt_busy2; |
5291 | } |
5292 | } |
5293 | |
5294 | lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); |
5295 | if (lpfc_cmd == NULL) { |
5296 | lpfc_rampdown_queue_depth(phba); |
5297 | |
5298 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, |
5299 | "0707 driver's buffer pool is empty, " |
"IO busied\n");
5301 | goto out_host_busy; |
5302 | } |
5303 | lpfc_cmd->rx_cmd_start = start; |
5304 | |
5305 | cur_iocbq = &lpfc_cmd->cur_iocbq; |
5306 | /* |
5307 | * Store the midlayer's command structure for the completion phase |
5308 | * and complete the command initialization. |
5309 | */ |
5310 | lpfc_cmd->pCmd = cmnd; |
5311 | lpfc_cmd->rdata = rdata; |
5312 | lpfc_cmd->ndlp = ndlp; |
5313 | cur_iocbq->cmd_cmpl = NULL; |
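/* host_scribble lets the error handlers (e.g. lpfc_abort_handler) find
* this lpfc_io_buf from the scsi_cmnd.
*/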
5314 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; |
5315 | |
err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5317 | if (err) |
5318 | goto out_host_busy_release_buf; |
5319 | |
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5321 | if (vport->phba->cfg_enable_bg) { |
5322 | lpfc_printf_vlog(vport, |
5323 | KERN_INFO, LOG_SCSI_CMD, |
5324 | "9033 BLKGRD: rcvd %s cmd:x%x " |
"reftag x%x cnt %u pt %x\n",
5326 | dif_op_str[scsi_get_prot_op(cmnd)], |
5327 | cmnd->cmnd[0], |
5328 | scsi_prot_ref_tag(cmnd), |
5329 | scsi_logical_block_count(cmnd), |
5330 | (cmnd->cmnd[1]>>5)); |
5331 | } |
5332 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); |
5333 | } else { |
5334 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); |
5335 | } |
5336 | |
5337 | if (unlikely(err)) { |
5338 | if (err == 2) { |
5339 | cmnd->result = DID_ERROR << 16; |
5340 | goto out_fail_command_release_buf; |
5341 | } |
5342 | goto out_host_busy_free_buf; |
5343 | } |
5344 | |
5345 | /* check the necessary and sufficient condition to support VMID */ |
5346 | if (lpfc_is_vmid_enabled(phba) && |
5347 | (ndlp->vmid_support || |
5348 | phba->pport->vmid_priority_tagging == |
5349 | LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { |
5350 | /* is the I/O generated by a VM, get the associated virtual */ |
5351 | /* entity id */ |
uuid = lpfc_is_command_vm_io(cmnd);
5353 | |
5354 | if (uuid) { |
err = lpfc_vmid_get_appid(vport, uuid,
cmnd->sc_data_direction,
(union lpfc_vmid_io_tag *)
&cur_iocbq->vmid_tag);
5359 | if (!err) |
5360 | cur_iocbq->cmd_flag |= LPFC_IO_VMID; |
5361 | } |
5362 | } |
5363 | |
5364 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
5365 | if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) |
5366 | this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); |
5367 | #endif |
5368 | /* Issue I/O to adapter */ |
err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5370 | SLI_IOCB_RET_IOCB); |
5371 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
5372 | if (start) { |
5373 | lpfc_cmd->ts_cmd_start = start; |
5374 | lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; |
5375 | lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); |
5376 | } else { |
5377 | lpfc_cmd->ts_cmd_start = 0; |
5378 | } |
5379 | #endif |
5380 | if (err) { |
5381 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5382 | "3376 FCP could not issue iocb err %x " |
5383 | "FCP cmd x%x <%d/%llu> " |
5384 | "sid: x%x did: x%x oxid: x%x " |
"Data: x%x x%x x%x x%x\n",
5386 | err, cmnd->cmnd[0], |
5387 | cmnd->device ? cmnd->device->id : 0xffff, |
5388 | cmnd->device ? cmnd->device->lun : (u64)-1, |
5389 | vport->fc_myDID, ndlp->nlp_DID, |
5390 | phba->sli_rev == LPFC_SLI_REV4 ? |
5391 | cur_iocbq->sli4_xritag : 0xffff, |
5392 | phba->sli_rev == LPFC_SLI_REV4 ? |
5393 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : |
5394 | cur_iocbq->iocb.ulpContext, |
5395 | cur_iocbq->iotag, |
5396 | phba->sli_rev == LPFC_SLI_REV4 ? |
5397 | bf_get(wqe_tmo, |
5398 | &cur_iocbq->wqe.generic.wqe_com) : |
5399 | cur_iocbq->iocb.ulpTimeout, |
5400 | (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); |
5401 | |
5402 | goto out_host_busy_free_buf; |
5403 | } |
5404 | |
5405 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
5406 | lpfc_sli_handle_fast_ring_event(phba, |
5407 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
5408 | |
5409 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
5410 | lpfc_poll_rearm_timer(phba); |
5411 | } |
5412 | |
5413 | if (phba->cfg_xri_rebalancing) |
lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5415 | |
5416 | return 0; |
5417 | |
5418 | out_host_busy_free_buf: |
5419 | idx = lpfc_cmd->hdwq_no; |
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5421 | if (phba->sli4_hba.hdwq) { |
5422 | switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { |
5423 | case WRITE_DATA: |
5424 | phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; |
5425 | break; |
5426 | case READ_DATA: |
5427 | phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; |
5428 | break; |
5429 | default: |
5430 | phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; |
5431 | } |
5432 | } |
5433 | out_host_busy_release_buf: |
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
shost);
return SCSI_MLQUEUE_HOST_BUSY;

out_tgt_busy2:
lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
shost);
out_tgt_busy1:
return SCSI_MLQUEUE_TARGET_BUSY;

out_fail_command_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
shost);

out_fail_command:
scsi_done(cmnd);
5453 | return 0; |
5454 | } |
5455 | |
5456 | /* |
5457 | * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport |
5458 | * @vport: The virtual port for which this call is being executed. |
5459 | */ |
5460 | void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) |
5461 | { |
5462 | u32 bucket; |
5463 | struct lpfc_vmid *cur; |
5464 | |
5465 | if (vport->port_type == LPFC_PHYSICAL_PORT) |
del_timer_sync(&vport->phba->inactive_vmid_poll);

kfree(vport->qfpa_res);
kfree(vport->vmid_priority.vmid_range);
kfree(vport->vmid);

if (!hash_empty(vport->hash_table))
hash_for_each(vport->hash_table, bucket, cur, hnode)
hash_del(&cur->hnode);
5475 | |
5476 | vport->qfpa_res = NULL; |
5477 | vport->vmid_priority.vmid_range = NULL; |
5478 | vport->vmid = NULL; |
5479 | vport->cur_vmid_cnt = 0; |
5480 | } |
5481 | |
5482 | /** |
5483 | * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point |
5484 | * @cmnd: Pointer to scsi_cmnd data structure. |
5485 | * |
5486 | * This routine aborts @cmnd pending in base driver. |
5487 | * |
5488 | * Return code : |
5489 | * 0x2003 - Error |
5490 | * 0x2002 - Success |
5491 | **/ |
5492 | static int |
5493 | lpfc_abort_handler(struct scsi_cmnd *cmnd) |
5494 | { |
5495 | struct Scsi_Host *shost = cmnd->device->host; |
5496 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
5497 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
5498 | struct lpfc_hba *phba = vport->phba; |
5499 | struct lpfc_iocbq *iocb; |
5500 | struct lpfc_io_buf *lpfc_cmd; |
5501 | int ret = SUCCESS, status = 0; |
5502 | struct lpfc_sli_ring *pring_s4 = NULL; |
5503 | struct lpfc_sli_ring *pring = NULL; |
5504 | int ret_val; |
5505 | unsigned long flags; |
5506 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
5507 | |
5508 | status = fc_block_rport(rport); |
5509 | if (status != 0 && status != SUCCESS) |
5510 | return status; |
5511 | |
5512 | lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; |
5513 | if (!lpfc_cmd) |
5514 | return ret; |
5515 | |
5516 | /* Guard against IO completion being called at same time */ |
5517 | spin_lock_irqsave(&lpfc_cmd->buf_lock, flags); |
5518 | |
spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
5525 | ret = FAILED; |
5526 | goto out_unlock_hba; |
5527 | } |
5528 | |
5529 | if (!lpfc_cmd->pCmd) { |
5530 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5531 | "2873 SCSI Layer I/O Abort Request IO CMPL Status " |
"x%x ID %d LUN %llu\n",
5533 | SUCCESS, cmnd->device->id, cmnd->device->lun); |
5534 | goto out_unlock_hba; |
5535 | } |
5536 | |
5537 | iocb = &lpfc_cmd->cur_iocbq; |
5538 | if (phba->sli_rev == LPFC_SLI_REV4) { |
5539 | pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; |
5540 | if (!pring_s4) { |
5541 | ret = FAILED; |
5542 | goto out_unlock_hba; |
5543 | } |
spin_lock(&pring_s4->ring_lock);
5545 | } |
5546 | /* the command is in process of being cancelled */ |
5547 | if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { |
5548 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5549 | "3169 SCSI Layer abort requested I/O has been " |
"cancelled by LLD.\n");
5551 | ret = FAILED; |
5552 | goto out_unlock_ring; |
5553 | } |
5554 | /* |
5555 | * If pCmd field of the corresponding lpfc_io_buf structure |
5556 | * points to a different SCSI command, then the driver has |
5557 | * already completed this command, but the midlayer did not |
5558 | * see the completion before the eh fired. Just return SUCCESS. |
5559 | */ |
5560 | if (lpfc_cmd->pCmd != cmnd) { |
5561 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5562 | "3170 SCSI Layer abort requested I/O has been " |
"completed by LLD.\n");
5564 | goto out_unlock_ring; |
5565 | } |
5566 | |
5567 | WARN_ON(iocb->io_buf != lpfc_cmd); |
5568 | |
5569 | /* abort issued in recovery is still in progress */ |
5570 | if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) { |
5571 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5577 | goto wait_for_cmpl; |
5578 | } |
5579 | |
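/* Record the waitq so the completion of the aborted IO can wake the
* wait_event_timeout() below once the command is returned.
*/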
5580 | lpfc_cmd->waitq = &waitq; |
5581 | if (phba->sli_rev == LPFC_SLI_REV4) { |
spin_unlock(&pring_s4->ring_lock);
ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
lpfc_sli_abort_fcp_cmpl);
5585 | } else { |
5586 | pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; |
5587 | ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, |
5588 | lpfc_sli_abort_fcp_cmpl); |
5589 | } |
5590 | |
5591 | /* Make sure HBA is alive */ |
5592 | lpfc_issue_hb_tmo(phba); |
5593 | |
5594 | if (ret_val != IOCB_SUCCESS) { |
5595 | /* Indicate the IO is not being aborted by the driver. */ |
5596 | lpfc_cmd->waitq = NULL; |
5597 | ret = FAILED; |
5598 | goto out_unlock_hba; |
5599 | } |
5600 | |
5601 | /* no longer need the lock after this point */ |
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5604 | |
5605 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
5606 | lpfc_sli_handle_fast_ring_event(phba, |
5607 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
5608 | |
5609 | wait_for_cmpl: |
5610 | /* |
5611 | * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait |
5612 | * for abort to complete. |
5613 | */ |
5614 | wait_event_timeout(waitq, |
5615 | (lpfc_cmd->pCmd != cmnd), |
5616 | msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); |
5617 | |
spin_lock(&lpfc_cmd->buf_lock);
5619 | |
5620 | if (lpfc_cmd->pCmd == cmnd) { |
5621 | ret = FAILED; |
5622 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5623 | "0748 abort handler timed out waiting " |
5624 | "for aborting I/O (xri:x%x) to complete: " |
"ret %#x, ID %d, LUN %llu\n",
5626 | iocb->sli4_xritag, ret, |
5627 | cmnd->device->id, cmnd->device->lun); |
5628 | } |
5629 | |
5630 | lpfc_cmd->waitq = NULL; |
5631 | |
spin_unlock(&lpfc_cmd->buf_lock);
5633 | goto out; |
5634 | |
5635 | out_unlock_ring: |
5636 | if (phba->sli_rev == LPFC_SLI_REV4) |
spin_unlock(&pring_s4->ring_lock);
out_unlock_hba:
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5641 | out: |
5642 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
5643 | "0749 SCSI Layer I/O Abort Request Status x%x ID %d " |
"LUN %llu\n", ret, cmnd->device->id,
5645 | cmnd->device->lun); |
5646 | return ret; |
5647 | } |
5648 | |
5649 | static char * |
5650 | lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) |
5651 | { |
5652 | switch (task_mgmt_cmd) { |
case FCP_ABORT_TASK_SET:
return "ABORT_TASK_SET";
case FCP_CLEAR_TASK_SET:
return "FCP_CLEAR_TASK_SET";
case FCP_BUS_RESET:
return "FCP_BUS_RESET";
case FCP_LUN_RESET:
return "FCP_LUN_RESET";
case FCP_TARGET_RESET:
return "FCP_TARGET_RESET";
case FCP_CLEAR_ACA:
return "FCP_CLEAR_ACA";
case FCP_TERMINATE_TASK:
return "FCP_TERMINATE_TASK";
default:
return "unknown";
5669 | } |
5670 | } |
5671 | |
5672 | |
5673 | /** |
5674 | * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed |
5675 | * @vport: The virtual port for which this call is being executed. |
5676 | * @lpfc_cmd: Pointer to lpfc_io_buf data structure. |
5677 | * |
* This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
5679 | * |
5680 | * Return code : |
5681 | * 0x2003 - Error |
5682 | * 0x2002 - Success |
5683 | **/ |
5684 | static int |
5685 | lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) |
5686 | { |
5687 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; |
5688 | uint32_t rsp_info; |
5689 | uint32_t rsp_len; |
5690 | uint8_t rsp_info_code; |
5691 | int ret = FAILED; |
5692 | |
5693 | |
5694 | if (fcprsp == NULL) |
5695 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
"0703 fcp_rsp is missing\n");
5697 | else { |
5698 | rsp_info = fcprsp->rspStatus2; |
5699 | rsp_len = be32_to_cpu(fcprsp->rspRspLen); |
5700 | rsp_info_code = fcprsp->rspInfo3; |
5701 | |
5702 | |
5703 | lpfc_printf_vlog(vport, KERN_INFO, |
5704 | LOG_FCP, |
5705 | "0706 fcp_rsp valid 0x%x," |
" rsp len=%d code 0x%x\n",
5707 | rsp_info, |
5708 | rsp_len, rsp_info_code); |
5709 | |
5710 | /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN |
5711 | * field specifies the number of valid bytes of FCP_RSP_INFO. |
5712 | * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 |
5713 | */ |
5714 | if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && |
5715 | ((rsp_len == 8) || (rsp_len == 4))) { |
5716 | switch (rsp_info_code) { |
5717 | case RSP_NO_FAILURE: |
5718 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
"0715 Task Mgmt No Failure\n");
5720 | ret = SUCCESS; |
5721 | break; |
5722 | case RSP_TM_NOT_SUPPORTED: /* TM rejected */ |
5723 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5724 | "0716 Task Mgmt Target " |
"reject\n");
5726 | break; |
5727 | case RSP_TM_NOT_COMPLETED: /* TM failed */ |
5728 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5729 | "0717 Task Mgmt Target " |
"failed TM\n");
5731 | break; |
5732 | case RSP_TM_INVALID_LU: /* TM to invalid LU! */ |
5733 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5734 | "0718 Task Mgmt to invalid " |
"LUN\n");
5736 | break; |
5737 | } |
5738 | } |
5739 | } |
5740 | return ret; |
5741 | } |
5742 | |
5743 | |
5744 | /** |
5745 | * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler |
5746 | * @vport: The virtual port for which this call is being executed. |
5747 | * @rport: Pointer to remote port |
5748 | * @tgt_id: Target ID of remote device. |
5749 | * @lun_id: Lun number for the TMF |
5750 | * @task_mgmt_cmd: type of TMF to send |
5751 | * |
5752 | * This routine builds and sends a TMF (SCSI Task Mgmt Function) to |
5753 | * a remote port. |
5754 | * |
5755 | * Return Code: |
5756 | * 0x2003 - Error |
5757 | * 0x2002 - Success. |
5758 | **/ |
5759 | static int |
5760 | lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, |
5761 | unsigned int tgt_id, uint64_t lun_id, |
5762 | uint8_t task_mgmt_cmd) |
5763 | { |
5764 | struct lpfc_hba *phba = vport->phba; |
5765 | struct lpfc_io_buf *lpfc_cmd; |
5766 | struct lpfc_iocbq *iocbq; |
5767 | struct lpfc_iocbq *iocbqrsp; |
5768 | struct lpfc_rport_data *rdata; |
5769 | struct lpfc_nodelist *pnode; |
5770 | int ret; |
5771 | int status; |
5772 | |
5773 | rdata = rport->dd_data; |
5774 | if (!rdata || !rdata->pnode) |
5775 | return FAILED; |
5776 | pnode = rdata->pnode; |
5777 | |
lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5779 | if (lpfc_cmd == NULL) |
5780 | return FAILED; |
5781 | lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; |
5782 | lpfc_cmd->rdata = rdata; |
5783 | lpfc_cmd->pCmd = NULL; |
5784 | lpfc_cmd->ndlp = pnode; |
5785 | |
5786 | status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, |
5787 | task_mgmt_cmd); |
5788 | if (!status) { |
lpfc_release_scsi_buf(phba, lpfc_cmd);
5790 | return FAILED; |
5791 | } |
5792 | |
5793 | iocbq = &lpfc_cmd->cur_iocbq; |
5794 | iocbqrsp = lpfc_sli_get_iocbq(phba); |
5795 | if (iocbqrsp == NULL) { |
lpfc_release_scsi_buf(phba, lpfc_cmd);
5797 | return FAILED; |
5798 | } |
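/* The default TM completion releases the scsi buffer, so the buffer is
* still freed if the synchronous wait below gives up (e.g. on timeout).
*/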
5799 | iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl; |
5800 | iocbq->vport = vport; |
5801 | |
5802 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
5803 | "0702 Issue %s to TGT %d LUN %llu " |
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
5805 | lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, |
5806 | pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, |
5807 | iocbq->cmd_flag); |
5808 | |
5809 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
5810 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
5811 | if ((status != IOCB_SUCCESS) || |
(get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
if (status != IOCB_SUCCESS ||
get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5815 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
5816 | "0727 TMF %s to TGT %d LUN %llu " |
"failed (%d, %d) cmd_flag x%x\n",
5818 | lpfc_taskmgmt_name(task_mgmt_cmd), |
5819 | tgt_id, lun_id, |
5820 | get_job_ulpstatus(phba, iocbqrsp), |
5821 | get_job_word4(phba, iocbqrsp), |
5822 | iocbq->cmd_flag); |
5823 | /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ |
5824 | if (status == IOCB_SUCCESS) { |
if (get_job_ulpstatus(phba, iocbqrsp) ==
5826 | IOSTAT_FCP_RSP_ERROR) |
5827 | /* Something in the FCP_RSP was invalid. |
5828 | * Check conditions */ |
5829 | ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); |
5830 | else |
5831 | ret = FAILED; |
5832 | } else if ((status == IOCB_TIMEDOUT) || |
5833 | (status == IOCB_ABORTED)) { |
5834 | ret = TIMEOUT_ERROR; |
5835 | } else { |
5836 | ret = FAILED; |
5837 | } |
5838 | } else |
5839 | ret = SUCCESS; |
5840 | |
5841 | lpfc_sli_release_iocbq(phba, iocbqrsp); |
5842 | |
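/* On timeout the deferred completion handler still owns the buffer and
* will release it when the iocb finally completes.
*/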
5843 | if (status != IOCB_TIMEDOUT) |
lpfc_release_scsi_buf(phba, lpfc_cmd);
5845 | |
5846 | return ret; |
5847 | } |
5848 | |
5849 | /** |
* lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to become mapped
5851 | * @vport: The virtual port to check on |
5852 | * @rport: Pointer to fc_rport data structure. |
5853 | * |
5854 | * This routine delays until the scsi target (aka rport) for the |
5855 | * command exists (is present and logged in) or we declare it non-existent. |
5856 | * |
5857 | * Return code : |
5858 | * 0x2003 - Error |
5859 | * 0x2002 - Success |
5860 | **/ |
5861 | static int |
5862 | lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport) |
5863 | { |
5864 | struct lpfc_rport_data *rdata; |
5865 | struct lpfc_nodelist *pnode = NULL; |
5866 | unsigned long later; |
5867 | |
5868 | rdata = rport->dd_data; |
5869 | if (!rdata) { |
5870 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
"0797 Tgt Map rport failure: rdata x%px\n", rdata);
5872 | return FAILED; |
5873 | } |
5874 | pnode = rdata->pnode; |
5875 | |
5876 | /* |
5877 | * If target is not in a MAPPED state, delay until |
5878 | * target is rediscovered or devloss timeout expires. |
5879 | */ |
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5881 | while (time_after(later, jiffies)) { |
5882 | if (!pnode) |
5883 | return FAILED; |
5884 | if (pnode->nlp_state == NLP_STE_MAPPED_NODE) |
5885 | return SUCCESS; |
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5887 | rdata = rport->dd_data; |
5888 | if (!rdata) |
5889 | return FAILED; |
5890 | pnode = rdata->pnode; |
5891 | } |
5892 | if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
5893 | return FAILED; |
5894 | return SUCCESS; |
5895 | } |
5896 | |
5897 | /** |
* lpfc_reset_flush_io_context - Flush and wait out I/O after a reset TMF
* @vport: The virtual port (scsi_host) for the flush context
* @tgt_id: If aborting by Target context - specifies the target id
5901 | * @lun_id: If aborting by Lun context - specifies the lun id |
5902 | * @context: specifies the context level to flush at. |
5903 | * |
5904 | * After a reset condition via TMF, we need to flush orphaned i/o |
5905 | * contexts from the adapter. This routine aborts any contexts |
5906 | * outstanding, then waits for their completions. The wait is |
5907 | * bounded by devloss_tmo though. |
5908 | * |
5909 | * Return code : |
5910 | * 0x2003 - Error |
5911 | * 0x2002 - Success |
5912 | **/ |
5913 | static int |
5914 | lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, |
5915 | uint64_t lun_id, lpfc_ctx_cmd context) |
5916 | { |
5917 | struct lpfc_hba *phba = vport->phba; |
5918 | unsigned long later; |
5919 | int cnt; |
5920 | |
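/* Abort any IOs still outstanding in this context, then poll until
* they complete or devloss_tmo expires.
*/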
5921 | cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); |
5922 | if (cnt) |
5923 | lpfc_sli_abort_taskmgmt(vport, |
5924 | &phba->sli.sli3_ring[LPFC_FCP_RING], |
5925 | tgt_id, lun_id, context); |
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5927 | while (time_after(later, jiffies) && cnt) { |
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5929 | cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); |
5930 | } |
5931 | if (cnt) { |
5932 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
"0724 I/O flush failure for context %s : cnt x%x\n",
5934 | ((context == LPFC_CTX_LUN) ? "LUN" : |
5935 | ((context == LPFC_CTX_TGT) ? "TGT" : |
5936 | ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown" ))), |
5937 | cnt); |
5938 | return FAILED; |
5939 | } |
5940 | return SUCCESS; |
5941 | } |
5942 | |
5943 | /** |
5944 | * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point |
5945 | * @cmnd: Pointer to scsi_cmnd data structure. |
5946 | * |
5947 | * This routine does a device reset by sending a LUN_RESET task management |
5948 | * command. |
5949 | * |
5950 | * Return code : |
5951 | * 0x2003 - Error |
5952 | * 0x2002 - Success |
5953 | **/ |
5954 | static int |
5955 | lpfc_device_reset_handler(struct scsi_cmnd *cmnd) |
5956 | { |
5957 | struct Scsi_Host *shost = cmnd->device->host; |
5958 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
5959 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
5960 | struct lpfc_rport_data *rdata; |
5961 | struct lpfc_nodelist *pnode; |
5962 | unsigned tgt_id = cmnd->device->id; |
5963 | uint64_t lun_id = cmnd->device->lun; |
5964 | struct lpfc_scsi_event_header scsi_event; |
5965 | int status; |
5966 | u32 logit = LOG_FCP; |
5967 | |
5968 | if (!rport) |
5969 | return FAILED; |
5970 | |
5971 | rdata = rport->dd_data; |
5972 | if (!rdata || !rdata->pnode) { |
5973 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
"0798 Device Reset rdata failure: rdata x%px\n",
5975 | rdata); |
5976 | return FAILED; |
5977 | } |
5978 | pnode = rdata->pnode; |
5979 | status = fc_block_rport(rport); |
5980 | if (status != 0 && status != SUCCESS) |
5981 | return status; |
5982 | |
5983 | status = lpfc_chk_tgt_mapped(vport, rport); |
5984 | if (status == FAILED) { |
5985 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
"0721 Device Reset rport failure: rdata x%px\n", rdata);
5987 | return FAILED; |
5988 | } |
5989 | |
5990 | scsi_event.event_type = FC_REG_SCSI_EVENT; |
5991 | scsi_event.subcategory = LPFC_EVENT_LUNRESET; |
5992 | scsi_event.lun = lun_id; |
5993 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); |
5994 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
5995 | |
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5998 | |
5999 | status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, |
6000 | FCP_LUN_RESET); |
6001 | if (status != SUCCESS) |
6002 | logit = LOG_TRACE_EVENT; |
6003 | |
6004 | lpfc_printf_vlog(vport, KERN_ERR, logit, |
6005 | "0713 SCSI layer issued Device Reset (%d, %llu) " |
"return x%x\n", tgt_id, lun_id, status);
6007 | |
6008 | /* |
* We have to clean up the i/o because they may be orphaned by the TMF,
* or, if the TMF failed, they may be in an indeterminate state.
6011 | * So, continue on. |
6012 | * We will report success if all the i/o aborts successfully. |
6013 | */ |
6014 | if (status == SUCCESS) |
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN);
6017 | |
6018 | return status; |
6019 | } |
6020 | |
6021 | /** |
6022 | * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point |
6023 | * @cmnd: Pointer to scsi_cmnd data structure. |
6024 | * |
6025 | * This routine does a target reset by sending a TARGET_RESET task management |
6026 | * command. |
6027 | * |
6028 | * Return code : |
6029 | * 0x2003 - Error |
6030 | * 0x2002 - Success |
6031 | **/ |
6032 | static int |
6033 | lpfc_target_reset_handler(struct scsi_cmnd *cmnd) |
6034 | { |
6035 | struct Scsi_Host *shost = cmnd->device->host; |
6036 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
6037 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
6038 | struct lpfc_rport_data *rdata; |
6039 | struct lpfc_nodelist *pnode; |
6040 | unsigned tgt_id = cmnd->device->id; |
6041 | uint64_t lun_id = cmnd->device->lun; |
6042 | struct lpfc_scsi_event_header scsi_event; |
6043 | int status; |
6044 | u32 logit = LOG_FCP; |
6045 | u32 dev_loss_tmo = vport->cfg_devloss_tmo; |
6046 | unsigned long flags; |
6047 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
6048 | |
6049 | if (!rport) |
6050 | return FAILED; |
6051 | |
6052 | rdata = rport->dd_data; |
6053 | if (!rdata || !rdata->pnode) { |
6054 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
"0799 Target Reset rdata failure: rdata x%px\n",
6056 | rdata); |
6057 | return FAILED; |
6058 | } |
6059 | pnode = rdata->pnode; |
6060 | status = fc_block_rport(rport); |
6061 | if (status != 0 && status != SUCCESS) |
6062 | return status; |
6063 | |
6064 | status = lpfc_chk_tgt_mapped(vport, rport); |
6065 | if (status == FAILED) { |
6066 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
"0722 Target Reset rport failure: rdata x%px\n", rdata);
6068 | if (pnode) { |
6069 | spin_lock_irqsave(&pnode->lock, flags); |
6070 | pnode->nlp_flag &= ~NLP_NPR_ADISC; |
6071 | pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; |
spin_unlock_irqrestore(&pnode->lock, flags);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
6076 | return FAST_IO_FAIL; |
6077 | } |
6078 | |
6079 | scsi_event.event_type = FC_REG_SCSI_EVENT; |
6080 | scsi_event.subcategory = LPFC_EVENT_TGTRESET; |
6081 | scsi_event.lun = 0; |
6082 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); |
6083 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); |
6084 | |
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6087 | |
6088 | status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, |
6089 | FCP_TARGET_RESET); |
6090 | if (status != SUCCESS) { |
6091 | logit = LOG_TRACE_EVENT; |
6092 | |
6093 | /* Issue LOGO, if no LOGO is outstanding */ |
6094 | spin_lock_irqsave(&pnode->lock, flags); |
6095 | if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && |
6096 | !pnode->logo_waitq) { |
6097 | pnode->logo_waitq = &waitq; |
6098 | pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; |
6099 | pnode->nlp_flag |= NLP_ISSUE_LOGO; |
6100 | pnode->save_flags |= NLP_WAIT_FOR_LOGO; |
spin_unlock_irqrestore(&pnode->lock, flags);
6102 | lpfc_unreg_rpi(vport, pnode); |
6103 | wait_event_timeout(waitq, |
6104 | (!(pnode->save_flags & |
6105 | NLP_WAIT_FOR_LOGO)), |
6106 | msecs_to_jiffies(dev_loss_tmo * |
6107 | 1000)); |
6108 | |
6109 | if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { |
6110 | lpfc_printf_vlog(vport, KERN_ERR, logit, |
6111 | "0725 SCSI layer TGTRST " |
6112 | "failed & LOGO TMO (%d, %llu) " |
"return x%x\n",
6114 | tgt_id, lun_id, status); |
6115 | spin_lock_irqsave(&pnode->lock, flags); |
6116 | pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; |
6117 | } else { |
6118 | spin_lock_irqsave(&pnode->lock, flags); |
6119 | } |
6120 | pnode->logo_waitq = NULL; |
spin_unlock_irqrestore(&pnode->lock, flags);
6122 | status = SUCCESS; |
6123 | |
6124 | } else { |
spin_unlock_irqrestore(&pnode->lock, flags);
6126 | status = FAILED; |
6127 | } |
6128 | } |
6129 | |
6130 | lpfc_printf_vlog(vport, KERN_ERR, logit, |
6131 | "0723 SCSI layer issued Target Reset (%d, %llu) " |
"return x%x\n", tgt_id, lun_id, status);
6133 | |
6134 | /* |
* We have to clean up the i/o because they may be orphaned by the TMF,
* or, if the TMF failed, they may be in an indeterminate state.
6137 | * So, continue on. |
6138 | * We will report success if all the i/o aborts successfully. |
6139 | */ |
6140 | if (status == SUCCESS) |
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
6143 | return status; |
6144 | } |
6145 | |
6146 | /** |
6147 | * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt |
6148 | * @cmnd: Pointer to scsi_cmnd data structure. |
6149 | * |
* This routine does a host reset of the adapter port. It brings the HBA
* offline, performs a board restart, and then brings the board back online.
* The lpfc_offline path calls lpfc_sli_hba_down, which aborts and locally
* rejects all outstanding SCSI commands to the host, and the errors are
* returned back to the SCSI mid-level. As this is the SCSI mid-level's last
* resort of error handling, it will only return an error if resetting the
* adapter is not successful; in all other cases it returns success.
6157 | * |
6158 | * Return code : |
6159 | * 0x2003 - Error |
6160 | * 0x2002 - Success |
6161 | **/ |
6162 | static int |
6163 | lpfc_host_reset_handler(struct scsi_cmnd *cmnd) |
6164 | { |
6165 | struct Scsi_Host *shost = cmnd->device->host; |
6166 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
6167 | struct lpfc_hba *phba = vport->phba; |
6168 | int rc, ret = SUCCESS; |
6169 | |
6170 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
"3172 SCSI layer issued Host Reset Data:\n");
6172 | |
6173 | lpfc_offline_prep(phba, LPFC_MBX_WAIT); |
6174 | lpfc_offline(phba); |
6175 | rc = lpfc_sli_brdrestart(phba); |
6176 | if (rc) |
6177 | goto error; |
6178 | |
6179 | /* Wait for successful restart of adapter */ |
6180 | if (phba->sli_rev < LPFC_SLI_REV4) { |
6181 | rc = lpfc_sli_chipset_init(phba); |
6182 | if (rc) |
6183 | goto error; |
6184 | } |
6185 | |
6186 | rc = lpfc_online(phba); |
6187 | if (rc) |
6188 | goto error; |
6189 | |
6190 | lpfc_unblock_mgmt_io(phba); |
6191 | |
6192 | return ret; |
6193 | error: |
6194 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
"3323 Failed host reset\n");
6196 | lpfc_unblock_mgmt_io(phba); |
6197 | return FAILED; |
6198 | } |
6199 | |
6200 | /** |
6201 | * lpfc_slave_alloc - scsi_host_template slave_alloc entry point |
6202 | * @sdev: Pointer to scsi_device. |
6203 | * |
* This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
* globally available list of scsi buffers. It also makes sure no more scsi
* buffers are allocated than the HBA limit conveyed to the midlayer. This
* list of scsi buffers exists for the lifetime of the driver.
6208 | * |
6209 | * Return codes: |
6210 | * non-0 - Error |
6211 | * 0 - Success |
6212 | **/ |
6213 | static int |
6214 | lpfc_slave_alloc(struct scsi_device *sdev) |
6215 | { |
6216 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
6217 | struct lpfc_hba *phba = vport->phba; |
6218 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
6219 | uint32_t total = 0; |
6220 | uint32_t num_to_alloc = 0; |
6221 | int num_allocated = 0; |
6222 | uint32_t sdev_cnt; |
6223 | struct lpfc_device_data *device_data; |
6224 | unsigned long flags; |
6225 | struct lpfc_name target_wwpn; |
6226 | |
6227 | if (!rport || fc_remote_port_chkready(rport)) |
6228 | return -ENXIO; |
6229 | |
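/* With OAS (cfg_fof) enabled, a per-LUN lpfc_device_data entry tracks
* the rport data and availability for this lun.
*/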
6230 | if (phba->cfg_fof) { |
6231 | |
6232 | /* |
6233 | * Check to see if the device data structure for the lun |
6234 | * exists. If not, create one. |
6235 | */ |
6236 | |
u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6238 | spin_lock_irqsave(&phba->devicelock, flags); |
device_data = __lpfc_get_device_data(phba,
&phba->luns,
6241 | &vport->fc_portname, |
6242 | &target_wwpn, |
6243 | sdev->lun); |
6244 | if (!device_data) { |
spin_unlock_irqrestore(&phba->devicelock, flags);
6246 | device_data = lpfc_create_device_data(phba, |
6247 | &vport->fc_portname, |
6248 | &target_wwpn, |
6249 | sdev->lun, |
6250 | phba->cfg_XLanePriority, |
6251 | true); |
6252 | if (!device_data) |
6253 | return -ENOMEM; |
6254 | spin_lock_irqsave(&phba->devicelock, flags); |
list_add_tail(&device_data->listentry, &phba->luns);
6256 | } |
6257 | device_data->rport_data = rport->dd_data; |
6258 | device_data->available = true; |
spin_unlock_irqrestore(&phba->devicelock, flags);
6260 | sdev->hostdata = device_data; |
6261 | } else { |
6262 | sdev->hostdata = rport->dd_data; |
6263 | } |
sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6265 | |
6266 | /* For SLI4, all IO buffers are pre-allocated */ |
6267 | if (phba->sli_rev == LPFC_SLI_REV4) |
6268 | return 0; |
6269 | |
6270 | /* This code path is now ONLY for SLI3 adapters */ |
6271 | |
6272 | /* |
6273 | * Populate the cmds_per_lun count scsi_bufs into this host's globally |
6274 | * available list of scsi buffers. Don't allocate more than the |
6275 | * HBA limit conveyed to the midlayer via the host structure. The |
6276 | * formula accounts for the lun_queue_depth + error handlers + 1 |
6277 | * extra. This list of scsi bufs exists for the lifetime of the driver. |
6278 | */ |
6279 | total = phba->total_scsi_bufs; |
6280 | num_to_alloc = vport->cfg_lun_queue_depth + 2; |
6281 | |
6282 | /* If allocated buffers are enough do nothing */ |
6283 | if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) |
6284 | return 0; |
6285 | |
6286 | /* Allow some exchanges to be available always to complete discovery */ |
if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6288 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
6289 | "0704 At limitation of %d preallocated " |
"command buffers\n", total);
6291 | return 0; |
6292 | /* Allow some exchanges to be available always to complete discovery */ |
6293 | } else if (total + num_to_alloc > |
phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6295 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
6296 | "0705 Allocation request of %d " |
6297 | "command buffers will exceed max of %d. " |
6298 | "Reducing allocation request to %d.\n" , |
6299 | num_to_alloc, phba->cfg_hba_queue_depth, |
6300 | (phba->cfg_hba_queue_depth - total)); |
6301 | num_to_alloc = phba->cfg_hba_queue_depth - total; |
6302 | } |
6303 | num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); |
6304 | if (num_to_alloc != num_allocated) { |
6305 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
6306 | "0708 Allocation request of %d " |
6307 | "command buffers did not succeed. " |
6308 | "Allocated %d buffers.\n" , |
6309 | num_to_alloc, num_allocated); |
6310 | } |
6311 | if (num_allocated > 0) |
6312 | phba->total_scsi_bufs += num_allocated; |
6313 | return 0; |
6314 | } |
6315 | |
6316 | /** |
6317 | * lpfc_slave_configure - scsi_host_template slave_configure entry point |
6318 | * @sdev: Pointer to scsi_device. |
6319 | * |
 * This routine configures the following items:
6321 | * - Tag command queuing support for @sdev if supported. |
6322 | * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. |
6323 | * |
6324 | * Return codes: |
6325 | * 0 - Success |
6326 | **/ |
6327 | static int |
6328 | lpfc_slave_configure(struct scsi_device *sdev) |
6329 | { |
6330 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
6331 | struct lpfc_hba *phba = vport->phba; |
6332 | |
6333 | scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); |
6334 | |
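	/*
	 * When the driver is polling the FCP ring, service any pending fast
	 * ring events now and, if ring interrupts are disabled, re-arm the
	 * poll timer so completions keep being reaped.
	 */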
6335 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
6336 | lpfc_sli_handle_fast_ring_event(phba, |
6337 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
6338 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
6339 | lpfc_poll_rearm_timer(phba); |
6340 | } |
6341 | |
6342 | return 0; |
6343 | } |
6344 | |
6345 | /** |
6346 | * lpfc_slave_destroy - slave_destroy entry point of SHT data structure |
6347 | * @sdev: Pointer to scsi_device. |
6348 | * |
 * This routine sets the @sdev hostdata field to null.
6350 | **/ |
6351 | static void |
6352 | lpfc_slave_destroy(struct scsi_device *sdev) |
6353 | { |
6354 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
6355 | struct lpfc_hba *phba = vport->phba; |
6356 | unsigned long flags; |
6357 | struct lpfc_device_data *device_data = sdev->hostdata; |
6358 | |
	atomic_dec(&phba->sdev_cnt);
6360 | if ((phba->cfg_fof) && (device_data)) { |
6361 | spin_lock_irqsave(&phba->devicelock, flags); |
6362 | device_data->available = false; |
6363 | if (!device_data->oas_enabled) |
6364 | lpfc_delete_device_data(phba, device_data); |
		spin_unlock_irqrestore(&phba->devicelock, flags);
6366 | } |
6367 | sdev->hostdata = NULL; |
6368 | return; |
6369 | } |
6370 | |
6371 | /** |
6372 | * lpfc_create_device_data - creates and initializes device data structure for OAS |
6373 | * @phba: Pointer to host bus adapter structure. |
6374 | * @vport_wwpn: Pointer to vport's wwpn information |
6375 | * @target_wwpn: Pointer to target's wwpn information |
6376 | * @lun: Lun on target |
6377 | * @pri: Priority |
6378 | * @atomic_create: Flag to indicate if memory should be allocated using the |
6379 | * GFP_ATOMIC flag or not. |
6380 | * |
6381 | * This routine creates a device data structure which will contain identifying |
6382 | * information for the device (host wwpn, target wwpn, lun), state of OAS, |
 * whether or not the corresponding lun is available to the system,
6384 | * and pointer to the rport data. |
6385 | * |
6386 | * Return codes: |
6387 | * NULL - Error |
6388 | * Pointer to lpfc_device_data - Success |
6389 | **/ |
6390 | struct lpfc_device_data* |
6391 | lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6392 | struct lpfc_name *target_wwpn, uint64_t lun, |
6393 | uint32_t pri, bool atomic_create) |
6394 | { |
6395 | |
6396 | struct lpfc_device_data *lun_info; |
6397 | int memory_flags; |
6398 | |
6399 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6400 | !(phba->cfg_fof)) |
6401 | return NULL; |
6402 | |
6403 | /* Attempt to create the device data to contain lun info */ |
6404 | |
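	/*
	 * Callers that may not sleep (for example while holding
	 * phba->devicelock) pass atomic_create == true so the mempool
	 * allocation below uses GFP_ATOMIC rather than GFP_KERNEL.
	 */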
6405 | if (atomic_create) |
6406 | memory_flags = GFP_ATOMIC; |
6407 | else |
6408 | memory_flags = GFP_KERNEL; |
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
6413 | lun_info->rport_data = NULL; |
6414 | memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, |
6415 | sizeof(struct lpfc_name)); |
6416 | memcpy(&lun_info->device_id.target_wwpn, target_wwpn, |
6417 | sizeof(struct lpfc_name)); |
6418 | lun_info->device_id.lun = lun; |
6419 | lun_info->oas_enabled = false; |
6420 | lun_info->priority = pri; |
6421 | lun_info->available = false; |
6422 | return lun_info; |
6423 | } |
6424 | |
6425 | /** |
6426 | * lpfc_delete_device_data - frees a device data structure for OAS |
6427 | * @phba: Pointer to host bus adapter structure. |
6428 | * @lun_info: Pointer to device data structure to free. |
6429 | * |
6430 | * This routine frees the previously allocated device data structure passed. |
6431 | * |
6432 | **/ |
6433 | void |
6434 | lpfc_delete_device_data(struct lpfc_hba *phba, |
6435 | struct lpfc_device_data *lun_info) |
6436 | { |
6437 | |
6438 | if (unlikely(!phba) || !lun_info || |
6439 | !(phba->cfg_fof)) |
6440 | return; |
6441 | |
	if (!list_empty(&lun_info->listentry))
		list_del(&lun_info->listentry);
	mempool_free(lun_info, phba->device_data_mem_pool);
6445 | return; |
6446 | } |
6447 | |
6448 | /** |
6449 | * __lpfc_get_device_data - returns the device data for the specified lun |
6450 | * @phba: Pointer to host bus adapter structure. |
6451 | * @list: Point to list to search. |
6452 | * @vport_wwpn: Pointer to vport's wwpn information |
6453 | * @target_wwpn: Pointer to target's wwpn information |
6454 | * @lun: Lun on target |
6455 | * |
6456 | * This routine searches the list passed for the specified lun's device data. |
6457 | * This function does not hold locks, it is the responsibility of the caller |
6458 | * to ensure the proper lock is held before calling the function. |
6459 | * |
6460 | * Return codes: |
6461 | * NULL - Error |
6462 | * Pointer to lpfc_device_data - Success |
6463 | **/ |
6464 | struct lpfc_device_data* |
6465 | __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, |
6466 | struct lpfc_name *vport_wwpn, |
6467 | struct lpfc_name *target_wwpn, uint64_t lun) |
6468 | { |
6469 | |
6470 | struct lpfc_device_data *lun_info; |
6471 | |
6472 | if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || |
6473 | !phba->cfg_fof) |
6474 | return NULL; |
6475 | |
6476 | /* Check to see if the lun is already enabled for OAS. */ |
6477 | |
6478 | list_for_each_entry(lun_info, list, listentry) { |
		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0) &&
		    (lun_info->device_id.lun == lun))
6484 | return lun_info; |
6485 | } |
6486 | |
6487 | return NULL; |
6488 | } |
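
/*
 * Usage sketch (illustrative only): callers serialize access to the list
 * with phba->devicelock, e.g.
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
 *					  target_wwpn, lun);
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */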
6489 | |
6490 | /** |
6491 | * lpfc_find_next_oas_lun - searches for the next oas lun |
6492 | * @phba: Pointer to host bus adapter structure. |
6493 | * @vport_wwpn: Pointer to vport's wwpn information |
6494 | * @target_wwpn: Pointer to target's wwpn information |
6495 | * @starting_lun: Pointer to the lun to start searching for |
6496 | * @found_vport_wwpn: Pointer to the found lun's vport wwpn information |
6497 | * @found_target_wwpn: Pointer to the found lun's target wwpn information |
6498 | * @found_lun: Pointer to the found lun. |
6499 | * @found_lun_status: Pointer to status of the found lun. |
6500 | * @found_lun_pri: Pointer to priority of the found lun. |
6501 | * |
6502 | * This routine searches the luns list for the specified lun |
6503 | * or the first lun for the vport/target. If the vport wwpn contains |
6504 | * a zero value then a specific vport is not specified. In this case |
6505 | * any vport which contains the lun will be considered a match. If the |
6506 | * target wwpn contains a zero value then a specific target is not specified. |
6507 | * In this case any target which contains the lun will be considered a |
6508 | * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status |
6509 | * are returned. The function will also return the next lun if available. |
 * If the next lun is not found, the starting_lun parameter will be set to
6511 | * NO_MORE_OAS_LUN. |
6512 | * |
6513 | * Return codes: |
 *	false - Error or no matching lun found
 *	true - A matching lun was found
6516 | **/ |
6517 | bool |
6518 | lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6519 | struct lpfc_name *target_wwpn, uint64_t *starting_lun, |
6520 | struct lpfc_name *found_vport_wwpn, |
6521 | struct lpfc_name *found_target_wwpn, |
6522 | uint64_t *found_lun, |
6523 | uint32_t *found_lun_status, |
6524 | uint32_t *found_lun_pri) |
6525 | { |
6526 | |
6527 | unsigned long flags; |
6528 | struct lpfc_device_data *lun_info; |
6529 | struct lpfc_device_id *device_id; |
6530 | uint64_t lun; |
6531 | bool found = false; |
6532 | |
6533 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6534 | !starting_lun || !found_vport_wwpn || |
6535 | !found_target_wwpn || !found_lun || !found_lun_status || |
6536 | (*starting_lun == NO_MORE_OAS_LUN) || |
6537 | !phba->cfg_fof) |
6538 | return false; |
6539 | |
6540 | lun = *starting_lun; |
6541 | *found_lun = NO_MORE_OAS_LUN; |
6542 | *starting_lun = NO_MORE_OAS_LUN; |
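	/*
	 * Assume nothing is found and there is no follow-on lun; both values
	 * are overwritten below when a matching OAS-enabled entry is seen.
	 */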
6543 | |
	/* Search for the lun or the lun closest in value */
6545 | |
6546 | spin_lock_irqsave(&phba->devicelock, flags); |
6547 | list_for_each_entry(lun_info, &phba->luns, listentry) { |
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			     sizeof(struct lpfc_name)) == 0)) &&
6554 | (lun_info->oas_enabled)) { |
6555 | device_id = &lun_info->device_id; |
6556 | if ((!found) && |
6557 | ((lun == FIND_FIRST_OAS_LUN) || |
6558 | (device_id->lun == lun))) { |
6559 | *found_lun = device_id->lun; |
6560 | memcpy(found_vport_wwpn, |
6561 | &device_id->vport_wwpn, |
6562 | sizeof(struct lpfc_name)); |
6563 | memcpy(found_target_wwpn, |
6564 | &device_id->target_wwpn, |
6565 | sizeof(struct lpfc_name)); |
6566 | if (lun_info->available) |
6567 | *found_lun_status = |
6568 | OAS_LUN_STATUS_EXISTS; |
6569 | else |
6570 | *found_lun_status = 0; |
6571 | *found_lun_pri = lun_info->priority; |
6572 | if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) |
6573 | memset(vport_wwpn, 0x0, |
6574 | sizeof(struct lpfc_name)); |
6575 | if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) |
6576 | memset(target_wwpn, 0x0, |
6577 | sizeof(struct lpfc_name)); |
6578 | found = true; |
6579 | } else if (found) { |
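			/*
			 * The requested lun was already matched; this entry
			 * is the next OAS lun, so report it through the
			 * in/out parameters and stop searching.
			 */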
6580 | *starting_lun = device_id->lun; |
6581 | memcpy(vport_wwpn, &device_id->vport_wwpn, |
6582 | sizeof(struct lpfc_name)); |
6583 | memcpy(target_wwpn, &device_id->target_wwpn, |
6584 | sizeof(struct lpfc_name)); |
6585 | break; |
6586 | } |
6587 | } |
6588 | } |
	spin_unlock_irqrestore(&phba->devicelock, flags);
6590 | return found; |
6591 | } |
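
/*
 * Usage sketch (illustrative, caller-side names are hypothetical): every OAS
 * lun can be walked by starting from FIND_FIRST_OAS_LUN and feeding the
 * updated starting_lun back in until the routine returns false:
 *
 *	uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
 *				      &found_vport_wwpn, &found_target_wwpn,
 *				      &found_lun, &found_lun_status,
 *				      &found_lun_pri))
 *		process_oas_lun(found_lun);
 *
 * where process_oas_lun() stands in for whatever the caller does with each
 * reported lun.
 */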
6592 | |
6593 | /** |
6594 | * lpfc_enable_oas_lun - enables a lun for OAS operations |
6595 | * @phba: Pointer to host bus adapter structure. |
6596 | * @vport_wwpn: Pointer to vport's wwpn information |
6597 | * @target_wwpn: Pointer to target's wwpn information |
6598 | * @lun: Lun |
6599 | * @pri: Priority |
6600 | * |
 * This routine enables a lun for oas operations. The routine does so by
 * doing the following:
6603 | * |
6604 | * 1) Checks to see if the device data for the lun has been created. |
6605 | * 2) If found, sets the OAS enabled flag if not set and returns. |
6606 | * 3) Otherwise, creates a device data structure. |
6607 | * 4) If successfully created, indicates the device data is for an OAS lun, |
 *    indicates the lun is not available, and adds it to the list of luns.
6609 | * |
6610 | * Return codes: |
6611 | * false - Error |
6612 | * true - Success |
6613 | **/ |
6614 | bool |
6615 | lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6616 | struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) |
6617 | { |
6618 | |
6619 | struct lpfc_device_data *lun_info; |
6620 | unsigned long flags; |
6621 | |
6622 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6623 | !phba->cfg_fof) |
6624 | return false; |
6625 | |
6626 | spin_lock_irqsave(&phba->devicelock, flags); |
6627 | |
6628 | /* Check to see if the device data for the lun has been created */ |
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
6631 | if (lun_info) { |
6632 | if (!lun_info->oas_enabled) |
6633 | lun_info->oas_enabled = true; |
6634 | lun_info->priority = pri; |
		spin_unlock_irqrestore(&phba->devicelock, flags);
6636 | return true; |
6637 | } |
6638 | |
6639 | /* Create an lun info structure and add to list of luns */ |
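	/* phba->devicelock is held here, so the allocation must be atomic */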
6640 | lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, |
					   pri, true);
6642 | if (lun_info) { |
6643 | lun_info->oas_enabled = true; |
6644 | lun_info->priority = pri; |
6645 | lun_info->available = false; |
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
6648 | return true; |
6649 | } |
	spin_unlock_irqrestore(&phba->devicelock, flags);
6651 | return false; |
6652 | } |
6653 | |
6654 | /** |
6655 | * lpfc_disable_oas_lun - disables a lun for OAS operations |
6656 | * @phba: Pointer to host bus adapter structure. |
6657 | * @vport_wwpn: Pointer to vport's wwpn information |
6658 | * @target_wwpn: Pointer to target's wwpn information |
6659 | * @lun: Lun |
6660 | * @pri: Priority |
6661 | * |
 * This routine disables a lun for oas operations. The routine does so by
 * doing the following:
6664 | * |
6665 | * 1) Checks to see if the device data for the lun is created. |
6666 | * 2) If present, clears the flag indicating this lun is for OAS. |
 * 3) If the lun is not available to the system, the device data is
6668 | * freed. |
6669 | * |
6670 | * Return codes: |
6671 | * false - Error |
6672 | * true - Success |
6673 | **/ |
6674 | bool |
6675 | lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, |
6676 | struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) |
6677 | { |
6678 | |
6679 | struct lpfc_device_data *lun_info; |
6680 | unsigned long flags; |
6681 | |
6682 | if (unlikely(!phba) || !vport_wwpn || !target_wwpn || |
6683 | !phba->cfg_fof) |
6684 | return false; |
6685 | |
6686 | spin_lock_irqsave(&phba->devicelock, flags); |
6687 | |
6688 | /* Check to see if the lun is available. */ |
	lun_info = __lpfc_get_device_data(phba,
					  &phba->luns, vport_wwpn,
					  target_wwpn, lun);
6692 | if (lun_info) { |
6693 | lun_info->oas_enabled = false; |
6694 | lun_info->priority = pri; |
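		/* Drop the entry entirely if the midlayer no longer has the lun */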
6695 | if (!lun_info->available) |
6696 | lpfc_delete_device_data(phba, lun_info); |
		spin_unlock_irqrestore(&phba->devicelock, flags);
6698 | return true; |
6699 | } |
6700 | |
	spin_unlock_irqrestore(&phba->devicelock, flags);
6702 | return false; |
6703 | } |
6704 | |
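/*
 * Stub entry points for the NVMe-only host template below; they reject any
 * SCSI activity so the midlayer does not attach SCSI devices when the port
 * is not configured for FCP.
 */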
6705 | static int |
6706 | lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) |
6707 | { |
6708 | return SCSI_MLQUEUE_HOST_BUSY; |
6709 | } |
6710 | |
6711 | static int |
6712 | lpfc_no_slave(struct scsi_device *sdev) |
6713 | { |
6714 | return -ENODEV; |
6715 | } |
6716 | |
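/* Host template used when the adapter is configured for NVMe only (no FCP) */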
6717 | struct scsi_host_template lpfc_template_nvme = { |
6718 | .module = THIS_MODULE, |
6719 | .name = LPFC_DRIVER_NAME, |
6720 | .proc_name = LPFC_DRIVER_NAME, |
6721 | .info = lpfc_info, |
6722 | .queuecommand = lpfc_no_command, |
6723 | .slave_alloc = lpfc_no_slave, |
6724 | .slave_configure = lpfc_no_slave, |
6725 | .scan_finished = lpfc_scan_finished, |
6726 | .this_id = -1, |
6727 | .sg_tablesize = 1, |
6728 | .cmd_per_lun = 1, |
6729 | .shost_groups = lpfc_hba_groups, |
6730 | .max_sectors = 0xFFFFFFFF, |
6731 | .vendor_id = LPFC_NL_VENDOR_ID, |
6732 | .track_queue_depth = 0, |
6733 | }; |
6734 | |
6735 | struct scsi_host_template lpfc_template = { |
6736 | .module = THIS_MODULE, |
6737 | .name = LPFC_DRIVER_NAME, |
6738 | .proc_name = LPFC_DRIVER_NAME, |
6739 | .info = lpfc_info, |
6740 | .queuecommand = lpfc_queuecommand, |
6741 | .eh_timed_out = fc_eh_timed_out, |
6742 | .eh_should_retry_cmd = fc_eh_should_retry_cmd, |
6743 | .eh_abort_handler = lpfc_abort_handler, |
6744 | .eh_device_reset_handler = lpfc_device_reset_handler, |
6745 | .eh_target_reset_handler = lpfc_target_reset_handler, |
6746 | .eh_host_reset_handler = lpfc_host_reset_handler, |
6747 | .slave_alloc = lpfc_slave_alloc, |
6748 | .slave_configure = lpfc_slave_configure, |
6749 | .slave_destroy = lpfc_slave_destroy, |
6750 | .scan_finished = lpfc_scan_finished, |
6751 | .this_id = -1, |
6752 | .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, |
6753 | .cmd_per_lun = LPFC_CMD_PER_LUN, |
6754 | .shost_groups = lpfc_hba_groups, |
6755 | .max_sectors = 0xFFFFFFFF, |
6756 | .vendor_id = LPFC_NL_VENDOR_ID, |
6757 | .change_queue_depth = scsi_change_queue_depth, |
6758 | .track_queue_depth = 1, |
6759 | }; |
6760 | |
6761 | struct scsi_host_template lpfc_vport_template = { |
6762 | .module = THIS_MODULE, |
6763 | .name = LPFC_DRIVER_NAME, |
6764 | .proc_name = LPFC_DRIVER_NAME, |
6765 | .info = lpfc_info, |
6766 | .queuecommand = lpfc_queuecommand, |
6767 | .eh_timed_out = fc_eh_timed_out, |
6768 | .eh_should_retry_cmd = fc_eh_should_retry_cmd, |
6769 | .eh_abort_handler = lpfc_abort_handler, |
6770 | .eh_device_reset_handler = lpfc_device_reset_handler, |
6771 | .eh_target_reset_handler = lpfc_target_reset_handler, |
6772 | .eh_bus_reset_handler = NULL, |
6773 | .eh_host_reset_handler = NULL, |
6774 | .slave_alloc = lpfc_slave_alloc, |
6775 | .slave_configure = lpfc_slave_configure, |
6776 | .slave_destroy = lpfc_slave_destroy, |
6777 | .scan_finished = lpfc_scan_finished, |
6778 | .this_id = -1, |
6779 | .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, |
6780 | .cmd_per_lun = LPFC_CMD_PER_LUN, |
6781 | .shost_groups = lpfc_vport_groups, |
6782 | .max_sectors = 0xFFFFFFFF, |
6783 | .vendor_id = 0, |
6784 | .change_queue_depth = scsi_change_queue_depth, |
6785 | .track_queue_depth = 1, |
6786 | }; |
6787 | |