1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2016 Avago Technologies. All rights reserved. |
4 | */ |
5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
6 | #include <linux/module.h> |
7 | #include <linux/slab.h> |
8 | #include <linux/blk-mq.h> |
9 | #include <linux/parser.h> |
10 | #include <linux/random.h> |
11 | #include <uapi/scsi/fc/fc_fs.h> |
12 | #include <uapi/scsi/fc/fc_els.h> |
13 | |
14 | #include "nvmet.h" |
15 | #include <linux/nvme-fc-driver.h> |
16 | #include <linux/nvme-fc.h> |
17 | #include "../host/fc.h" |
18 | |
19 | |
20 | /* *************************** Data Structures/Defines ****************** */ |
21 | |
22 | |
23 | #define NVMET_LS_CTX_COUNT 256 |
24 | |
25 | struct nvmet_fc_tgtport; |
26 | struct nvmet_fc_tgt_assoc; |
27 | |
28 | struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ |
29 | struct nvmefc_ls_rsp *lsrsp; |
30 | struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ |
31 | |
32 | struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ |
33 | |
34 | struct nvmet_fc_tgtport *tgtport; |
35 | struct nvmet_fc_tgt_assoc *assoc; |
36 | void *hosthandle; |
37 | |
38 | union nvmefc_ls_requests *rqstbuf; |
39 | union nvmefc_ls_responses *rspbuf; |
40 | u16 rqstdatalen; |
41 | dma_addr_t rspdma; |
42 | |
43 | struct scatterlist sg[2]; |
44 | |
45 | struct work_struct work; |
46 | } __aligned(sizeof(unsigned long long)); |
47 | |
48 | struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ |
49 | struct nvmefc_ls_req ls_req; |
50 | |
51 | struct nvmet_fc_tgtport *tgtport; |
52 | void *hosthandle; |
53 | |
54 | int ls_error; |
55 | struct list_head lsreq_list; /* tgtport->ls_req_list */ |
56 | bool req_queued; |
57 | }; |
58 | |
59 | |
60 | /* desired maximum for a single sequence - if sg list allows it */ |
61 | #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) |
62 | |
63 | enum nvmet_fcp_datadir { |
64 | NVMET_FCP_NODATA, |
65 | NVMET_FCP_WRITE, |
66 | NVMET_FCP_READ, |
67 | NVMET_FCP_ABORTED, |
68 | }; |
69 | |
70 | struct nvmet_fc_fcp_iod { |
71 | struct nvmefc_tgt_fcp_req *fcpreq; |
72 | |
73 | struct nvme_fc_cmd_iu cmdiubuf; |
74 | struct nvme_fc_ersp_iu rspiubuf; |
75 | dma_addr_t rspdma; |
76 | struct scatterlist *next_sg; |
77 | struct scatterlist *data_sg; |
78 | int data_sg_cnt; |
79 | u32 offset; |
80 | enum nvmet_fcp_datadir io_dir; |
81 | bool active; |
82 | bool abort; |
83 | bool aborted; |
84 | bool writedataactive; |
85 | spinlock_t flock; |
86 | |
87 | struct nvmet_req req; |
88 | struct work_struct defer_work; |
89 | |
90 | struct nvmet_fc_tgtport *tgtport; |
91 | struct nvmet_fc_tgt_queue *queue; |
92 | |
93 | struct list_head fcp_list; /* tgtport->fcp_list */ |
94 | }; |
95 | |
96 | struct nvmet_fc_tgtport { |
97 | struct nvmet_fc_target_port fc_target_port; |
98 | |
99 | struct list_head tgt_list; /* nvmet_fc_target_list */ |
100 | struct device *dev; /* dev for dma mapping */ |
101 | struct nvmet_fc_target_template *ops; |
102 | |
103 | struct nvmet_fc_ls_iod *iod; |
104 | spinlock_t lock; |
105 | struct list_head ls_rcv_list; |
106 | struct list_head ls_req_list; |
107 | struct list_head ls_busylist; |
108 | struct list_head assoc_list; |
109 | struct list_head host_list; |
110 | struct ida assoc_cnt; |
111 | struct nvmet_fc_port_entry *pe; |
112 | struct kref ref; |
113 | u32 max_sg_cnt; |
114 | |
115 | struct work_struct put_work; |
116 | }; |
117 | |
118 | struct nvmet_fc_port_entry { |
119 | struct nvmet_fc_tgtport *tgtport; |
120 | struct nvmet_port *port; |
121 | u64 node_name; |
122 | u64 port_name; |
123 | struct list_head pe_list; |
124 | }; |
125 | |
126 | struct nvmet_fc_defer_fcp_req { |
127 | struct list_head req_list; |
128 | struct nvmefc_tgt_fcp_req *fcp_req; |
129 | }; |
130 | |
131 | struct nvmet_fc_tgt_queue { |
132 | bool ninetypercent; |
133 | u16 qid; |
134 | u16 sqsize; |
135 | u16 ersp_ratio; |
136 | __le16 sqhd; |
137 | atomic_t connected; |
138 | atomic_t sqtail; |
139 | atomic_t zrspcnt; |
140 | atomic_t rsn; |
141 | spinlock_t qlock; |
142 | struct nvmet_cq nvme_cq; |
143 | struct nvmet_sq nvme_sq; |
144 | struct nvmet_fc_tgt_assoc *assoc; |
145 | struct list_head fod_list; |
146 | struct list_head pending_cmd_list; |
147 | struct list_head avail_defer_list; |
148 | struct workqueue_struct *work_q; |
149 | struct kref ref; |
150 | /* array of fcp_iods */ |
151 | struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize); |
152 | } __aligned(sizeof(unsigned long long)); |
153 | |
154 | struct nvmet_fc_hostport { |
155 | struct nvmet_fc_tgtport *tgtport; |
156 | void *hosthandle; |
157 | struct list_head host_list; |
158 | struct kref ref; |
159 | u8 invalid; |
160 | }; |
161 | |
162 | struct nvmet_fc_tgt_assoc { |
163 | u64 association_id; |
164 | u32 a_id; |
165 | atomic_t terminating; |
166 | struct nvmet_fc_tgtport *tgtport; |
167 | struct nvmet_fc_hostport *hostport; |
168 | struct nvmet_fc_ls_iod *rcv_disconn; |
169 | struct list_head a_list; |
170 | struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; |
171 | struct kref ref; |
172 | struct work_struct del_work; |
173 | }; |
174 | |
175 | |
176 | static inline int |
177 | nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) |
178 | { |
179 | return (iodptr - iodptr->tgtport->iod); |
180 | } |
181 | |
182 | static inline int |
183 | nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) |
184 | { |
185 | return (fodptr - fodptr->queue->fod); |
186 | } |
187 | |
188 | |
189 | /* |
190 | * Association and Connection IDs: |
191 | * |
192 | * Association ID will have a random number in the upper 6 bytes and zero |
193 | * in the lower 2 bytes |
194 | * |
195 | * Connection IDs will be the Association ID with the QID OR'd into the lower 2 bytes |
196 | * |
197 | * note: Association ID = Connection ID for queue 0 |
198 | */ |
199 | #define BYTES_FOR_QID sizeof(u16) |
200 | #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) |
201 | #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) |
202 | |
203 | static inline u64 |
204 | nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) |
205 | { |
206 | return (assoc->association_id | qid); |
207 | } |
208 | |
209 | static inline u64 |
210 | nvmet_fc_getassociationid(u64 connectionid) |
211 | { |
212 | return connectionid & ~NVMET_FC_QUEUEID_MASK; |
213 | } |
214 | |
215 | static inline u16 |
216 | nvmet_fc_getqueueid(u64 connectionid) |
217 | { |
218 | return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); |
219 | } |
220 | |
221 | static inline struct nvmet_fc_tgtport * |
222 | targetport_to_tgtport(struct nvmet_fc_target_port *targetport) |
223 | { |
224 | return container_of(targetport, struct nvmet_fc_tgtport, |
225 | fc_target_port); |
226 | } |
227 | |
228 | static inline struct nvmet_fc_fcp_iod * |
229 | nvmet_req_to_fod(struct nvmet_req *nvme_req) |
230 | { |
231 | return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); |
232 | } |
233 | |
234 | |
235 | /* *************************** Globals **************************** */ |
236 | |
237 | |
238 | static DEFINE_SPINLOCK(nvmet_fc_tgtlock); |
239 | |
240 | static LIST_HEAD(nvmet_fc_target_list); |
241 | static DEFINE_IDA(nvmet_fc_tgtport_cnt); |
242 | static LIST_HEAD(nvmet_fc_portentry_list); |
243 | |
244 | |
245 | static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); |
246 | static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); |
247 | static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); |
248 | static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); |
249 | static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); |
250 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); |
251 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); |
252 | static void nvmet_fc_put_tgtport_work(struct work_struct *work) |
253 | { |
254 | struct nvmet_fc_tgtport *tgtport = |
255 | container_of(work, struct nvmet_fc_tgtport, put_work); |
256 | |
257 | nvmet_fc_tgtport_put(tgtport); |
258 | } |
259 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); |
260 | static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
261 | struct nvmet_fc_fcp_iod *fod); |
262 | static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); |
263 | static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, |
264 | struct nvmet_fc_ls_iod *iod); |
265 | |
266 | |
267 | /* *********************** FC-NVME DMA Handling **************************** */ |
268 | |
269 | /* |
270 | * The fcloop device passes in a NULL device pointer. Real LLDDs will |
271 | * pass in a valid device pointer. If NULL is passed to the dma mapping |
272 | * routines, depending on the platform, it may or may not succeed, and |
273 | * may crash. |
274 | * |
275 | * As such: |
276 | * Wrap all the dma routines and check the dev pointer. |
277 | * |
278 | * For simple mappings (those that return just a dma address), we noop them, |
279 | * returning a dma address of 0. |
280 | * |
281 | * On more complex mappings (dma_map_sg), a pseudo routine fills |
282 | * in the scatter list, setting all dma addresses to 0. |
283 | */ |
284 | |
285 | static inline dma_addr_t |
286 | fc_dma_map_single(struct device *dev, void *ptr, size_t size, |
287 | enum dma_data_direction dir) |
288 | { |
289 | return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; |
290 | } |
291 | |
292 | static inline int |
293 | fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
294 | { |
295 | return dev ? dma_mapping_error(dev, dma_addr) : 0; |
296 | } |
297 | |
298 | static inline void |
299 | fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, |
300 | enum dma_data_direction dir) |
301 | { |
302 | if (dev) |
303 | dma_unmap_single(dev, addr, size, dir); |
304 | } |
305 | |
306 | static inline void |
307 | fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, |
308 | enum dma_data_direction dir) |
309 | { |
310 | if (dev) |
311 | dma_sync_single_for_cpu(dev, addr, size, dir); |
312 | } |
313 | |
314 | static inline void |
315 | fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, |
316 | enum dma_data_direction dir) |
317 | { |
318 | if (dev) |
319 | dma_sync_single_for_device(dev, addr, size, dir); |
320 | } |
321 | |
322 | /* pseudo dma_map_sg call */ |
323 | static int |
324 | fc_map_sg(struct scatterlist *sg, int nents) |
325 | { |
326 | struct scatterlist *s; |
327 | int i; |
328 | |
329 | WARN_ON(nents == 0 || sg[0].length == 0); |
330 | |
331 | for_each_sg(sg, s, nents, i) { |
332 | s->dma_address = 0L; |
333 | #ifdef CONFIG_NEED_SG_DMA_LENGTH |
334 | s->dma_length = s->length; |
335 | #endif |
336 | } |
337 | return nents; |
338 | } |
339 | |
340 | static inline int |
341 | fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
342 | enum dma_data_direction dir) |
343 | { |
344 | return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); |
345 | } |
346 | |
347 | static inline void |
348 | fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
349 | enum dma_data_direction dir) |
350 | { |
351 | if (dev) |
352 | dma_unmap_sg(dev, sg, nents, dir); |
353 | } |
354 | |
355 | |
356 | /* ********************** FC-NVME LS XMT Handling ************************* */ |
357 | |
358 | |
359 | static void |
360 | __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) |
361 | { |
362 | struct nvmet_fc_tgtport *tgtport = lsop->tgtport; |
363 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
364 | unsigned long flags; |
365 | |
366 | spin_lock_irqsave(&tgtport->lock, flags); |
367 | |
368 | if (!lsop->req_queued) { |
369 | spin_unlock_irqrestore(&tgtport->lock, flags); |
370 | goto out_putwork; |
371 | } |
372 | |
373 | list_del(&lsop->lsreq_list); |
374 | |
375 | lsop->req_queued = false; |
376 | |
377 | spin_unlock_irqrestore(&tgtport->lock, flags); |
378 | |
379 | fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, |
380 | (lsreq->rqstlen + lsreq->rsplen), |
381 | DMA_BIDIRECTIONAL); |
382 | |
383 | out_putwork: |
384 | queue_work(nvmet_wq, &tgtport->put_work); |
385 | } |
386 | |
387 | static int |
388 | __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, |
389 | struct nvmet_fc_ls_req_op *lsop, |
390 | void (*done)(struct nvmefc_ls_req *req, int status)) |
391 | { |
392 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
393 | unsigned long flags; |
394 | int ret = 0; |
395 | |
396 | if (!tgtport->ops->ls_req) |
397 | return -EOPNOTSUPP; |
398 | |
399 | if (!nvmet_fc_tgtport_get(tgtport)) |
400 | return -ESHUTDOWN; |
401 | |
402 | lsreq->done = done; |
403 | lsop->req_queued = false; |
404 | INIT_LIST_HEAD(&lsop->lsreq_list); |
405 | |
406 | lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, |
407 | lsreq->rqstlen + lsreq->rsplen, |
408 | DMA_BIDIRECTIONAL); |
409 | if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { |
410 | ret = -EFAULT; |
411 | goto out_puttgtport; |
412 | } |
413 | lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; |
414 | |
415 | spin_lock_irqsave(&tgtport->lock, flags); |
416 | |
417 | list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); |
418 | |
419 | lsop->req_queued = true; |
420 | |
421 | spin_unlock_irqrestore(&tgtport->lock, flags); |
422 | |
423 | ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, |
424 | lsreq); |
425 | if (ret) |
426 | goto out_unlink; |
427 | |
428 | return 0; |
429 | |
430 | out_unlink: |
431 | lsop->ls_error = ret; |
432 | spin_lock_irqsave(&tgtport->lock, flags); |
433 | lsop->req_queued = false; |
434 | list_del(&lsop->lsreq_list); |
435 | spin_unlock_irqrestore(&tgtport->lock, flags); |
436 | fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, |
437 | (lsreq->rqstlen + lsreq->rsplen), |
438 | DMA_BIDIRECTIONAL); |
439 | out_puttgtport: |
440 | nvmet_fc_tgtport_put(tgtport); |
441 | |
442 | return ret; |
443 | } |
444 | |
445 | static int |
446 | nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, |
447 | struct nvmet_fc_ls_req_op *lsop, |
448 | void (*done)(struct nvmefc_ls_req *req, int status)) |
449 | { |
450 | /* don't wait for completion */ |
451 | |
452 | return __nvmet_fc_send_ls_req(tgtport, lsop, done); |
453 | } |
454 | |
455 | static void |
456 | nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) |
457 | { |
458 | struct nvmet_fc_ls_req_op *lsop = |
459 | container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); |
460 | |
461 | __nvmet_fc_finish_ls_req(lsop); |
462 | |
463 | /* fc-nvme target doesn't care about success or failure of cmd */ |
464 | |
465 | kfree(lsop); |
466 | } |
467 | |
468 | /* |
469 | * This routine sends a FC-NVME LS to disconnect (aka terminate) |
470 | * the FC-NVME Association. Terminating the association also |
471 | * terminates the FC-NVME connections (per queue, both admin and io |
472 | * queues) that are part of the association. E.g. things are torn |
473 | * down, and the related FC-NVME Association ID and Connection IDs |
474 | * become invalid. |
475 | * |
476 | * The behavior of the fc-nvme target is such that its |
477 | * understanding of the association and connections will implicitly |
478 | * be torn down. The action is implicit as it may be due to a loss of |
479 | * connectivity with the fc-nvme host, so the target may never get a |
480 | * response even if it tried. As such, the action of this routine |
481 | * is to asynchronously send the LS, ignore any results of the LS, and |
482 | * continue on with terminating the association. If the fc-nvme host |
483 | * is present and receives the LS, it too can tear down. |
484 | */ |
485 | static void |
486 | nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) |
487 | { |
488 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
489 | struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; |
490 | struct fcnvme_ls_disconnect_assoc_acc *discon_acc; |
491 | struct nvmet_fc_ls_req_op *lsop; |
492 | struct nvmefc_ls_req *lsreq; |
493 | int ret; |
494 | |
495 | /* |
496 | * If ls_req is NULL or no hosthandle, it's an older lldd and no |
497 | * message is normal. Otherwise, send unless the hostport has |
498 | * already been invalidated by the lldd. |
499 | */ |
500 | if (!tgtport->ops->ls_req || assoc->hostport->invalid) |
501 | return; |
502 | |
503 | lsop = kzalloc((sizeof(*lsop) + |
504 | sizeof(*discon_rqst) + sizeof(*discon_acc) + |
505 | tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); |
506 | if (!lsop) { |
507 | dev_info(tgtport->dev, |
508 | "{%d:%d} send Disconnect Association failed: ENOMEM\n", |
509 | tgtport->fc_target_port.port_num, assoc->a_id); |
510 | return; |
511 | } |
512 | |
513 | discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; |
514 | discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; |
515 | lsreq = &lsop->ls_req; |
516 | if (tgtport->ops->lsrqst_priv_sz) |
517 | lsreq->private = (void *)&discon_acc[1]; |
518 | else |
519 | lsreq->private = NULL; |
520 | |
521 | lsop->tgtport = tgtport; |
522 | lsop->hosthandle = assoc->hostport->hosthandle; |
523 | |
524 | nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, |
525 | assoc->association_id); |
526 | |
527 | ret = nvmet_fc_send_ls_req_async(tgtport, lsop, |
528 | nvmet_fc_disconnect_assoc_done); |
529 | if (ret) { |
530 | dev_info(tgtport->dev, |
531 | "{%d:%d} XMT Disconnect Association failed: %d\n", |
532 | tgtport->fc_target_port.port_num, assoc->a_id, ret); |
533 | kfree(lsop); |
534 | } |
535 | } |
536 | |
537 | |
538 | /* *********************** FC-NVME Port Management ************************ */ |
539 | |
540 | |
541 | static int |
542 | nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) |
543 | { |
544 | struct nvmet_fc_ls_iod *iod; |
545 | int i; |
546 | |
547 | iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), |
548 | GFP_KERNEL); |
549 | if (!iod) |
550 | return -ENOMEM; |
551 | |
552 | tgtport->iod = iod; |
553 | |
554 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { |
555 | INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); |
556 | iod->tgtport = tgtport; |
557 | list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); |
558 | |
559 | iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + |
560 | sizeof(union nvmefc_ls_responses), |
561 | GFP_KERNEL); |
562 | if (!iod->rqstbuf) |
563 | goto out_fail; |
564 | |
565 | iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; |
566 | |
567 | iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, |
568 | sizeof(*iod->rspbuf), |
569 | DMA_TO_DEVICE); |
570 | if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) |
571 | goto out_fail; |
572 | } |
573 | |
574 | return 0; |
575 | |
576 | out_fail: |
577 | kfree(iod->rqstbuf); |
578 | list_del(&iod->ls_rcv_list); |
579 | for (iod--, i--; i >= 0; iod--, i--) { |
580 | fc_dma_unmap_single(tgtport->dev, iod->rspdma, |
581 | sizeof(*iod->rspbuf), DMA_TO_DEVICE); |
582 | kfree(iod->rqstbuf); |
583 | list_del(&iod->ls_rcv_list); |
584 | } |
585 | |
586 | kfree(iod); |
587 | |
588 | return -EFAULT; |
589 | } |
590 | |
591 | static void |
592 | nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) |
593 | { |
594 | struct nvmet_fc_ls_iod *iod = tgtport->iod; |
595 | int i; |
596 | |
597 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { |
598 | fc_dma_unmap_single(tgtport->dev, |
599 | iod->rspdma, sizeof(*iod->rspbuf), |
600 | DMA_TO_DEVICE); |
601 | kfree(iod->rqstbuf); |
602 | list_del(&iod->ls_rcv_list); |
603 | } |
604 | kfree(tgtport->iod); |
605 | } |
606 | |
607 | static struct nvmet_fc_ls_iod * |
608 | nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) |
609 | { |
610 | struct nvmet_fc_ls_iod *iod; |
611 | unsigned long flags; |
612 | |
613 | spin_lock_irqsave(&tgtport->lock, flags); |
614 | iod = list_first_entry_or_null(&tgtport->ls_rcv_list, |
615 | struct nvmet_fc_ls_iod, ls_rcv_list); |
616 | if (iod) |
617 | list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); |
618 | spin_unlock_irqrestore(&tgtport->lock, flags); |
619 | return iod; |
620 | } |
621 | |
622 | |
623 | static void |
624 | nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, |
625 | struct nvmet_fc_ls_iod *iod) |
626 | { |
627 | unsigned long flags; |
628 | |
629 | spin_lock_irqsave(&tgtport->lock, flags); |
630 | list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); |
631 | spin_unlock_irqrestore(&tgtport->lock, flags); |
632 | } |
633 | |
634 | static void |
635 | nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, |
636 | struct nvmet_fc_tgt_queue *queue) |
637 | { |
638 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
639 | int i; |
640 | |
641 | for (i = 0; i < queue->sqsize; fod++, i++) { |
642 | INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); |
643 | fod->tgtport = tgtport; |
644 | fod->queue = queue; |
645 | fod->active = false; |
646 | fod->abort = false; |
647 | fod->aborted = false; |
648 | fod->fcpreq = NULL; |
649 | list_add_tail(&fod->fcp_list, &queue->fod_list); |
650 | spin_lock_init(&fod->flock); |
651 | |
652 | fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, |
653 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); |
654 | if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { |
655 | list_del(&fod->fcp_list); |
656 | for (fod--, i--; i >= 0; fod--, i--) { |
657 | fc_dma_unmap_single(tgtport->dev, fod->rspdma, |
658 | sizeof(fod->rspiubuf), |
659 | DMA_TO_DEVICE); |
660 | fod->rspdma = 0L; |
661 | list_del(&fod->fcp_list); |
662 | } |
663 | |
664 | return; |
665 | } |
666 | } |
667 | } |
668 | |
669 | static void |
670 | nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, |
671 | struct nvmet_fc_tgt_queue *queue) |
672 | { |
673 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
674 | int i; |
675 | |
676 | for (i = 0; i < queue->sqsize; fod++, i++) { |
677 | if (fod->rspdma) |
678 | fc_dma_unmap_single(tgtport->dev, fod->rspdma, |
679 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); |
680 | } |
681 | } |
682 | |
683 | static struct nvmet_fc_fcp_iod * |
684 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) |
685 | { |
686 | struct nvmet_fc_fcp_iod *fod; |
687 | |
688 | lockdep_assert_held(&queue->qlock); |
689 | |
690 | fod = list_first_entry_or_null(&queue->fod_list, |
691 | struct nvmet_fc_fcp_iod, fcp_list); |
692 | if (fod) { |
693 | list_del(&fod->fcp_list); |
694 | fod->active = true; |
695 | /* |
696 | * no queue reference is taken, as it was taken by the |
697 | * queue lookup just prior to the allocation. The iod |
698 | * will "inherit" that reference. |
699 | */ |
700 | } |
701 | return fod; |
702 | } |
703 | |
704 | |
705 | static void |
706 | nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, |
707 | struct nvmet_fc_tgt_queue *queue, |
708 | struct nvmefc_tgt_fcp_req *fcpreq) |
709 | { |
710 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
711 | |
712 | /* |
713 | * put all admin cmds on hw queue id 0. All io commands go to |
714 | * the respective hw queue based on a modulo basis |
715 | */ |
716 | fcpreq->hwqid = queue->qid ? |
717 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; |
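/*
 * For example, assuming a hypothetical max_hw_queues of 4: qid 1..4 map to
 * hwqid 0..3 and qid 5 wraps back to hwqid 0, while qid 0 (the admin
 * queue) always lands on hwqid 0.
 */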
718 | |
719 | nvmet_fc_handle_fcp_rqst(tgtport, fod); |
720 | } |
721 | |
722 | static void |
723 | nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) |
724 | { |
725 | struct nvmet_fc_fcp_iod *fod = |
726 | container_of(work, struct nvmet_fc_fcp_iod, defer_work); |
727 | |
728 | /* Submit deferred IO for processing */ |
729 | nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); |
730 | |
731 | } |
732 | |
733 | static void |
734 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, |
735 | struct nvmet_fc_fcp_iod *fod) |
736 | { |
737 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
738 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
739 | struct nvmet_fc_defer_fcp_req *deferfcp; |
740 | unsigned long flags; |
741 | |
742 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, |
743 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); |
744 | |
745 | fcpreq->nvmet_fc_private = NULL; |
746 | |
747 | fod->active = false; |
748 | fod->abort = false; |
749 | fod->aborted = false; |
750 | fod->writedataactive = false; |
751 | fod->fcpreq = NULL; |
752 | |
753 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); |
754 | |
755 | /* release the queue lookup reference on the completed IO */ |
756 | nvmet_fc_tgt_q_put(queue); |
757 | |
758 | spin_lock_irqsave(&queue->qlock, flags); |
759 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, |
760 | struct nvmet_fc_defer_fcp_req, req_list); |
761 | if (!deferfcp) { |
762 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); |
763 | spin_unlock_irqrestore(&queue->qlock, flags); |
764 | return; |
765 | } |
766 | |
767 | /* Re-use the fod for the next pending cmd that was deferred */ |
768 | list_del(&deferfcp->req_list); |
769 | |
770 | fcpreq = deferfcp->fcp_req; |
771 | |
772 | /* deferfcp can be reused for another IO at a later date */ |
773 | list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); |
774 | |
775 | spin_unlock_irqrestore(&queue->qlock, flags); |
776 | |
777 | /* Save NVME CMD IO in fod */ |
778 | memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); |
779 | |
780 | /* Setup new fcpreq to be processed */ |
781 | fcpreq->rspaddr = NULL; |
782 | fcpreq->rsplen = 0; |
783 | fcpreq->nvmet_fc_private = fod; |
784 | fod->fcpreq = fcpreq; |
785 | fod->active = true; |
786 | |
787 | /* inform LLDD IO is now being processed */ |
788 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); |
789 | |
790 | /* |
791 | * Leave the queue lookup get reference taken when |
792 | * fod was originally allocated. |
793 | */ |
794 | |
795 | queue_work(queue->work_q, &fod->defer_work); |
796 | } |
797 | |
798 | static struct nvmet_fc_tgt_queue * |
799 | nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, |
800 | u16 qid, u16 sqsize) |
801 | { |
802 | struct nvmet_fc_tgt_queue *queue; |
803 | int ret; |
804 | |
805 | if (qid > NVMET_NR_QUEUES) |
806 | return NULL; |
807 | |
808 | queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); |
809 | if (!queue) |
810 | return NULL; |
811 | |
812 | queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, |
813 | assoc->tgtport->fc_target_port.port_num, |
814 | assoc->a_id, qid); |
815 | if (!queue->work_q) |
816 | goto out_free_queue; |
817 | |
818 | queue->qid = qid; |
819 | queue->sqsize = sqsize; |
820 | queue->assoc = assoc; |
821 | INIT_LIST_HEAD(&queue->fod_list); |
822 | INIT_LIST_HEAD(&queue->avail_defer_list); |
823 | INIT_LIST_HEAD(&queue->pending_cmd_list); |
824 | atomic_set(&queue->connected, 0); |
825 | atomic_set(&queue->sqtail, 0); |
826 | atomic_set(&queue->rsn, 1); |
827 | atomic_set(&queue->zrspcnt, 0); |
828 | spin_lock_init(&queue->qlock); |
829 | kref_init(&queue->ref); |
830 | |
831 | nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); |
832 | |
833 | ret = nvmet_sq_init(&queue->nvme_sq); |
834 | if (ret) |
835 | goto out_fail_iodlist; |
836 | |
837 | WARN_ON(assoc->queues[qid]); |
838 | assoc->queues[qid] = queue; |
839 | |
840 | return queue; |
841 | |
842 | out_fail_iodlist: |
843 | nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); |
844 | destroy_workqueue(queue->work_q); |
845 | out_free_queue: |
846 | kfree(queue); |
847 | return NULL; |
848 | } |
849 | |
850 | |
851 | static void |
852 | nvmet_fc_tgt_queue_free(struct kref *ref) |
853 | { |
854 | struct nvmet_fc_tgt_queue *queue = |
855 | container_of(ref, struct nvmet_fc_tgt_queue, ref); |
856 | |
857 | nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); |
858 | |
859 | destroy_workqueue(queue->work_q); |
860 | |
861 | kfree(queue); |
862 | } |
863 | |
864 | static void |
865 | nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) |
866 | { |
867 | kref_put(&queue->ref, nvmet_fc_tgt_queue_free); |
868 | } |
869 | |
870 | static int |
871 | nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) |
872 | { |
873 | return kref_get_unless_zero(&queue->ref); |
874 | } |
875 | |
876 | |
877 | static void |
878 | nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) |
879 | { |
880 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; |
881 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
882 | struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; |
883 | unsigned long flags; |
884 | int i; |
885 | bool disconnect; |
886 | |
887 | disconnect = atomic_xchg(&queue->connected, 0); |
888 | |
889 | /* if not connected, nothing to do */ |
890 | if (!disconnect) |
891 | return; |
892 | |
893 | spin_lock_irqsave(&queue->qlock, flags); |
894 | /* abort outstanding io's */ |
895 | for (i = 0; i < queue->sqsize; fod++, i++) { |
896 | if (fod->active) { |
897 | spin_lock(&fod->flock); |
898 | fod->abort = true; |
899 | /* |
900 | * only call lldd abort routine if waiting for |
901 | * writedata. other outstanding ops should finish |
902 | * on their own. |
903 | */ |
904 | if (fod->writedataactive) { |
905 | fod->aborted = true; |
906 | spin_unlock(&fod->flock); |
907 | tgtport->ops->fcp_abort( |
908 | &tgtport->fc_target_port, fod->fcpreq); |
909 | } else |
910 | spin_unlock(&fod->flock); |
911 | } |
912 | } |
913 | |
914 | /* Cleanup defer'ed IOs in queue */ |
915 | list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, |
916 | req_list) { |
917 | list_del(&deferfcp->req_list); |
918 | kfree(deferfcp); |
919 | } |
920 | |
921 | for (;;) { |
922 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, |
923 | struct nvmet_fc_defer_fcp_req, req_list); |
924 | if (!deferfcp) |
925 | break; |
926 | |
927 | list_del(&deferfcp->req_list); |
928 | spin_unlock_irqrestore(&queue->qlock, flags); |
929 | |
930 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, |
931 | deferfcp->fcp_req); |
932 | |
933 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, |
934 | deferfcp->fcp_req); |
935 | |
936 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, |
937 | deferfcp->fcp_req); |
938 | |
939 | /* release the queue lookup reference */ |
940 | nvmet_fc_tgt_q_put(queue); |
941 | |
942 | kfree(deferfcp); |
943 | |
944 | spin_lock_irqsave(&queue->qlock, flags); |
945 | } |
946 | spin_unlock_irqrestore(&queue->qlock, flags); |
947 | |
948 | flush_workqueue(queue->work_q); |
949 | |
950 | nvmet_sq_destroy(&queue->nvme_sq); |
951 | |
952 | nvmet_fc_tgt_q_put(queue); |
953 | } |
954 | |
955 | static struct nvmet_fc_tgt_queue * |
956 | nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, |
957 | u64 connection_id) |
958 | { |
959 | struct nvmet_fc_tgt_assoc *assoc; |
960 | struct nvmet_fc_tgt_queue *queue; |
961 | u64 association_id = nvmet_fc_getassociationid(connection_id); |
962 | u16 qid = nvmet_fc_getqueueid(connection_id); |
963 | |
964 | if (qid > NVMET_NR_QUEUES) |
965 | return NULL; |
966 | |
967 | rcu_read_lock(); |
968 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
969 | if (association_id == assoc->association_id) { |
970 | queue = assoc->queues[qid]; |
971 | if (queue && |
972 | (!atomic_read(&queue->connected) || |
973 | !nvmet_fc_tgt_q_get(queue))) |
974 | queue = NULL; |
975 | rcu_read_unlock(); |
976 | return queue; |
977 | } |
978 | } |
979 | rcu_read_unlock(); |
980 | return NULL; |
981 | } |
982 | |
983 | static void |
984 | nvmet_fc_hostport_free(struct kref *ref) |
985 | { |
986 | struct nvmet_fc_hostport *hostport = |
987 | container_of(ref, struct nvmet_fc_hostport, ref); |
988 | struct nvmet_fc_tgtport *tgtport = hostport->tgtport; |
989 | unsigned long flags; |
990 | |
991 | spin_lock_irqsave(&tgtport->lock, flags); |
992 | list_del(&hostport->host_list); |
993 | spin_unlock_irqrestore(&tgtport->lock, flags); |
994 | if (tgtport->ops->host_release && hostport->invalid) |
995 | tgtport->ops->host_release(hostport->hosthandle); |
996 | kfree(hostport); |
997 | nvmet_fc_tgtport_put(tgtport); |
998 | } |
999 | |
1000 | static void |
1001 | nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) |
1002 | { |
1003 | kref_put(&hostport->ref, nvmet_fc_hostport_free); |
1004 | } |
1005 | |
1006 | static int |
1007 | nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) |
1008 | { |
1009 | return kref_get_unless_zero(&hostport->ref); |
1010 | } |
1011 | |
1012 | static void |
1013 | nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) |
1014 | { |
1015 | /* if LLDD not implemented, leave as NULL */ |
1016 | if (!hostport || !hostport->hosthandle) |
1017 | return; |
1018 | |
1019 | nvmet_fc_hostport_put(hostport); |
1020 | } |
1021 | |
1022 | static struct nvmet_fc_hostport * |
1023 | nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
1024 | { |
1025 | struct nvmet_fc_hostport *host; |
1026 | |
1027 | lockdep_assert_held(&tgtport->lock); |
1028 | |
1029 | list_for_each_entry(host, &tgtport->host_list, host_list) { |
1030 | if (host->hosthandle == hosthandle && !host->invalid) { |
1031 | if (nvmet_fc_hostport_get(host)) |
1032 | return host; |
1033 | } |
1034 | } |
1035 | |
1036 | return NULL; |
1037 | } |
1038 | |
1039 | static struct nvmet_fc_hostport * |
1040 | nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
1041 | { |
1042 | struct nvmet_fc_hostport *newhost, *match = NULL; |
1043 | unsigned long flags; |
1044 | |
1045 | /* if LLDD not implemented, leave as NULL */ |
1046 | if (!hosthandle) |
1047 | return NULL; |
1048 | |
1049 | /* |
1050 | * take reference for what will be the newly allocated hostport if |
1051 | * we end up using a new allocation |
1052 | */ |
1053 | if (!nvmet_fc_tgtport_get(tgtport)) |
1054 | return ERR_PTR(-EINVAL); |
1055 | |
1056 | spin_lock_irqsave(&tgtport->lock, flags); |
1057 | match = nvmet_fc_match_hostport(tgtport, hosthandle); |
1058 | spin_unlock_irqrestore(&tgtport->lock, flags); |
1059 | |
1060 | if (match) { |
1061 | /* no new allocation - release reference */ |
1062 | nvmet_fc_tgtport_put(tgtport); |
1063 | return match; |
1064 | } |
1065 | |
1066 | newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); |
1067 | if (!newhost) { |
1068 | /* no new allocation - release reference */ |
1069 | nvmet_fc_tgtport_put(tgtport); |
1070 | return ERR_PTR(-ENOMEM); |
1071 | } |
1072 | |
1073 | spin_lock_irqsave(&tgtport->lock, flags); |
1074 | match = nvmet_fc_match_hostport(tgtport, hosthandle); |
1075 | if (match) { |
1076 | /* new allocation not needed */ |
1077 | kfree(newhost); |
1078 | newhost = match; |
1079 | } else { |
1080 | newhost->tgtport = tgtport; |
1081 | newhost->hosthandle = hosthandle; |
1082 | INIT_LIST_HEAD(&newhost->host_list); |
1083 | kref_init(&newhost->ref); |
1084 | |
1085 | list_add_tail(&newhost->host_list, &tgtport->host_list); |
1086 | } |
1087 | spin_unlock_irqrestore(&tgtport->lock, flags); |
1088 | |
1089 | return newhost; |
1090 | } |
1091 | |
1092 | static void |
1093 | nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) |
1094 | { |
1095 | nvmet_fc_delete_target_assoc(assoc); |
1096 | nvmet_fc_tgt_a_put(assoc); |
1097 | } |
1098 | |
1099 | static void |
1100 | nvmet_fc_delete_assoc_work(struct work_struct *work) |
1101 | { |
1102 | struct nvmet_fc_tgt_assoc *assoc = |
1103 | container_of(work, struct nvmet_fc_tgt_assoc, del_work); |
1104 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
1105 | |
1106 | nvmet_fc_delete_assoc(assoc); |
1107 | nvmet_fc_tgtport_put(tgtport); |
1108 | } |
1109 | |
1110 | static void |
1111 | nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) |
1112 | { |
1113 | nvmet_fc_tgtport_get(assoc->tgtport); |
1114 | queue_work(nvmet_wq, &assoc->del_work); |
1115 | } |
1116 | |
1117 | static bool |
1118 | nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id) |
1119 | { |
1120 | struct nvmet_fc_tgt_assoc *a; |
1121 | bool found = false; |
1122 | |
1123 | rcu_read_lock(); |
1124 | list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { |
1125 | if (association_id == a->association_id) { |
1126 | found = true; |
1127 | break; |
1128 | } |
1129 | } |
1130 | rcu_read_unlock(); |
1131 | |
1132 | return found; |
1133 | } |
1134 | |
1135 | static struct nvmet_fc_tgt_assoc * |
1136 | nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
1137 | { |
1138 | struct nvmet_fc_tgt_assoc *assoc; |
1139 | unsigned long flags; |
1140 | bool done; |
1141 | u64 ran; |
1142 | int idx; |
1143 | |
1144 | if (!tgtport->pe) |
1145 | return NULL; |
1146 | |
1147 | assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); |
1148 | if (!assoc) |
1149 | return NULL; |
1150 | |
1151 | idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); |
1152 | if (idx < 0) |
1153 | goto out_free_assoc; |
1154 | |
1155 | assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); |
1156 | if (IS_ERR(assoc->hostport)) |
1157 | goto out_ida; |
1158 | |
1159 | assoc->tgtport = tgtport; |
1160 | assoc->a_id = idx; |
1161 | INIT_LIST_HEAD(&assoc->a_list); |
1162 | kref_init(&assoc->ref); |
1163 | INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); |
1164 | atomic_set(&assoc->terminating, 0); |
1165 | |
1166 | done = false; |
1167 | do { |
1168 | get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); |
1169 | ran = ran << BYTES_FOR_QID_SHIFT; |
1170 | |
1171 | spin_lock_irqsave(&tgtport->lock, flags); |
1172 | if (!nvmet_fc_assoc_exists(tgtport, ran)) { |
1173 | assoc->association_id = ran; |
1174 | list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); |
1175 | done = true; |
1176 | } |
1177 | spin_unlock_irqrestore(&tgtport->lock, flags); |
1178 | } while (!done); |
1179 | |
1180 | return assoc; |
1181 | |
1182 | out_ida: |
1183 | ida_free(&tgtport->assoc_cnt, idx); |
1184 | out_free_assoc: |
1185 | kfree(assoc); |
1186 | return NULL; |
1187 | } |
1188 | |
1189 | static void |
1190 | nvmet_fc_target_assoc_free(struct kref *ref) |
1191 | { |
1192 | struct nvmet_fc_tgt_assoc *assoc = |
1193 | container_of(ref, struct nvmet_fc_tgt_assoc, ref); |
1194 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
1195 | struct nvmet_fc_ls_iod *oldls; |
1196 | unsigned long flags; |
1197 | int i; |
1198 | |
1199 | for (i = NVMET_NR_QUEUES; i >= 0; i--) { |
1200 | if (assoc->queues[i]) |
1201 | nvmet_fc_delete_target_queue(assoc->queues[i]); |
1202 | } |
1203 | |
1204 | /* Send Disconnect now that all i/o has completed */ |
1205 | nvmet_fc_xmt_disconnect_assoc(assoc); |
1206 | |
1207 | nvmet_fc_free_hostport(assoc->hostport); |
1208 | spin_lock_irqsave(&tgtport->lock, flags); |
1209 | oldls = assoc->rcv_disconn; |
1210 | spin_unlock_irqrestore(&tgtport->lock, flags); |
1211 | /* if pending Rcv Disconnect Association LS, send rsp now */ |
1212 | if (oldls) |
1213 | nvmet_fc_xmt_ls_rsp(tgtport, oldls); |
1214 | ida_free(&tgtport->assoc_cnt, assoc->a_id); |
1215 | dev_info(tgtport->dev, |
1216 | "{%d:%d} Association freed\n", |
1217 | tgtport->fc_target_port.port_num, assoc->a_id); |
1218 | kfree(assoc); |
1219 | } |
1220 | |
1221 | static void |
1222 | nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) |
1223 | { |
1224 | kref_put(&assoc->ref, nvmet_fc_target_assoc_free); |
1225 | } |
1226 | |
1227 | static int |
1228 | nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) |
1229 | { |
1230 | return kref_get_unless_zero(&assoc->ref); |
1231 | } |
1232 | |
1233 | static void |
1234 | nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) |
1235 | { |
1236 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
1237 | unsigned long flags; |
1238 | int i, terminating; |
1239 | |
1240 | terminating = atomic_xchg(&assoc->terminating, 1); |
1241 | |
1242 | /* if already terminating, do nothing */ |
1243 | if (terminating) |
1244 | return; |
1245 | |
1246 | spin_lock_irqsave(&tgtport->lock, flags); |
1247 | list_del_rcu(&assoc->a_list); |
1248 | spin_unlock_irqrestore(&tgtport->lock, flags); |
1249 | |
1250 | synchronize_rcu(); |
1251 | |
1252 | /* ensure all in-flight I/Os have been processed */ |
1253 | for (i = NVMET_NR_QUEUES; i >= 0; i--) { |
1254 | if (assoc->queues[i]) |
1255 | flush_workqueue(assoc->queues[i]->work_q); |
1256 | } |
1257 | |
1258 | dev_info(tgtport->dev, |
1259 | "{%d:%d} Association deleted\n" , |
1260 | tgtport->fc_target_port.port_num, assoc->a_id); |
1261 | } |
1262 | |
1263 | static struct nvmet_fc_tgt_assoc * |
1264 | nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, |
1265 | u64 association_id) |
1266 | { |
1267 | struct nvmet_fc_tgt_assoc *assoc; |
1268 | struct nvmet_fc_tgt_assoc *ret = NULL; |
1269 | |
1270 | rcu_read_lock(); |
1271 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
1272 | if (association_id == assoc->association_id) { |
1273 | ret = assoc; |
1274 | if (!nvmet_fc_tgt_a_get(assoc)) |
1275 | ret = NULL; |
1276 | break; |
1277 | } |
1278 | } |
1279 | rcu_read_unlock(); |
1280 | |
1281 | return ret; |
1282 | } |
1283 | |
1284 | static void |
1285 | nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, |
1286 | struct nvmet_fc_port_entry *pe, |
1287 | struct nvmet_port *port) |
1288 | { |
1289 | lockdep_assert_held(&nvmet_fc_tgtlock); |
1290 | |
1291 | pe->tgtport = tgtport; |
1292 | tgtport->pe = pe; |
1293 | |
1294 | pe->port = port; |
1295 | port->priv = pe; |
1296 | |
1297 | pe->node_name = tgtport->fc_target_port.node_name; |
1298 | pe->port_name = tgtport->fc_target_port.port_name; |
1299 | INIT_LIST_HEAD(&pe->pe_list); |
1300 | |
1301 | list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); |
1302 | } |
1303 | |
1304 | static void |
1305 | nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) |
1306 | { |
1307 | unsigned long flags; |
1308 | |
1309 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1310 | if (pe->tgtport) |
1311 | pe->tgtport->pe = NULL; |
1312 | list_del(&pe->pe_list); |
1313 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1314 | } |
1315 | |
1316 | /* |
1317 | * called when a targetport deregisters. Breaks the relationship |
1318 | * with the nvmet port, but leaves the port_entry in place so that |
1319 | * re-registration can resume operation. |
1320 | */ |
1321 | static void |
1322 | nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) |
1323 | { |
1324 | struct nvmet_fc_port_entry *pe; |
1325 | unsigned long flags; |
1326 | |
1327 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1328 | pe = tgtport->pe; |
1329 | if (pe) |
1330 | pe->tgtport = NULL; |
1331 | tgtport->pe = NULL; |
1332 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1333 | } |
1334 | |
1335 | /* |
1336 | * called when a new targetport is registered. Looks in the |
1337 | * existing nvmet port_entries to see if the nvmet layer is |
1338 | * configured for the targetport's wwn's. (the targetport existed, |
1339 | * nvmet configured, the lldd unregistered the tgtport, and is now |
1340 | * reregistering the same targetport). If so, set the nvmet port |
1341 | * port entry on the targetport. |
1342 | */ |
1343 | static void |
1344 | nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) |
1345 | { |
1346 | struct nvmet_fc_port_entry *pe; |
1347 | unsigned long flags; |
1348 | |
1349 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1350 | list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { |
1351 | if (tgtport->fc_target_port.node_name == pe->node_name && |
1352 | tgtport->fc_target_port.port_name == pe->port_name) { |
1353 | WARN_ON(pe->tgtport); |
1354 | tgtport->pe = pe; |
1355 | pe->tgtport = tgtport; |
1356 | break; |
1357 | } |
1358 | } |
1359 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1360 | } |
1361 | |
1362 | /** |
1363 | * nvmet_fc_register_targetport - transport entry point called by an |
1364 | * LLDD to register the existence of a local |
1365 | * NVME subsystem FC port. |
1366 | * @pinfo: pointer to information about the port to be registered |
1367 | * @template: LLDD entrypoints and operational parameters for the port |
1368 | * @dev: physical hardware device node port corresponds to. Will be |
1369 | * used for DMA mappings |
1370 | * @portptr:   pointer to a target port pointer. Upon success, the routine |
1371 | * will allocate a nvmet_fc_target_port structure and place its |
1372 | * address in the target port pointer. Upon failure, the target |
1373 | * port pointer will be set to NULL. |
1374 | * |
1375 | * Returns: |
1376 | * a completion status. Must be 0 upon success; a negative errno |
1377 | * (ex: -ENXIO) upon failure. |
1378 | */ |
1379 | int |
1380 | nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, |
1381 | struct nvmet_fc_target_template *template, |
1382 | struct device *dev, |
1383 | struct nvmet_fc_target_port **portptr) |
1384 | { |
1385 | struct nvmet_fc_tgtport *newrec; |
1386 | unsigned long flags; |
1387 | int ret, idx; |
1388 | |
1389 | if (!template->xmt_ls_rsp || !template->fcp_op || |
1390 | !template->fcp_abort || |
1391 | !template->fcp_req_release || !template->targetport_delete || |
1392 | !template->max_hw_queues || !template->max_sgl_segments || |
1393 | !template->max_dif_sgl_segments || !template->dma_boundary) { |
1394 | ret = -EINVAL; |
1395 | goto out_regtgt_failed; |
1396 | } |
1397 | |
1398 | newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), |
1399 | GFP_KERNEL); |
1400 | if (!newrec) { |
1401 | ret = -ENOMEM; |
1402 | goto out_regtgt_failed; |
1403 | } |
1404 | |
1405 | idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); |
1406 | if (idx < 0) { |
1407 | ret = -ENOSPC; |
1408 | goto out_fail_kfree; |
1409 | } |
1410 | |
1411 | if (!get_device(dev) && dev) { |
1412 | ret = -ENODEV; |
1413 | goto out_ida_put; |
1414 | } |
1415 | |
1416 | newrec->fc_target_port.node_name = pinfo->node_name; |
1417 | newrec->fc_target_port.port_name = pinfo->port_name; |
1418 | if (template->target_priv_sz) |
1419 | newrec->fc_target_port.private = &newrec[1]; |
1420 | else |
1421 | newrec->fc_target_port.private = NULL; |
1422 | newrec->fc_target_port.port_id = pinfo->port_id; |
1423 | newrec->fc_target_port.port_num = idx; |
1424 | INIT_LIST_HEAD(&newrec->tgt_list); |
1425 | newrec->dev = dev; |
1426 | newrec->ops = template; |
1427 | spin_lock_init(&newrec->lock); |
1428 | INIT_LIST_HEAD(&newrec->ls_rcv_list); |
1429 | INIT_LIST_HEAD(&newrec->ls_req_list); |
1430 | INIT_LIST_HEAD(&newrec->ls_busylist); |
1431 | INIT_LIST_HEAD(&newrec->assoc_list); |
1432 | INIT_LIST_HEAD(&newrec->host_list); |
1433 | kref_init(&newrec->ref); |
1434 | ida_init(&newrec->assoc_cnt); |
1435 | newrec->max_sg_cnt = template->max_sgl_segments; |
1436 | INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); |
1437 | |
1438 | ret = nvmet_fc_alloc_ls_iodlist(newrec); |
1439 | if (ret) { |
1440 | ret = -ENOMEM; |
1441 | goto out_free_newrec; |
1442 | } |
1443 | |
1444 | nvmet_fc_portentry_rebind_tgt(newrec); |
1445 | |
1446 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1447 | list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); |
1448 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1449 | |
1450 | *portptr = &newrec->fc_target_port; |
1451 | return 0; |
1452 | |
1453 | out_free_newrec: |
1454 | put_device(dev); |
1455 | out_ida_put: |
1456 | ida_free(&nvmet_fc_tgtport_cnt, idx); |
1457 | out_fail_kfree: |
1458 | kfree(newrec); |
1459 | out_regtgt_failed: |
1460 | *portptr = NULL; |
1461 | return ret; |
1462 | } |
1463 | EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); |
1464 | |
1465 | |
1466 | static void |
1467 | nvmet_fc_free_tgtport(struct kref *ref) |
1468 | { |
1469 | struct nvmet_fc_tgtport *tgtport = |
1470 | container_of(ref, struct nvmet_fc_tgtport, ref); |
1471 | struct device *dev = tgtport->dev; |
1472 | unsigned long flags; |
1473 | |
1474 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1475 | list_del(&tgtport->tgt_list); |
1476 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1477 | |
1478 | nvmet_fc_free_ls_iodlist(tgtport); |
1479 | |
1480 | /* let the LLDD know we've finished tearing it down */ |
1481 | tgtport->ops->targetport_delete(&tgtport->fc_target_port); |
1482 | |
1483 | ida_free(&nvmet_fc_tgtport_cnt, |
1484 | tgtport->fc_target_port.port_num); |
1485 | |
1486 | ida_destroy(&tgtport->assoc_cnt); |
1487 | |
1488 | kfree(tgtport); |
1489 | |
1490 | put_device(dev); |
1491 | } |
1492 | |
1493 | static void |
1494 | nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) |
1495 | { |
1496 | kref_put(&tgtport->ref, nvmet_fc_free_tgtport); |
1497 | } |
1498 | |
1499 | static int |
1500 | nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) |
1501 | { |
1502 | return kref_get_unless_zero(&tgtport->ref); |
1503 | } |
1504 | |
1505 | static void |
1506 | __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) |
1507 | { |
1508 | struct nvmet_fc_tgt_assoc *assoc; |
1509 | |
1510 | rcu_read_lock(); |
1511 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
1512 | if (!nvmet_fc_tgt_a_get(assoc)) |
1513 | continue; |
1514 | nvmet_fc_schedule_delete_assoc(assoc); |
1515 | nvmet_fc_tgt_a_put(assoc); |
1516 | } |
1517 | rcu_read_unlock(); |
1518 | } |
1519 | |
1520 | /** |
1521 | * nvmet_fc_invalidate_host - transport entry point called by an LLDD |
1522 | * to remove references to a hosthandle for LS's. |
1523 | * |
1524 | * The nvmet-fc layer ensures that any references to the hosthandle |
1525 | * on the targetport are forgotten (set to NULL). The LLDD will |
1526 | * typically call this when a login with a remote host port has been |
1527 | * lost, thus LS's for the remote host port are no longer possible. |
1528 | * |
1529 | * If an LS request is outstanding to the targetport/hosthandle (or |
1530 | * issued concurrently with the call to invalidate the host), the |
1531 | * LLDD is responsible for terminating/aborting the LS and completing |
1532 | * the LS request. It is recommended that these terminations/aborts |
1533 | * occur after calling to invalidate the host handle to avoid additional |
1534 | * retries by the nvmet-fc transport. The nvmet-fc transport may |
1535 | * continue to reference the host handle while it cleans up outstanding |
1536 | * NVME associations. The nvmet-fc transport will call the |
1537 | * ops->host_release() callback to notify the LLDD that all references |
1538 | * are complete and the related host handle can be recovered. |
1539 | * Note: if there are no references, the callback may be called before |
1540 | * the invalidate host call returns. |
1541 | * |
1542 | * @target_port: pointer to the (registered) target port that a prior |
1543 | * LS was received on and which supplied the transport the |
1544 | * hosthandle. |
1545 | * @hosthandle: the handle (pointer) that represents the host port |
1546 | * that no longer has connectivity and that LS's should |
1547 | * no longer be directed to. |
1548 | */ |
1549 | void |
1550 | nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, |
1551 | void *hosthandle) |
1552 | { |
1553 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); |
1554 | struct nvmet_fc_tgt_assoc *assoc, *next; |
1555 | unsigned long flags; |
1556 | bool noassoc = true; |
1557 | |
1558 | spin_lock_irqsave(&tgtport->lock, flags); |
1559 | list_for_each_entry_safe(assoc, next, |
1560 | &tgtport->assoc_list, a_list) { |
1561 | if (assoc->hostport->hosthandle != hosthandle) |
1562 | continue; |
1563 | if (!nvmet_fc_tgt_a_get(assoc)) |
1564 | continue; |
1565 | assoc->hostport->invalid = 1; |
1566 | noassoc = false; |
1567 | nvmet_fc_schedule_delete_assoc(assoc); |
1568 | nvmet_fc_tgt_a_put(assoc); |
1569 | } |
1570 | spin_unlock_irqrestore(&tgtport->lock, flags); |
1571 | |
1572 | /* if there's nothing to wait for - call the callback */ |
1573 | if (noassoc && tgtport->ops->host_release) |
1574 | tgtport->ops->host_release(hosthandle); |
1575 | } |
1576 | EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); |
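/*
 * Sketch of the expected (hypothetical) LLDD usage: when a driver loses its
 * login with a remote host port, it calls
 * nvmet_fc_invalidate_host(target_port, hosthandle) and defers freeing
 * whatever backs hosthandle until its ops->host_release() callback has run;
 * if no associations reference the handle, that callback may fire before
 * this call even returns.
 */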
1577 | |
1578 | /* |
1579 | * nvmet layer has called to terminate an association |
1580 | */ |
1581 | static void |
1582 | nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) |
1583 | { |
1584 | struct nvmet_fc_tgtport *tgtport, *next; |
1585 | struct nvmet_fc_tgt_assoc *assoc; |
1586 | struct nvmet_fc_tgt_queue *queue; |
1587 | unsigned long flags; |
1588 | bool found_ctrl = false; |
1589 | |
1590 | /* this is a bit ugly, but don't want to make locks layered */ |
1591 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1592 | list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, |
1593 | tgt_list) { |
1594 | if (!nvmet_fc_tgtport_get(tgtport)) |
1595 | continue; |
1596 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1597 | |
1598 | rcu_read_lock(); |
1599 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
1600 | queue = assoc->queues[0]; |
1601 | if (queue && queue->nvme_sq.ctrl == ctrl) { |
1602 | if (nvmet_fc_tgt_a_get(assoc)) |
1603 | found_ctrl = true; |
1604 | break; |
1605 | } |
1606 | } |
1607 | rcu_read_unlock(); |
1608 | |
1609 | nvmet_fc_tgtport_put(tgtport); |
1610 | |
1611 | if (found_ctrl) { |
1612 | nvmet_fc_schedule_delete_assoc(assoc); |
1613 | nvmet_fc_tgt_a_put(assoc); |
1614 | return; |
1615 | } |
1616 | |
1617 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1618 | } |
1619 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
1620 | } |
1621 | |
1622 | /** |
1623 | * nvmet_fc_unregister_targetport - transport entry point called by an |
1624 | * LLDD to deregister/remove a previously |
1625 | * registered local NVME subsystem FC port. |
1626 | * @target_port: pointer to the (registered) target port that is to be |
1627 | * deregistered. |
1628 | * |
1629 | * Returns: |
1630 | * a completion status. Must be 0 upon success; a negative errno |
1631 | * (ex: -ENXIO) upon failure. |
1632 | */ |
1633 | int |
1634 | nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) |
1635 | { |
1636 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); |
1637 | |
1638 | nvmet_fc_portentry_unbind_tgt(tgtport); |
1639 | |
1640 | /* terminate any outstanding associations */ |
1641 | __nvmet_fc_free_assocs(tgtport); |
1642 | |
1643 | flush_workqueue(nvmet_wq); |
1644 | |
1645 | /* |
1646 | * should terminate LS's as well. However, LS's will be generated |
1647 | * at the tail end of association termination, so they likely don't |
1648 | * exist yet. And even if they did, it's worthwhile to just let |
1649 | * them finish and targetport ref counting will clean things up. |
1650 | */ |
1651 | |
1652 | nvmet_fc_tgtport_put(tgtport); |
1653 | |
1654 | return 0; |
1655 | } |
1656 | EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); |
1657 | |
1658 | |
1659 | /* ********************** FC-NVME LS RCV Handling ************************* */ |
1660 | |
1661 | |
1662 | static void |
1663 | nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, |
1664 | struct nvmet_fc_ls_iod *iod) |
1665 | { |
1666 | struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; |
1667 | struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; |
1668 | struct nvmet_fc_tgt_queue *queue; |
1669 | int ret = 0; |
1670 | |
1671 | memset(acc, 0, sizeof(*acc)); |
1672 | |
1673 | /* |
1674 | * FC-NVME spec changes. Initiators send different lengths as padding |
1675 | * because the Create Association Cmd descriptor size in earlier specs |
1676 | * was incorrect. |
1677 | * Accept anything of "minimum" length. Assume the format per the 1.15 |
1678 | * spec (with HOSTID reduced to 16 bytes), and ignore how long the |
1679 | * trailing pad is. |
1680 | */ |
1681 | if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) |
1682 | ret = VERR_CR_ASSOC_LEN; |
1683 | else if (be32_to_cpu(rqst->desc_list_len) < |
1684 | FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) |
1685 | ret = VERR_CR_ASSOC_RQST_LEN; |
1686 | else if (rqst->assoc_cmd.desc_tag != |
1687 | cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) |
1688 | ret = VERR_CR_ASSOC_CMD; |
1689 | else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < |
1690 | FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) |
1691 | ret = VERR_CR_ASSOC_CMD_LEN; |
1692 | else if (!rqst->assoc_cmd.ersp_ratio || |
1693 | (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= |
1694 | be16_to_cpu(rqst->assoc_cmd.sqsize))) |
1695 | ret = VERR_ERSP_RATIO; |
1696 | |
1697 | else { |
1698 | /* new association w/ admin queue */ |
		iod->assoc = nvmet_fc_alloc_target_assoc(
						tgtport, iod->hosthandle);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue) {
				ret = VERR_QUEUE_ALLOC_FAIL;
				nvmet_fc_tgt_a_put(iod->assoc);
			}
1710 | } |
1711 | } |
1712 | |
1713 | if (ret) { |
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
1722 | } |
1723 | |
	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	dev_info(tgtport->dev,
		"{%d:%d} Association created\n",
		tgtport->fc_target_port.port_num, iod->assoc->a_id);
1731 | |
1732 | /* format a response */ |
1733 | |
1734 | iod->lsrsp->rsplen = sizeof(*acc); |
1735 | |
	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
1751 | } |
1752 | |
1753 | static void |
1754 | nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, |
1755 | struct nvmet_fc_ls_iod *iod) |
1756 | { |
1757 | struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; |
1758 | struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; |
1759 | struct nvmet_fc_tgt_queue *queue; |
1760 | int ret = 0; |
1761 | |
1762 | memset(acc, 0, sizeof(*acc)); |
1763 | |
1764 | if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) |
1765 | ret = VERR_CR_CONN_LEN; |
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1782 | ret = VERR_CR_CONN_CMD_LEN; |
1783 | else if (!rqst->connect_cmd.ersp_ratio || |
1784 | (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= |
1785 | be16_to_cpu(rqst->connect_cmd.sqsize))) |
1786 | ret = VERR_ERSP_RATIO; |
1787 | |
1788 | else { |
1789 | /* new io queue */ |
1790 | iod->assoc = nvmet_fc_find_target_assoc(tgtport, |
1791 | be64_to_cpu(rqst->associd.association_id)); |
1792 | if (!iod->assoc) |
1793 | ret = VERR_NO_ASSOC; |
1794 | else { |
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
1803 | } |
1804 | } |
1805 | |
1806 | if (ret) { |
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
1816 | return; |
1817 | } |
1818 | |
1819 | queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); |
	atomic_set(&queue->connected, 1);
1821 | queue->sqhd = 0; /* best place to init value */ |
1822 | |
1823 | /* format a response */ |
1824 | |
1825 | iod->lsrsp->rsplen = sizeof(*acc); |
1826 | |
	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
1837 | } |
1838 | |
1839 | /* |
 * Returns true if the LS response is to be transmitted
1841 | * Returns false if the LS response is to be delayed |
1842 | */ |
1843 | static int |
1844 | nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, |
1845 | struct nvmet_fc_ls_iod *iod) |
1846 | { |
1847 | struct fcnvme_ls_disconnect_assoc_rqst *rqst = |
1848 | &iod->rqstbuf->rq_dis_assoc; |
1849 | struct fcnvme_ls_disconnect_assoc_acc *acc = |
1850 | &iod->rspbuf->rsp_dis_assoc; |
1851 | struct nvmet_fc_tgt_assoc *assoc = NULL; |
1852 | struct nvmet_fc_ls_iod *oldls = NULL; |
1853 | unsigned long flags; |
1854 | int ret = 0; |
1855 | |
1856 | memset(acc, 0, sizeof(*acc)); |
1857 | |
	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1859 | if (!ret) { |
1860 | /* match an active association - takes an assoc ref if !NULL */ |
1861 | assoc = nvmet_fc_find_target_assoc(tgtport, |
1862 | be64_to_cpu(rqst->associd.association_id)); |
1863 | iod->assoc = assoc; |
1864 | if (!assoc) |
1865 | ret = VERR_NO_ASSOC; |
1866 | } |
1867 | |
1868 | if (ret || !assoc) { |
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
1878 | return true; |
1879 | } |
1880 | |
1881 | /* format a response */ |
1882 | |
1883 | iod->lsrsp->rsplen = sizeof(*acc); |
1884 | |
	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);
1889 | |
1890 | /* |
1891 | * The rules for LS response says the response cannot |
1892 | * go back until ABTS's have been sent for all outstanding |
1893 | * I/O and a Disconnect Association LS has been sent. |
1894 | * So... save off the Disconnect LS to send the response |
1895 | * later. If there was a prior LS already saved, replace |
1896 | * it with the newer one and send a can't perform reject |
1897 | * on the older one. |
1898 | */ |
1899 | spin_lock_irqsave(&tgtport->lock, flags); |
1900 | oldls = assoc->rcv_disconn; |
1901 | assoc->rcv_disconn = iod; |
	spin_unlock_irqrestore(&tgtport->lock, flags);
1903 | |
1904 | if (oldls) { |
		dev_info(tgtport->dev,
			"{%d:%d} Multiple Disconnect Association LS's "
			"received\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*iod->rspbuf),
						/* ok to use rqst, LS is same */
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1917 | } |
1918 | |
1919 | nvmet_fc_schedule_delete_assoc(assoc); |
1920 | nvmet_fc_tgt_a_put(assoc); |
1921 | |
1922 | return false; |
1923 | } |
1924 | |
1925 | |
1926 | /* *********************** NVME Ctrl Routines **************************** */ |
1927 | |
1928 | |
1929 | static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); |
1930 | |
1931 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; |
1932 | |
1933 | static void |
1934 | nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) |
1935 | { |
1936 | struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; |
1937 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
1938 | |
	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1941 | nvmet_fc_free_ls_iod(tgtport, iod); |
1942 | nvmet_fc_tgtport_put(tgtport); |
1943 | } |
1944 | |
1945 | static void |
1946 | nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, |
1947 | struct nvmet_fc_ls_iod *iod) |
1948 | { |
1949 | int ret; |
1950 | |
	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1957 | } |
1958 | |
1959 | /* |
1960 | * Actual processing routine for received FC-NVME LS Requests from the LLD |
1961 | */ |
1962 | static void |
1963 | nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, |
1964 | struct nvmet_fc_ls_iod *iod) |
1965 | { |
1966 | struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; |
1967 | bool sendrsp = true; |
1968 | |
1969 | iod->lsrsp->nvme_fc_private = iod; |
1970 | iod->lsrsp->rspbuf = iod->rspbuf; |
1971 | iod->lsrsp->rspdma = iod->rspdma; |
1972 | iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; |
	/* Be preventative: handlers will later set to a valid length */
1974 | iod->lsrsp->rsplen = 0; |
1975 | |
1976 | iod->assoc = NULL; |
1977 | |
1978 | /* |
1979 | * handlers: |
1980 | * parse request input, execute the request, and format the |
1981 | * LS response |
1982 | */ |
1983 | switch (w0->ls_cmd) { |
1984 | case FCNVME_LS_CREATE_ASSOCIATION: |
1985 | /* Creates Association and initial Admin Queue/Connection */ |
1986 | nvmet_fc_ls_create_association(tgtport, iod); |
1987 | break; |
1988 | case FCNVME_LS_CREATE_CONNECTION: |
1989 | /* Creates an IO Queue/Connection */ |
1990 | nvmet_fc_ls_create_connection(tgtport, iod); |
1991 | break; |
1992 | case FCNVME_LS_DISCONNECT_ASSOC: |
1993 | /* Terminate a Queue/Connection or the Association */ |
1994 | sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); |
1995 | break; |
1996 | default: |
		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
				sizeof(*iod->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
2000 | } |
2001 | |
2002 | if (sendrsp) |
2003 | nvmet_fc_xmt_ls_rsp(tgtport, iod); |
2004 | } |
2005 | |
2006 | /* |
2007 | * Actual processing routine for received FC-NVME LS Requests from the LLD |
2008 | */ |
2009 | static void |
2010 | nvmet_fc_handle_ls_rqst_work(struct work_struct *work) |
2011 | { |
2012 | struct nvmet_fc_ls_iod *iod = |
2013 | container_of(work, struct nvmet_fc_ls_iod, work); |
2014 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
2015 | |
2016 | nvmet_fc_handle_ls_rqst(tgtport, iod); |
2017 | } |
2018 | |
2019 | |
2020 | /** |
2021 | * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD |
2022 | * upon the reception of a NVME LS request. |
2023 | * |
2024 | * The nvmet-fc layer will copy payload to an internal structure for |
2025 | * processing. As such, upon completion of the routine, the LLDD may |
2026 | * immediately free/reuse the LS request buffer passed in the call. |
2027 | * |
2028 | * If this routine returns error, the LLDD should abort the exchange. |
2029 | * |
2030 | * @target_port: pointer to the (registered) target port the LS was |
2031 | * received on. |
2032 | * @hosthandle: pointer to the host specific data, gets stored in iod. |
2033 | * @lsrsp: pointer to a lsrsp structure to be used to reference |
2034 | * the exchange corresponding to the LS. |
2035 | * @lsreqbuf: pointer to the buffer containing the LS Request |
2036 | * @lsreqbuf_len: length, in bytes, of the received LS request |
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
2038 | int |
2039 | nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, |
2040 | void *hosthandle, |
2041 | struct nvmefc_ls_rsp *lsrsp, |
2042 | void *lsreqbuf, u32 lsreqbuf_len) |
2043 | { |
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2045 | struct nvmet_fc_ls_iod *iod; |
2046 | struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; |
2047 | |
2048 | if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { |
		dev_info(tgtport->dev,
			"RCV %s LS failed: payload too large (%d)\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "",
			lsreqbuf_len);
2054 | return -E2BIG; |
2055 | } |
2056 | |
2057 | if (!nvmet_fc_tgtport_get(tgtport)) { |
		dev_info(tgtport->dev,
			"RCV %s LS failed: target deleting\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
2062 | return -ESHUTDOWN; |
2063 | } |
2064 | |
2065 | iod = nvmet_fc_alloc_ls_iod(tgtport); |
2066 | if (!iod) { |
		dev_info(tgtport->dev,
			"RCV %s LS failed: context allocation failed\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
2071 | nvmet_fc_tgtport_put(tgtport); |
2072 | return -ENOENT; |
2073 | } |
2074 | |
2075 | iod->lsrsp = lsrsp; |
2076 | iod->fcpreq = NULL; |
2077 | memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); |
2078 | iod->rqstdatalen = lsreqbuf_len; |
2079 | iod->hosthandle = hosthandle; |
2080 | |
	queue_work(nvmet_wq, &iod->work);
2082 | |
2083 | return 0; |
2084 | } |
2085 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); |
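
/*
 * Illustrative sketch (not part of the transport): how an LLDD's
 * unsolicited-LS handler might hand a received LS off to nvmet-fc. The
 * lldd_port/lldd_ls_exchange structures and lldd_abort_ls_exchange()
 * helper are hypothetical LLDD-side names, not defined here.
 *
 *	static void lldd_handle_nvme_ls(struct lldd_port *lport,
 *			struct lldd_ls_exchange *xchg,
 *			void *payload, u32 payload_len)
 *	{
 *		if (nvmet_fc_rcv_ls_req(lport->tgtport, xchg->rport_handle,
 *					&xchg->lsrsp, payload, payload_len))
 *			lldd_abort_ls_exchange(xchg);
 *	}
 *
 * On success the LLDD may immediately reuse the payload buffer; the LS
 * response is transmitted later through the template's xmt_ls_rsp() op.
 */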
2086 | |
2087 | |
2088 | /* |
2089 | * ********************** |
2090 | * Start of FCP handling |
2091 | * ********************** |
2092 | */ |
2093 | |
2094 | static int |
2095 | nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) |
2096 | { |
2097 | struct scatterlist *sg; |
2098 | unsigned int nent; |
2099 | |
	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2101 | if (!sg) |
2102 | goto out; |
2103 | |
2104 | fod->data_sg = sg; |
2105 | fod->data_sg_cnt = nent; |
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
2109 | /* note: write from initiator perspective */ |
2110 | fod->next_sg = fod->data_sg; |
2111 | |
2112 | return 0; |
2113 | |
2114 | out: |
2115 | return NVME_SC_INTERNAL; |
2116 | } |
2117 | |
2118 | static void |
2119 | nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) |
2120 | { |
2121 | if (!fod->data_sg || !fod->data_sg_cnt) |
2122 | return; |
2123 | |
	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	sgl_free(fod->data_sg);
2128 | fod->data_sg = NULL; |
2129 | fod->data_sg_cnt = 0; |
2130 | } |
2131 | |
2132 | |
2133 | static bool |
2134 | queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) |
2135 | { |
2136 | u32 sqtail, used; |
2137 | |
2138 | /* egad, this is ugly. And sqtail is just a best guess */ |
	sqtail = atomic_read(&q->sqtail) % q->sqsize;
2140 | |
2141 | used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); |
2142 | return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); |
2143 | } |
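
/*
 * Worked example of the check above (illustrative numbers): with
 * sqsize = 32, sqhd = 5 and a sampled sqtail of 2, used = 2 + 32 - 5 = 29;
 * 29 * 10 = 290 >= (32 - 1) * 9 = 279, so the queue is treated as at
 * least 90% full and an ERSP will be forced for the response.
 */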
2144 | |
2145 | /* |
2146 | * Prep RSP payload. |
2147 | * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op |
2148 | */ |
2149 | static void |
2150 | nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
2151 | struct nvmet_fc_fcp_iod *fod) |
2152 | { |
2153 | struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; |
2154 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
2155 | struct nvme_completion *cqe = &ersp->cqe; |
2156 | u32 *cqewd = (u32 *)cqe; |
2157 | bool send_ersp = false; |
2158 | u32 rsn, rspcnt, xfr_length; |
2159 | |
2160 | if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) |
2161 | xfr_length = fod->req.transfer_len; |
2162 | else |
2163 | xfr_length = fod->offset; |
2164 | |
2165 | /* |
2166 | * check to see if we can send a 0's rsp. |
2167 | * Note: to send a 0's response, the NVME-FC host transport will |
2168 | * recreate the CQE. The host transport knows: sq id, SQHD (last |
2169 | * seen in an ersp), and command_id. Thus it will create a |
2170 | * zero-filled CQE with those known fields filled in. Transport |
2171 | * must send an ersp for any condition where the cqe won't match |
2172 | * this. |
2173 | * |
2174 | * Here are the FC-NVME mandated cases where we must send an ersp: |
2175 | * every N responses, where N=ersp_ratio |
2176 | * force fabric commands to send ersp's (not in FC-NVME but good |
2177 | * practice) |
2178 | * normal cmds: any time status is non-zero, or status is zero |
2179 | * but words 0 or 1 are non-zero. |
2180 | * the SQ is 90% or more full |
2181 | * the cmd is a fused command |
2182 | * transferred data length not equal to cmd iu length |
2183 | */ |
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    nvme_is_fabrics((struct nvme_command *) sqe) ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2191 | send_ersp = true; |
2192 | |
2193 | /* re-set the fields */ |
2194 | fod->fcpreq->rspaddr = ersp; |
2195 | fod->fcpreq->rspdma = fod->rspdma; |
2196 | |
2197 | if (!send_ersp) { |
2198 | memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); |
2199 | fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; |
2200 | } else { |
2201 | ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); |
		rsn = atomic_inc_return(&fod->queue->rsn);
2203 | ersp->rsn = cpu_to_be32(rsn); |
2204 | ersp->xfrd_len = cpu_to_be32(xfr_length); |
2205 | fod->fcpreq->rsplen = sizeof(*ersp); |
2206 | } |
2207 | |
	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2210 | } |
2211 | |
2212 | static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); |
2213 | |
2214 | static void |
2215 | nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, |
2216 | struct nvmet_fc_fcp_iod *fod) |
2217 | { |
2218 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2219 | |
2220 | /* data no longer needed */ |
2221 | nvmet_fc_free_tgt_pgs(fod); |
2222 | |
2223 | /* |
2224 | * if an ABTS was received or we issued the fcp_abort early |
2225 | * don't call abort routine again. |
2226 | */ |
2227 | /* no need to take lock - lock was taken earlier to get here */ |
2228 | if (!fod->aborted) |
2229 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); |
2230 | |
	nvmet_fc_free_fcp_iod(fod->queue, fod);
2232 | } |
2233 | |
2234 | static void |
2235 | nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
2236 | struct nvmet_fc_fcp_iod *fod) |
2237 | { |
2238 | int ret; |
2239 | |
2240 | fod->fcpreq->op = NVMET_FCOP_RSP; |
2241 | fod->fcpreq->timeout = 0; |
2242 | |
2243 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
2244 | |
2245 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
2246 | if (ret) |
2247 | nvmet_fc_abort_op(tgtport, fod); |
2248 | } |
2249 | |
2250 | static void |
2251 | nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, |
2252 | struct nvmet_fc_fcp_iod *fod, u8 op) |
2253 | { |
2254 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2255 | struct scatterlist *sg = fod->next_sg; |
2256 | unsigned long flags; |
2257 | u32 remaininglen = fod->req.transfer_len - fod->offset; |
2258 | u32 tlen = 0; |
2259 | int ret; |
2260 | |
2261 | fcpreq->op = op; |
2262 | fcpreq->offset = fod->offset; |
2263 | fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; |
2264 | |
2265 | /* |
2266 | * for next sequence: |
2267 | * break at a sg element boundary |
2268 | * attempt to keep sequence length capped at |
2269 | * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to |
2270 | * be longer if a single sg element is larger |
2271 | * than that amount. This is done to avoid creating |
2272 | * a new sg list to use for the tgtport api. |
2273 | */ |
2274 | fcpreq->sg = sg; |
2275 | fcpreq->sg_cnt = 0; |
2276 | while (tlen < remaininglen && |
2277 | fcpreq->sg_cnt < tgtport->max_sg_cnt && |
2278 | tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { |
2279 | fcpreq->sg_cnt++; |
2280 | tlen += sg_dma_len(sg); |
2281 | sg = sg_next(sg); |
2282 | } |
2283 | if (tlen < remaininglen && fcpreq->sg_cnt == 0) { |
2284 | fcpreq->sg_cnt++; |
2285 | tlen += min_t(u32, sg_dma_len(sg), remaininglen); |
2286 | sg = sg_next(sg); |
2287 | } |
2288 | if (tlen < remaininglen) |
2289 | fod->next_sg = sg; |
2290 | else |
2291 | fod->next_sg = NULL; |
2292 | |
2293 | fcpreq->transfer_length = tlen; |
2294 | fcpreq->transferred_length = 0; |
2295 | fcpreq->fcp_error = 0; |
2296 | fcpreq->rsplen = 0; |
2297 | |
2298 | /* |
2299 | * If the last READDATA request: check if LLDD supports |
2300 | * combined xfr with response. |
2301 | */ |
2302 | if ((op == NVMET_FCOP_READDATA) && |
2303 | ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && |
2304 | (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { |
2305 | fcpreq->op = NVMET_FCOP_READDATA_RSP; |
2306 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
2307 | } |
2308 | |
2309 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
2310 | if (ret) { |
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
2328 | } |
2329 | } |
2330 | |
2331 | static inline bool |
2332 | __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) |
2333 | { |
2334 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2335 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
2336 | |
2337 | /* if in the middle of an io and we need to tear down */ |
2338 | if (abort) { |
2339 | if (fcpreq->op == NVMET_FCOP_WRITEDATA) { |
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2341 | return true; |
2342 | } |
2343 | |
2344 | nvmet_fc_abort_op(tgtport, fod); |
2345 | return true; |
2346 | } |
2347 | |
2348 | return false; |
2349 | } |
2350 | |
2351 | /* |
2352 | * actual done handler for FCP operations when completed by the lldd |
2353 | */ |
2354 | static void |
2355 | nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) |
2356 | { |
2357 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2358 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
2359 | unsigned long flags; |
2360 | bool abort; |
2361 | |
2362 | spin_lock_irqsave(&fod->flock, flags); |
2363 | abort = fod->abort; |
2364 | fod->writedataactive = false; |
	spin_unlock_irqrestore(&fod->flock, flags);
2366 | |
2367 | switch (fcpreq->op) { |
2368 | |
2369 | case NVMET_FCOP_WRITEDATA: |
2370 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2371 | return; |
2372 | if (fcpreq->fcp_error || |
2373 | fcpreq->transferred_length != fcpreq->transfer_length) { |
			spin_lock_irqsave(&fod->flock, flags);
			fod->abort = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2379 | return; |
2380 | } |
2381 | |
2382 | fod->offset += fcpreq->transferred_length; |
2383 | if (fod->offset != fod->req.transfer_len) { |
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
2391 | return; |
2392 | } |
2393 | |
2394 | /* data transfer complete, resume with nvmet layer */ |
2395 | fod->req.execute(&fod->req); |
2396 | break; |
2397 | |
2398 | case NVMET_FCOP_READDATA: |
2399 | case NVMET_FCOP_READDATA_RSP: |
2400 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2401 | return; |
2402 | if (fcpreq->fcp_error || |
2403 | fcpreq->transferred_length != fcpreq->transfer_length) { |
2404 | nvmet_fc_abort_op(tgtport, fod); |
2405 | return; |
2406 | } |
2407 | |
2408 | /* success */ |
2409 | |
2410 | if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { |
2411 | /* data no longer needed */ |
2412 | nvmet_fc_free_tgt_pgs(fod); |
			nvmet_fc_free_fcp_iod(fod->queue, fod);
2414 | return; |
2415 | } |
2416 | |
2417 | fod->offset += fcpreq->transferred_length; |
2418 | if (fod->offset != fod->req.transfer_len) { |
2419 | /* transfer the next chunk */ |
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
2422 | return; |
2423 | } |
2424 | |
2425 | /* data transfer complete, send response */ |
2426 | |
2427 | /* data no longer needed */ |
2428 | nvmet_fc_free_tgt_pgs(fod); |
2429 | |
2430 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
2431 | |
2432 | break; |
2433 | |
2434 | case NVMET_FCOP_RSP: |
2435 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2436 | return; |
		nvmet_fc_free_fcp_iod(fod->queue, fod);
2438 | break; |
2439 | |
2440 | default: |
2441 | break; |
2442 | } |
2443 | } |
2444 | |
2445 | static void |
2446 | nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) |
2447 | { |
2448 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
2449 | |
2450 | nvmet_fc_fod_op_done(fod); |
2451 | } |
2452 | |
2453 | /* |
2454 | * actual completion handler after execution by the nvmet layer |
2455 | */ |
2456 | static void |
2457 | __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, |
2458 | struct nvmet_fc_fcp_iod *fod, int status) |
2459 | { |
2460 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
2461 | struct nvme_completion *cqe = &fod->rspiubuf.cqe; |
2462 | unsigned long flags; |
2463 | bool abort; |
2464 | |
2465 | spin_lock_irqsave(&fod->flock, flags); |
2466 | abort = fod->abort; |
	spin_unlock_irqrestore(&fod->flock, flags);
2468 | |
2469 | /* if we have a CQE, snoop the last sq_head value */ |
2470 | if (!status) |
2471 | fod->queue->sqhd = cqe->sq_head; |
2472 | |
2473 | if (abort) { |
2474 | nvmet_fc_abort_op(tgtport, fod); |
2475 | return; |
2476 | } |
2477 | |
2478 | /* if an error handling the cmd post initial parsing */ |
2479 | if (status) { |
2480 | /* fudge up a failed CQE status for our transport error */ |
2481 | memset(cqe, 0, sizeof(*cqe)); |
2482 | cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ |
2483 | cqe->sq_id = cpu_to_le16(fod->queue->qid); |
2484 | cqe->command_id = sqe->command_id; |
2485 | cqe->status = cpu_to_le16(status); |
2486 | } else { |
2487 | |
2488 | /* |
2489 | * try to push the data even if the SQE status is non-zero. |
2490 | * There may be a status where data still was intended to |
2491 | * be moved |
2492 | */ |
2493 | if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { |
2494 | /* push the data over before sending rsp */ |
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
2497 | return; |
2498 | } |
2499 | |
2500 | /* writes & no data - fall thru */ |
2501 | } |
2502 | |
2503 | /* data no longer needed */ |
2504 | nvmet_fc_free_tgt_pgs(fod); |
2505 | |
2506 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
2507 | } |
2508 | |
2509 | |
2510 | static void |
2511 | nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) |
2512 | { |
2513 | struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); |
2514 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
2515 | |
	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2517 | } |
2518 | |
2519 | |
2520 | /* |
2521 | * Actual processing routine for received FC-NVME I/O Requests from the LLD |
2522 | */ |
2523 | static void |
2524 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
2525 | struct nvmet_fc_fcp_iod *fod) |
2526 | { |
2527 | struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; |
2528 | u32 xfrlen = be32_to_cpu(cmdiu->data_len); |
2529 | int ret; |
2530 | |
	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands or hold off delivery to the upper
	 * layer (based on csn) until both commands have been received.
	 */
2539 | |
2540 | fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; |
2541 | |
2542 | if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { |
2543 | fod->io_dir = NVMET_FCP_WRITE; |
		if (!nvme_is_write(&cmdiu->sqe))
2545 | goto transport_error; |
2546 | } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { |
2547 | fod->io_dir = NVMET_FCP_READ; |
		if (nvme_is_write(&cmdiu->sqe))
2549 | goto transport_error; |
2550 | } else { |
2551 | fod->io_dir = NVMET_FCP_NODATA; |
2552 | if (xfrlen) |
2553 | goto transport_error; |
2554 | } |
2555 | |
2556 | fod->req.cmd = &fod->cmdiubuf.sqe; |
2557 | fod->req.cqe = &fod->rspiubuf.cqe; |
2558 | if (!tgtport->pe) |
2559 | goto transport_error; |
2560 | fod->req.port = tgtport->pe->port; |
2561 | |
2562 | /* clear any response payload */ |
2563 | memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); |
2564 | |
2565 | fod->data_sg = NULL; |
2566 | fod->data_sg_cnt = 0; |
2567 | |
	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
2572 | if (!ret) { |
2573 | /* bad SQE content or invalid ctrl state */ |
2574 | /* nvmet layer has already called op done to send rsp. */ |
2575 | return; |
2576 | } |
2577 | |
2578 | fod->req.transfer_len = xfrlen; |
2579 | |
2580 | /* keep a running counter of tail position */ |
	atomic_inc(&fod->queue->sqtail);
2582 | |
2583 | if (fod->req.transfer_len) { |
2584 | ret = nvmet_fc_alloc_tgt_pgs(fod); |
2585 | if (ret) { |
			nvmet_req_complete(&fod->req, ret);
2587 | return; |
2588 | } |
2589 | } |
2590 | fod->req.sg = fod->data_sg; |
2591 | fod->req.sg_cnt = fod->data_sg_cnt; |
2592 | fod->offset = 0; |
2593 | |
2594 | if (fod->io_dir == NVMET_FCP_WRITE) { |
2595 | /* pull the data over before invoking nvmet layer */ |
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2597 | return; |
2598 | } |
2599 | |
2600 | /* |
2601 | * Reads or no data: |
2602 | * |
2603 | * can invoke the nvmet_layer now. If read data, cmd completion will |
2604 | * push the data |
2605 | */ |
2606 | fod->req.execute(&fod->req); |
2607 | return; |
2608 | |
2609 | transport_error: |
2610 | nvmet_fc_abort_op(tgtport, fod); |
2611 | } |
2612 | |
2613 | /** |
2614 | * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD |
2615 | * upon the reception of a NVME FCP CMD IU. |
2616 | * |
2617 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
2618 | * layer for processing. |
2619 | * |
2620 | * The nvmet_fc layer allocates a local job structure (struct |
2621 | * nvmet_fc_fcp_iod) from the queue for the io and copies the |
2622 | * CMD IU buffer to the job structure. As such, on a successful |
2623 | * completion (returns 0), the LLDD may immediately free/reuse |
2624 | * the CMD IU buffer passed in the call. |
2625 | * |
 * However, in some circumstances, due to the packetized nature of FC
 * and the api of the FC LLDD - where the LLDD may issue a hw command
 * to send the response but not receive the hw completion for that
 * command (and thus upcall the nvmet_fc layer) before a new command is
 * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. It gives
 * the appearance of more commands received than fit in the sq.
2633 | * To alleviate this scenario, a temporary queue is maintained in the |
2634 | * transport for pending LLDD requests waiting for a queue job structure. |
2635 | * In these "overrun" cases, a temporary queue element is allocated |
2636 | * the LLDD request and CMD iu buffer information remembered, and the |
2637 | * routine returns a -EOVERFLOW status. Subsequently, when a queue job |
2638 | * structure is freed, it is immediately reallocated for anything on the |
2639 | * pending request list. The LLDDs defer_rcv() callback is called, |
2640 | * informing the LLDD that it may reuse the CMD IU buffer, and the io |
2641 | * is then started normally with the transport. |
2642 | * |
2643 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat |
2644 | * the completion as successful but must not reuse the CMD IU buffer |
2645 | * until the LLDD's defer_rcv() callback has been called for the |
2646 | * corresponding struct nvmefc_tgt_fcp_req pointer. |
2647 | * |
2648 | * If there is any other condition in which an error occurs, the |
2649 | * transport will return a non-zero status indicating the error. |
2650 | * In all cases other than -EOVERFLOW, the transport has not accepted the |
2651 | * request and the LLDD should abort the exchange. |
2652 | * |
2653 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
2654 | * was received on. |
2655 | * @fcpreq: pointer to a fcpreq request structure to be used to reference |
2656 | * the exchange corresponding to the FCP Exchange. |
2657 | * @cmdiubuf: pointer to the buffer containing the FCP CMD IU |
2658 | * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU |
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
2660 | int |
2661 | nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, |
2662 | struct nvmefc_tgt_fcp_req *fcpreq, |
2663 | void *cmdiubuf, u32 cmdiubuf_len) |
2664 | { |
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2666 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
2667 | struct nvmet_fc_tgt_queue *queue; |
2668 | struct nvmet_fc_fcp_iod *fod; |
2669 | struct nvmet_fc_defer_fcp_req *deferfcp; |
2670 | unsigned long flags; |
2671 | |
2672 | /* validate iu, so the connection id can be used to find the queue */ |
2673 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
2674 | (cmdiu->format_id != NVME_CMD_FORMAT_ID) || |
2675 | (cmdiu->fc_id != NVME_CMD_FC_ID) || |
2676 | (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) |
2677 | return -EIO; |
2678 | |
2679 | queue = nvmet_fc_find_target_queue(tgtport, |
2680 | be64_to_cpu(cmdiu->connection_id)); |
2681 | if (!queue) |
2682 | return -ENOTCONN; |
2683 | |
2684 | /* |
2685 | * note: reference taken by find_target_queue |
2686 | * After successful fod allocation, the fod will inherit the |
2687 | * ownership of that reference and will remove the reference |
2688 | * when the fod is freed. |
2689 | */ |
2690 | |
2691 | spin_lock_irqsave(&queue->qlock, flags); |
2692 | |
2693 | fod = nvmet_fc_alloc_fcp_iod(queue); |
2694 | if (fod) { |
		spin_unlock_irqrestore(&queue->qlock, flags);
2696 | |
2697 | fcpreq->nvmet_fc_private = fod; |
2698 | fod->fcpreq = fcpreq; |
2699 | |
2700 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); |
2701 | |
2702 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); |
2703 | |
2704 | return 0; |
2705 | } |
2706 | |
2707 | if (!tgtport->ops->defer_rcv) { |
		spin_unlock_irqrestore(&queue->qlock, flags);
2709 | /* release the queue lookup reference */ |
2710 | nvmet_fc_tgt_q_put(queue); |
2711 | return -ENOENT; |
2712 | } |
2713 | |
2714 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
2715 | struct nvmet_fc_defer_fcp_req, req_list); |
2716 | if (deferfcp) { |
2717 | /* Just re-use one that was previously allocated */ |
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2724 | if (!deferfcp) { |
2725 | /* release the queue lookup reference */ |
2726 | nvmet_fc_tgt_q_put(queue); |
2727 | return -ENOMEM; |
2728 | } |
2729 | spin_lock_irqsave(&queue->qlock, flags); |
2730 | } |
2731 | |
2732 | /* For now, use rspaddr / rsplen to save payload information */ |
2733 | fcpreq->rspaddr = cmdiubuf; |
2734 | fcpreq->rsplen = cmdiubuf_len; |
2735 | deferfcp->fcp_req = fcpreq; |
2736 | |
2737 | /* defer processing till a fod becomes available */ |
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2739 | |
2740 | /* NOTE: the queue lookup reference is still valid */ |
2741 | |
	spin_unlock_irqrestore(&queue->qlock, flags);
2743 | |
2744 | return -EOVERFLOW; |
2745 | } |
2746 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
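
/*
 * Illustrative sketch (not part of the transport): how an LLDD receive
 * path might react to the return codes documented above. The xchg
 * structure and the lldd_*() helpers are hypothetical LLDD-side names.
 *
 *	ret = nvmet_fc_rcv_fcp_req(lport->tgtport, &xchg->tgt_fcp_req,
 *				   cmdiubuf, cmdiubuf_len);
 *	if (!ret) {
 *		// accepted: the CMD IU buffer may be reused immediately
 *	} else if (ret == -EOVERFLOW) {
 *		// accepted, but the CMD IU buffer must be preserved until
 *		// the defer_rcv() callback is invoked for this request
 *		lldd_hold_cmd_buffer(xchg);
 *	} else {
 *		// not accepted: terminate the exchange
 *		lldd_abort_exchange(xchg);
 *	}
 */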
2747 | |
2748 | /** |
2749 | * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD |
2750 | * upon the reception of an ABTS for a FCP command |
2751 | * |
2752 | * Notify the transport that an ABTS has been received for a FCP command |
2753 | * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The |
2754 | * LLDD believes the command is still being worked on |
2755 | * (template_ops->fcp_req_release() has not been called). |
2756 | * |
2757 | * The transport will wait for any outstanding work (an op to the LLDD, |
2758 | * which the lldd should complete with error due to the ABTS; or the |
2759 | * completion from the nvmet layer of the nvme command), then will |
 * stop processing and call the LLDD's fcp_req_release() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2762 | * to the ABTS either after return from this function (assuming any |
2763 | * outstanding op work has been terminated) or upon the callback being |
2764 | * called. |
2765 | * |
2766 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
2767 | * was received on. |
2768 | * @fcpreq: pointer to the fcpreq request structure that corresponds |
2769 | * to the exchange that received the ABTS. |
 *             to the exchange that received the ABTS.
 */
2771 | void |
2772 | nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, |
2773 | struct nvmefc_tgt_fcp_req *fcpreq) |
2774 | { |
2775 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
2776 | struct nvmet_fc_tgt_queue *queue; |
2777 | unsigned long flags; |
2778 | |
2779 | if (!fod || fod->fcpreq != fcpreq) |
2780 | /* job appears to have already completed, ignore abort */ |
2781 | return; |
2782 | |
2783 | queue = fod->queue; |
2784 | |
2785 | spin_lock_irqsave(&queue->qlock, flags); |
2786 | if (fod->active) { |
2787 | /* |
2788 | * mark as abort. The abort handler, invoked upon completion |
2789 | * of any work, will detect the aborted status and do the |
2790 | * callback. |
2791 | */ |
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
2796 | } |
	spin_unlock_irqrestore(&queue->qlock, flags);
2798 | } |
2799 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); |
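
/*
 * Typical LLDD usage (illustrative): on reception of an ABTS for an
 * exchange previously handed to nvmet_fc_rcv_fcp_req(), the LLDD calls
 * nvmet_fc_rcv_fcp_abort() with the same nvmefc_tgt_fcp_req, completes
 * any outstanding fcp_op() it owns with an error, and sends the BA_ACC
 * either after this call returns or once fcp_req_release() is invoked.
 */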
2800 | |
2801 | |
2802 | struct nvmet_fc_traddr { |
2803 | u64 nn; |
2804 | u64 pn; |
2805 | }; |
2806 | |
2807 | static int |
2808 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
2809 | { |
2810 | u64 token64; |
2811 | |
	if (match_u64(sstr, &token64))
2813 | return -EINVAL; |
2814 | *val = token64; |
2815 | |
2816 | return 0; |
2817 | } |
2818 | |
2819 | /* |
2820 | * This routine validates and extracts the WWN's from the TRADDR string. |
2821 | * As kernel parsers need the 0x to determine number base, universally |
2822 | * build string to parse with 0x prefix before parsing name strings. |
2823 | */ |
2824 | static int |
2825 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) |
2826 | { |
2827 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; |
2828 | substring_t wwn = { name, &name[sizeof(name)-1] }; |
2829 | int nnoffset, pnoffset; |
2830 | |
2831 | /* validate if string is one of the 2 allowed formats */ |
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
2843 | nnoffset = NVME_FC_TRADDR_NNLEN; |
2844 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; |
2845 | } else |
2846 | goto out_einval; |
2847 | |
2848 | name[0] = '0'; |
2849 | name[1] = 'x'; |
2850 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; |
2851 | |
2852 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); |
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2854 | goto out_einval; |
2855 | |
2856 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); |
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2858 | goto out_einval; |
2859 | |
2860 | return 0; |
2861 | |
2862 | out_einval: |
	pr_warn("%s: bad traddr string\n", __func__);
2864 | return -EINVAL; |
2865 | } |
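
/*
 * Example traddr strings accepted by the parser above (the WWNN/WWPN
 * values are arbitrary samples, not real ports):
 *
 *	nn-0x20000090fa942779:pn-0x10000090fa942779
 *	nn-20000090fa942779:pn-10000090fa942779
 *
 * Both forms are normalized by prefixing "0x" to the 16-hex-digit names
 * before they are parsed into traddr->nn and traddr->pn.
 */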
2866 | |
2867 | static int |
2868 | nvmet_fc_add_port(struct nvmet_port *port) |
2869 | { |
2870 | struct nvmet_fc_tgtport *tgtport; |
2871 | struct nvmet_fc_port_entry *pe; |
2872 | struct nvmet_fc_traddr traddr = { 0L, 0L }; |
2873 | unsigned long flags; |
2874 | int ret; |
2875 | |
2876 | /* validate the address info */ |
2877 | if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || |
2878 | (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) |
2879 | return -EINVAL; |
2880 | |
2881 | /* map the traddr address info to a target port */ |
2882 | |
	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
2885 | if (ret) |
2886 | return ret; |
2887 | |
	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2889 | if (!pe) |
2890 | return -ENOMEM; |
2891 | |
2892 | ret = -ENXIO; |
2893 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
2894 | list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { |
2895 | if ((tgtport->fc_target_port.node_name == traddr.nn) && |
2896 | (tgtport->fc_target_port.port_name == traddr.pn)) { |
			/* an FC port can only map to 1 nvmet port id */
2898 | if (!tgtport->pe) { |
2899 | nvmet_fc_portentry_bind(tgtport, pe, port); |
2900 | ret = 0; |
2901 | } else |
2902 | ret = -EALREADY; |
2903 | break; |
2904 | } |
2905 | } |
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2907 | |
2908 | if (ret) |
		kfree(pe);
2910 | |
2911 | return ret; |
2912 | } |
2913 | |
2914 | static void |
2915 | nvmet_fc_remove_port(struct nvmet_port *port) |
2916 | { |
2917 | struct nvmet_fc_port_entry *pe = port->priv; |
2918 | |
2919 | nvmet_fc_portentry_unbind(pe); |
2920 | |
2921 | /* terminate any outstanding associations */ |
	__nvmet_fc_free_assocs(pe->tgtport);

	kfree(pe);
2925 | } |
2926 | |
2927 | static void |
2928 | nvmet_fc_discovery_chg(struct nvmet_port *port) |
2929 | { |
2930 | struct nvmet_fc_port_entry *pe = port->priv; |
2931 | struct nvmet_fc_tgtport *tgtport = pe->tgtport; |
2932 | |
2933 | if (tgtport && tgtport->ops->discovery_event) |
2934 | tgtport->ops->discovery_event(&tgtport->fc_target_port); |
2935 | } |
2936 | |
2937 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
2938 | .owner = THIS_MODULE, |
2939 | .type = NVMF_TRTYPE_FC, |
2940 | .msdbd = 1, |
2941 | .add_port = nvmet_fc_add_port, |
2942 | .remove_port = nvmet_fc_remove_port, |
2943 | .queue_response = nvmet_fc_fcp_nvme_cmd_done, |
2944 | .delete_ctrl = nvmet_fc_delete_ctrl, |
2945 | .discovery_chg = nvmet_fc_discovery_chg, |
2946 | }; |
2947 | |
2948 | static int __init nvmet_fc_init_module(void) |
2949 | { |
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2951 | } |
2952 | |
2953 | static void __exit nvmet_fc_exit_module(void) |
2954 | { |
2955 | /* ensure any shutdown operation, e.g. delete ctrls have finished */ |
2956 | flush_workqueue(nvmet_wq); |
2957 | |
2958 | /* sanity check - all lports should be removed */ |
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
2965 | } |
2966 | |
2967 | module_init(nvmet_fc_init_module); |
2968 | module_exit(nvmet_fc_exit_module); |
2969 | |
MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");
2972 | |