// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 * - subsystems list
 * - per-subsystem allowed hosts list
 * - allow_any_host subsystem attribute
 * - nvmet_genctr
 * - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link), the read lock is taken to allow concurrent readers.
 */
DECLARE_RWSEM(nvmet_config_sem);
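
/*
 * Illustrative sketch (not part of the original file): a reader of the
 * discovery-relevant configuration brackets its walk with the read side
 * of the semaphore, e.g.:
 *
 *	down_read(&nvmet_config_sem);
 *	list_for_each_entry(p, &port->subsystems, entry)
 *		...;
 *	up_read(&nvmet_config_sem);
 *
 * while configfs update paths take down_write()/up_write() instead.
 * See nvmet_port_send_ana_event() below for an in-tree example.
 */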

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
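
/*
 * Worked example (illustrative, constants from include/linux/nvme.h): a
 * namespace-changed notice with event_type NVME_AER_NOTICE (0x2),
 * event_info NVME_AER_NOTICE_NS_CHANGED (0x0) and log_page
 * NVME_LOG_CHANGED_NS (0x4) yields a completion result dword of
 * 0x2 | (0x0 << 8) | (0x4 << 16) == 0x00040002.
 */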

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
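
/*
 * Illustrative sketch (not from this file): a fabrics transport module
 * typically fills in a static nvmet_fabrics_ops and registers it from its
 * module_init(); the exact field set shown here is an assumption for the
 * example:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	return nvmet_register_transport(&nvmet_foo_ops);
 *
 * nvmet_enable_port() below resolves the ops by port->disc_addr.trtype.
 */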

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
			port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	/*
	 * If the transport didn't set the max_queue_size properly, then clamp
	 * it to the target limits. Also set default values in case the
	 * transport didn't set it at all.
	 */
	if (port->max_queue_size < 0)
		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
	else
		port->max_queue_size = clamp_t(int, port->max_queue_size,
					       NVMET_MIN_QUEUE_SIZE,
					       NVMET_MAX_QUEUE_SIZE);

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);

	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock(). Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}

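/*
 * Advance the submission queue head lock-free: try_cmpxchg() retries the
 * modulo-size increment until no other completion raced with us, so the
 * completion path needs no spinlock for sq_head accounting.
 */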
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			return nvmet_file_parse_io_cmd(req);
		return nvmet_bdev_parse_io_cmd(req);
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return nvmet_bdev_zns_parse_io_cmd(req);
		return NVME_SC_INVALID_IO_CMD_SET;
	default:
		return NVME_SC_INVALID_IO_CMD_SET;
	}
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->reset_tbkas = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
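
/*
 * Usage sketch (illustrative): backend command handlers call this as a
 * guard before touching the data buffer, since on mismatch the request is
 * already completed on their behalf:
 *
 *	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
 *		return;	// req completed with SGL_INVALID_DATA | DNR
 *
 * nvmet_rw_data_len() here is the nvmet.h helper deriving the expected
 * byte count from the command's NLB field and the namespace block size.
 */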

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}

	req->p2p_dev = p2p_dev;

	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
	    !req->sq->ctrl || !req->sq->qid || !req->ns)
		return NULL;
	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
		req->p2p_dev = NULL;
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
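
/*
 * Worked example (illustrative): a host enabling an NVM controller usually
 * writes CC = 0x00460001, i.e. 1 | (6 << 16) | (4 << 20), which these
 * accessors decode as EN=1, CSS=0 (NVM command set), MPS=0 (4KiB pages),
 * AMS=0, SHN=0, IOSQES=6 (64-byte SQEs) and IOCQES=4 (16-byte CQEs).
 */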

static inline bool nvmet_css_supported(u8 cc_css)
{
	switch (cc_css << NVME_CC_CSS_SHIFT) {
	case NVME_CC_CSS_NVM:
	case NVME_CC_CSS_CSI:
		return true;
	default:
		return false;
	}
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller. Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* Controller supports one or more I/O Command Sets */
	ctrl->cap |= (1ULL << 43);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	if (ctrl->ops->get_max_queue_size)
		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
				   ctrl->port->max_queue_size) - 1;
	else
		ctrl->cap |= ctrl->port->max_queue_size - 1;

	if (nvmet_is_passthru_subsys(ctrl->subsys))
		nvmet_passthrough_override_cap(ctrl);
}
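
/*
 * CAP layout recap (illustrative): bits 37 and 43 are the CSS flags for
 * "NVM command set" and "one or more I/O command sets", bits 31:24 hold
 * CAP.TO (15 * 500ms = 7.5s enable timeout here), and bits 15:0 hold MQES
 * as a zero's-based value, hence the "- 1" on max_queue_size above.
 */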

struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = NULL;
	struct nvmet_subsys *subsys;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			/* ctrl found */
			goto found;
		}
	}

	ctrl = NULL; /* ctrl not found */
	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);

found:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
out:
	return ctrl;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!nvmet_check_auth_status(req))) {
		pr_warn("qid %d not authenticated\n", req->sq->qid);
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out;
	}

	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	ctrl->port = req->port;
	ctrl->ops = req->ops;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	/* By default, loop targets clear the IDs */
	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
		subsys->clear_ids = 1;
#endif

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
	nvmet_init_cap(ctrl);
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			    sizeof(struct nvmet_sq *),
			    GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_changed_ns_list;

	ret = ida_alloc_range(&cntlid_ida,
			      subsys->cntlid_min, subsys->cntlid_max,
			      GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	nvmet_destroy_auth(ctrl);

	ida_free(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		queue_work(nvmet_wq, &ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
		if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
			up_read(&nvmet_config_sem);
			return nvmet_disc_subsys;
		}
	}
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;
	char serial[NVMET_SN_MAX_SIZE / 2];
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&serial, sizeof(serial));
	bin2hex(subsys->serial, &serial, sizeof(serial));

	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
	if (!subsys->model_number) {
		ret = -ENOMEM;
		goto free_subsys;
	}

	subsys->ieee_oui = 0;

	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
	if (!subsys->firmware_rev) {
		ret = -ENOMEM;
		goto free_mn;
	}

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
	case NVME_NQN_CURR:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		ret = -EINVAL;
		goto free_fr;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		ret = -ENOMEM;
		goto free_fr;
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;

free_fr:
	kfree(subsys->firmware_rev);
free_mn:
	kfree(subsys->model_number);
free_subsys:
	kfree(subsys);
	return ERR_PTR(ret);
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree(subsys->model_number);
	kfree(subsys->firmware_rev);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error = -ENOMEM;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!nvmet_bvec_cache)
		return -ENOMEM;

	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
	if (!zbd_wq)
		goto out_destroy_bvec_cache;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq)
		goto out_free_zbd_work_queue;

	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
	if (!nvmet_wq)
		goto out_free_buffered_work_queue;

	error = nvmet_init_discovery();
	if (error)
		goto out_free_nvmet_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_nvmet_work_queue:
	destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
	destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
	destroy_workqueue(zbd_wq);
out_destroy_bvec_cache:
	kmem_cache_destroy(nvmet_bvec_cache);
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(nvmet_wq);
	destroy_workqueue(buffered_io_wq);
	destroy_workqueue(zbd_wq);
	kmem_cache_destroy(nvmet_bvec_cache);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");