// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kstrtox.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
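
/*
 * For scale, assuming the module defaults below: a 1024-entry I/O queue with
 * standard 64-byte (1 << 6) submission entries needs a 64 KiB SQ and, with
 * 16-byte completion entries, a 16 KiB CQ.
 */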

#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	8192
#define NVME_MAX_SEGS	128
#define NVME_MAX_NR_ALLOCATIONS	5

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when the average request segment size is larger than or "
	"equal to this size. Use 0 to disable SGLs.");

#define NVME_PCI_MIN_QUEUE_SIZE 2
#define NVME_PCI_MAX_QUEUE_SIZE 4095
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_uint,
};

static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2 and < 4096");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret;

	ret = kstrtouint(val, 10, &n);
	if (ret != 0 || n > num_possible_cpus())
		return -EINVAL;
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops io_queue_count_ops = {
	.set = io_queue_count_set,
	.get = param_get_uint,
};

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

static bool noacpi;
module_param(noacpi, bool, 0444);
MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);

/*
 * Represents an NVM Express device. Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	u32 q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;
	bool hmb;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	__le32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	__le32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
			NVME_PCI_MAX_QUEUE_SIZE);
}

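/*
 * Index helpers for the shadow doorbell (dbbuf) arrays. They mirror the
 * controller's doorbell register layout: each queue owns two 32-bit slots,
 * the SQ tail doorbell followed by the CQ head doorbell, spaced by the
 * doorbell stride advertised in CAP.DSTRD.
 */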
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	/* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	__le32 *dbbuf_sq_db;
	__le32 *dbbuf_cq_db;
	__le32 *dbbuf_sq_ei;
	__le32 *dbbuf_cq_ei;
	struct completion delete_done;
};

union nvme_descriptor {
	struct nvme_sgl_desc	*sg_list;
	__le64			*prp_list;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	bool aborted;
	s8 nr_allocations;	/* PRP list pool allocations. 0 means small
				   pool in use */
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t first_dma;
	dma_addr_t meta_dma;
	struct sg_table sgt;
	union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
};

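/*
 * Size of one shadow doorbell (or event index) buffer: every allocated queue
 * contributes a 32-bit SQ slot and a 32-bit CQ slot (8 bytes), scaled by the
 * doorbell stride.
 */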
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
	return dev->nr_allocated_queues * 8 * dev->db_stride;
}

static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
		return;

	if (dev->dbbuf_dbs) {
		/*
		 * Clear the dbbuf memory so the driver doesn't observe stale
		 * values from the previous instantiation.
		 */
		memset(dev->dbbuf_dbs, 0, mem_size);
		memset(dev->dbbuf_eis, 0, mem_size);
		return;
	}

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		goto fail;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis)
		goto fail_free_dbbuf_dbs;
	return;

fail_free_dbbuf_dbs:
	dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
			  dev->dbbuf_dbs_dma_addr);
	dev->dbbuf_dbs = NULL;
fail:
	dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c = { };
	unsigned int i;

	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);

		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
}

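/*
 * Wrap-safe comparison in the style of virtio's event index: returns true
 * iff event_idx lies in the half-open window [old, new_idx) modulo 2^16,
 * i.e. this doorbell update stepped over the index at which the controller
 * asked to be notified.
 */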
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
					      volatile __le32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value, event_idx;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = le32_to_cpu(*dbbuf_db);
		*dbbuf_db = cpu_to_le32(value);

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory. The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		event_idx = le32_to_cpu(*dbbuf_ei);
		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
{
	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
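
/*
 * Worked example for nvme_pci_npages_prp(): NVME_MAX_KB_SZ (8 MiB) plus one
 * extra 4 KiB controller page for a misaligned start gives a worst case of
 * 2049 PRP entries. At 8 bytes per entry, with the last slot of each list
 * page reserved for chaining, that is 5 PRP list pages, matching
 * NVME_MAX_NR_ALLOCATIONS.
 */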

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = to_nvme_dev(data);
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = to_nvme_dev(data);
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = set->driver_data;
	nvme_req(req)->cmd = &iod->cmd;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = to_nvme_dev(set->driver_data);
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       absolute_pointer(cmd), sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

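/*
 * Heuristic for picking SGLs over PRPs: an SGL descriptor can cover a
 * segment of any length, while PRPs need one entry per controller page, so
 * SGLs only pay off once the average segment size reaches sgl_threshold.
 * The admin queue (qid 0) always uses PRPs, since SGLs are not permitted for
 * admin commands over PCIe.
 */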
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
				     int nseg)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = iod->list[i].prp_list;
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else if (iod->nr_allocations == 1)
		dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sgt.sgl;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	iod->list[0].prp_list = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			iod->list[iod->nr_allocations++].prp_list = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	nvme_free_prps(dev, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->sgt.nents);
	return BLK_STS_IOERR;
}

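/*
 * Helpers for building NVMe SGL descriptors: a data descriptor points at a
 * single DMA-mapped segment, while a "last segment" descriptor points at the
 * array of data descriptors that follows it (entries * 16 bytes).
 */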
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(entries * sizeof(*sge));
	sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sgt.sgl;
	unsigned int entries = iod->sgt.nents;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}

	iod->list[0].sg_list = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
	do {
		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	else
		cmnd->dptr.prp2 = 0;
	return BLK_STS_OK;
}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int rc;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (nvmeq->qid && sgl_threshold &&
			    nvme_ctrl_sgl_supported(&dev->ctrl))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc) {
		if (rc == -EREMOTEIO)
			ret = BLK_STS_TARGET;
		goto out_free_sg;
	}

	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
	return ret;
}

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
				     rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}

static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	iod->aborted = false;
	iod->nr_allocations = -1;
	iod->sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &iod->cmd);
		if (ret)
			goto out_unmap_data;
	}

	nvme_start_request(req);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	ret = nvme_prep_rq(dev, req);
	if (unlikely(ret))
		return ret;
	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	nvme_write_sq_db(nvmeq, bd->last);
	spin_unlock(&nvmeq->sq_lock);
	return BLK_STS_OK;
}

static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
{
	spin_lock(&nvmeq->sq_lock);
	while (!rq_list_empty(*rqlist)) {
		struct request *req = rq_list_pop(rqlist);
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	}
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
{
	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return false;
	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
		return false;

	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}

static void nvme_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

		if (!nvme_prep_rq_batch(nvmeq, req)) {
			/* detach 'req' and add to remainder list */
			rq_list_move(rqlist, &requeue_list, req, prev);

			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			/* detach rest of list, and submit */
			req->rq_next = NULL;
			nvme_submit_cmds(nvmeq, rqlist);
			*rqlist = next;
			prev = NULL;
		} else
			prev = req;
	}

	*rqlist = requeue_list;
}

static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;

	if (blk_integrity_rq(req)) {
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
	}

	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
}

static void nvme_pci_complete_rq(struct request *req)
{
	nvme_pci_unmap_rq(req);
	nvme_complete_rq(req);
}

static void nvme_pci_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, nvme_pci_unmap_rq);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
				   struct io_comp_batch *iob, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
					nvme_pci_complete_batch))
		nvme_pci_complete_rq(req);
}

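/*
 * Advance the CQ head; when it wraps back to slot 0, flip the expected phase
 * bit so nvme_cqe_pending() keeps telling fresh completions apart from
 * entries consumed on the previous pass through the ring.
 */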
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}

static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
			       struct io_comp_batch *iob)
{
	int found = 0;

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	DEFINE_IO_COMP_BATCH(iob);

	if (nvme_poll_cq(nvmeq, &iob)) {
		if (!rq_list_empty(iob.req_list))
			nvme_pci_complete_batch(&iob);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/*
 * Poll for completions for any interrupt driven queue
 * Can be called from any context.
 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_poll_cq(nvmeq, NULL);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_poll_cq(nvmeq, iob);
	spin_unlock(&nvmeq->cq_poll_lock);

	return found;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c = { };

	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;

	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &c);
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c = { };

	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	struct nvme_command c = { };
	int flags = NVME_QUEUE_PHYS_CONTIG;

	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq)
{
	struct nvme_ctrl *ctrl = &dev->ctrl;
	struct nvme_command c = { };
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since the URGENT priority class is encoded as zero, leaving
	 * the priority bits clear would make every queue URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
	/* If true, indicates loss of adapter communication, possibly due to
	 * an NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is in a fatal error state
	 * _or_ we lost communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);

	if (csts != ~0)
		return;

	dev_warn(dev->ctrl.device,
		 "Does your device have a faulty power saving mode enabled?\n");
	dev_warn(dev->ctrl.device,
		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}

static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd = { };
	u32 csts = readl(dev->bar + NVME_REG_CSTS);
	u8 opcode;

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		goto disable;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
		nvme_poll(req->mq_hctx, NULL);
	else
		nvme_poll_irqdisable(nvmeq);

	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, completion polled\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		return BLK_EH_DONE;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_DONE.
	 */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		fallthrough;
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, disable controller\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		nvme_dev_disable(dev, true);
		return BLK_EH_DONE;
	case NVME_CTRL_RESETTING:
		return BLK_EH_RESET_TIMER;
	default:
		break;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	opcode = nvme_req(req)->cmd->common.opcode;
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
			 req->tag, nvme_cid(req), opcode,
			 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		goto disable;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = true;

	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = nvme_cid(req);
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
		 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
		 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
		 blk_rq_bytes(req));

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
					 BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	nvme_init_request(abort_req, &cmd);

	abort_req->end_io = abort_endio;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req, false);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;

disable:
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
		return BLK_EH_DONE;

	nvme_dev_disable(dev, false);
	if (nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return BLK_EH_DONE;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
			  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}

static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return;

	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
	mb();

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(dev, i);
}

/*
 * Called only on a device that has been disabled and after all other threads
 * that can check this device's completion queues have synced, except
 * nvme_poll(). This is the last chance for the driver to see a natural
 * completion before nvme_cancel_request() terminates all incomplete requests.
 */
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
		spin_lock(&dev->queues[i].cq_poll_lock);
		nvme_poll_cq(&dev->queues[i], NULL);
		spin_unlock(&dev->queues[i].cq_poll_lock);
	}
}

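/*
 * When SQs are placed in the controller memory buffer, the queue depth may
 * have to shrink so that all I/O queues fit. For example, the default
 * 1024-entry SQs with 64-byte entries need 64 KiB of CMB each; if the CMB
 * cannot hold that for every queue, the depth is reduced, and -ENOMEM is
 * returned instead once it would drop below 64 so the caller falls back to
 * host memory at the original depth.
 */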
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
			   int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  NVME_CTRL_PAGE_SIZE);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);

		mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}
1494 | |
1495 | static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, |
1496 | int qid) |
1497 | { |
1498 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
1499 | |
1500 | if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { |
1501 | nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); |
1502 | if (nvmeq->sq_cmds) { |
1503 | nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, |
1504 | addr: nvmeq->sq_cmds); |
1505 | if (nvmeq->sq_dma_addr) { |
1506 | set_bit(NVMEQ_SQ_CMB, addr: &nvmeq->flags); |
1507 | return 0; |
1508 | } |
1509 | |
1510 | pci_free_p2pmem(pdev, addr: nvmeq->sq_cmds, SQ_SIZE(nvmeq)); |
1511 | } |
1512 | } |
1513 | |
1514 | nvmeq->sq_cmds = dma_alloc_coherent(dev: dev->dev, SQ_SIZE(nvmeq), |
1515 | dma_handle: &nvmeq->sq_dma_addr, GFP_KERNEL); |
1516 | if (!nvmeq->sq_cmds) |
1517 | return -ENOMEM; |
1518 | return 0; |
1519 | } |
1520 | |
1521 | static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) |
1522 | { |
1523 | struct nvme_queue *nvmeq = &dev->queues[qid]; |
1524 | |
1525 | if (dev->ctrl.queue_count > qid) |
1526 | return 0; |
1527 | |
1528 | nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; |
1529 | nvmeq->q_depth = depth; |
1530 | nvmeq->cqes = dma_alloc_coherent(dev: dev->dev, CQ_SIZE(nvmeq), |
1531 | dma_handle: &nvmeq->cq_dma_addr, GFP_KERNEL); |
1532 | if (!nvmeq->cqes) |
1533 | goto free_nvmeq; |
1534 | |
1535 | if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) |
1536 | goto free_cqdma; |
1537 | |
1538 | nvmeq->dev = dev; |
1539 | spin_lock_init(&nvmeq->sq_lock); |
1540 | spin_lock_init(&nvmeq->cq_poll_lock); |
1541 | nvmeq->cq_head = 0; |
1542 | nvmeq->cq_phase = 1; |
1543 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
1544 | nvmeq->qid = qid; |
1545 | dev->ctrl.queue_count++; |
1546 | |
1547 | return 0; |
1548 | |
1549 | free_cqdma: |
1550 | dma_free_coherent(dev: dev->dev, CQ_SIZE(nvmeq), cpu_addr: (void *)nvmeq->cqes, |
1551 | dma_handle: nvmeq->cq_dma_addr); |
1552 | free_nvmeq: |
1553 | return -ENOMEM; |
1554 | } |
1555 | |
1556 | static int queue_request_irq(struct nvme_queue *nvmeq) |
1557 | { |
1558 | struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); |
1559 | int nr = nvmeq->dev->ctrl.instance; |
1560 | |
1561 | if (use_threaded_interrupts) { |
1562 | return pci_request_irq(dev: pdev, nr: nvmeq->cq_vector, handler: nvme_irq_check, |
1563 | thread_fn: nvme_irq, dev_id: nvmeq, fmt: "nvme%dq%d", nr, nvmeq->qid); |
1564 | } else { |
1565 | return pci_request_irq(dev: pdev, nr: nvmeq->cq_vector, handler: nvme_irq, |
1566 | NULL, dev_id: nvmeq, fmt: "nvme%dq%d", nr, nvmeq->qid); |
1567 | } |
1568 | } |
1569 | |
1570 | static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) |
1571 | { |
1572 | struct nvme_dev *dev = nvmeq->dev; |
1573 | |
1574 | nvmeq->sq_tail = 0; |
1575 | nvmeq->last_sq_tail = 0; |
1576 | nvmeq->cq_head = 0; |
1577 | nvmeq->cq_phase = 1; |
1578 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
1579 | memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); |
1580 | nvme_dbbuf_init(dev, nvmeq, qid); |
1581 | dev->online_queues++; |
1582 | wmb(); /* ensure the first interrupt sees the initialization */ |
1583 | } |
1584 | |
1585 | /* |
1586 | * Try getting shutdown_lock while setting up IO queues. |
1587 | */ |
1588 | static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) |
1589 | { |
1590 | /* |
1591 | * Give up if the lock is being held by nvme_dev_disable. |
1592 | */ |
1593 | if (!mutex_trylock(lock: &dev->shutdown_lock)) |
1594 | return -ENODEV; |
1595 | |
1596 | /* |
1597 | * Controller is in wrong state, fail early. |
1598 | */ |
1599 | if (nvme_ctrl_state(ctrl: &dev->ctrl) != NVME_CTRL_CONNECTING) { |
1600 | mutex_unlock(lock: &dev->shutdown_lock); |
1601 | return -ENODEV; |
1602 | } |
1603 | |
1604 | return 0; |
1605 | } |
1606 | |
1607 | static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) |
1608 | { |
1609 | struct nvme_dev *dev = nvmeq->dev; |
1610 | int result; |
1611 | u16 vector = 0; |
1612 | |
1613 | clear_bit(NVMEQ_DELETE_ERROR, addr: &nvmeq->flags); |
1614 | |
1615 | /* |
1616 | * A queue's vector matches the queue identifier unless the controller |
1617 | * has only one vector available. |
1618 | */ |
1619 | if (!polled) |
1620 | vector = dev->num_vecs == 1 ? 0 : qid; |
1621 | else |
1622 | set_bit(NVMEQ_POLLED, addr: &nvmeq->flags); |
1623 | |
1624 | result = adapter_alloc_cq(dev, qid, nvmeq, vector); |
1625 | if (result) |
1626 | return result; |
1627 | |
1628 | result = adapter_alloc_sq(dev, qid, nvmeq); |
1629 | if (result < 0) |
1630 | return result; |
1631 | if (result) |
1632 | goto release_cq; |
1633 | |
1634 | nvmeq->cq_vector = vector; |
1635 | |
1636 | result = nvme_setup_io_queues_trylock(dev); |
1637 | if (result) |
1638 | return result; |
1639 | nvme_init_queue(nvmeq, qid); |
1640 | if (!polled) { |
1641 | result = queue_request_irq(nvmeq); |
1642 | if (result < 0) |
1643 | goto release_sq; |
1644 | } |
1645 | |
1646 | set_bit(NVMEQ_ENABLED, addr: &nvmeq->flags); |
1647 | mutex_unlock(lock: &dev->shutdown_lock); |
1648 | return result; |
1649 | |
1650 | release_sq: |
1651 | dev->online_queues--; |
1652 | mutex_unlock(lock: &dev->shutdown_lock); |
1653 | adapter_delete_sq(dev, sqid: qid); |
1654 | release_cq: |
1655 | adapter_delete_cq(dev, cqid: qid); |
1656 | return result; |
1657 | } |
1658 | |
1659 | static const struct blk_mq_ops nvme_mq_admin_ops = { |
1660 | .queue_rq = nvme_queue_rq, |
1661 | .complete = nvme_pci_complete_rq, |
1662 | .init_hctx = nvme_admin_init_hctx, |
1663 | .init_request = nvme_pci_init_request, |
1664 | .timeout = nvme_timeout, |
1665 | }; |
1666 | |
1667 | static const struct blk_mq_ops nvme_mq_ops = { |
1668 | .queue_rq = nvme_queue_rq, |
1669 | .queue_rqs = nvme_queue_rqs, |
1670 | .complete = nvme_pci_complete_rq, |
1671 | .commit_rqs = nvme_commit_rqs, |
1672 | .init_hctx = nvme_init_hctx, |
1673 | .init_request = nvme_pci_init_request, |
1674 | .map_queues = nvme_pci_map_queues, |
1675 | .timeout = nvme_timeout, |
1676 | .poll = nvme_poll, |
1677 | }; |
1678 | |
1679 | static void nvme_dev_remove_admin(struct nvme_dev *dev) |
1680 | { |
1681 | if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { |
1682 | /* |
1683 | * If the controller was reset during removal, it's possible |
1684 | * user requests may be waiting on a stopped queue. Start the |
1685 | * queue to flush these to completion. |
1686 | */ |
1687 | nvme_unquiesce_admin_queue(ctrl: &dev->ctrl); |
1688 | nvme_remove_admin_tag_set(ctrl: &dev->ctrl); |
1689 | } |
1690 | } |
1691 | |
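/*
 * Size of the doorbell region needed for nr_io_queues plus the admin
 * queue: each queue uses an SQ tail and a CQ head doorbell (4 bytes
 * each, spaced by the CAP.DSTRD stride). Worked example (illustrative
 * values): with db_stride == 1 and 4 I/O queues this is
 * 0x1000 + 5 * 8 = 0x1028 bytes.
 */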
1692 | static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) |
1693 | { |
1694 | return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); |
1695 | } |
1696 | |
1697 | static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) |
1698 | { |
1699 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
1700 | |
1701 | if (size <= dev->bar_mapped_size) |
1702 | return 0; |
1703 | if (size > pci_resource_len(pdev, 0)) |
1704 | return -ENOMEM; |
1705 | if (dev->bar) |
1706 | iounmap(addr: dev->bar); |
1707 | dev->bar = ioremap(pci_resource_start(pdev, 0), size); |
1708 | if (!dev->bar) { |
1709 | dev->bar_mapped_size = 0; |
1710 | return -ENOMEM; |
1711 | } |
1712 | dev->bar_mapped_size = size; |
1713 | dev->dbs = dev->bar + NVME_REG_DBS; |
1714 | |
1715 | return 0; |
1716 | } |
1717 | |
1718 | static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) |
1719 | { |
1720 | int result; |
1721 | u32 aqa; |
1722 | struct nvme_queue *nvmeq; |
1723 | |
1724 | result = nvme_remap_bar(dev, size: db_bar_size(dev, nr_io_queues: 0)); |
1725 | if (result < 0) |
1726 | return result; |
1727 | |
1728 | dev->subsystem = readl(addr: dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? |
1729 | NVME_CAP_NSSRC(dev->ctrl.cap) : 0; |
1730 | |
1731 | if (dev->subsystem && |
1732 | (readl(addr: dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) |
1733 | writel(val: NVME_CSTS_NSSRO, addr: dev->bar + NVME_REG_CSTS); |
1734 | |
1735 | /* |
1736 | * If the device has been passed off to us in an enabled state, just |
1737 | * clear the enabled bit. The spec says we should set the 'shutdown |
1738 | * notification bits', but doing so may cause the device to complete |
1739 | * commands to the admin queue ... and we don't know what memory that |
1740 | * might be pointing at! |
1741 | */ |
1742 | result = nvme_disable_ctrl(ctrl: &dev->ctrl, shutdown: false); |
1743 | if (result < 0) |
1744 | return result; |
1745 | |
1746 | result = nvme_alloc_queue(dev, qid: 0, NVME_AQ_DEPTH); |
1747 | if (result) |
1748 | return result; |
1749 | |
1750 | dev->ctrl.numa_node = dev_to_node(dev: dev->dev); |
1751 | |
1752 | nvmeq = &dev->queues[0]; |
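/*
 * AQA packs the admin submission queue size in the low half (ASQS) and
 * the admin completion queue size in bits 16+ (ACQS), both zero's
 * based, hence the "depth - 1" below mirrored into both halves.
 */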
1753 | aqa = nvmeq->q_depth - 1; |
1754 | aqa |= aqa << 16; |
1755 | |
1756 | writel(val: aqa, addr: dev->bar + NVME_REG_AQA); |
1757 | lo_hi_writeq(val: nvmeq->sq_dma_addr, addr: dev->bar + NVME_REG_ASQ); |
1758 | lo_hi_writeq(val: nvmeq->cq_dma_addr, addr: dev->bar + NVME_REG_ACQ); |
1759 | |
1760 | result = nvme_enable_ctrl(ctrl: &dev->ctrl); |
1761 | if (result) |
1762 | return result; |
1763 | |
1764 | nvmeq->cq_vector = 0; |
1765 | nvme_init_queue(nvmeq, qid: 0); |
1766 | result = queue_request_irq(nvmeq); |
1767 | if (result) { |
1768 | dev->online_queues--; |
1769 | return result; |
1770 | } |
1771 | |
1772 | set_bit(NVMEQ_ENABLED, addr: &nvmeq->flags); |
1773 | return result; |
1774 | } |
1775 | |
1776 | static int nvme_create_io_queues(struct nvme_dev *dev) |
1777 | { |
1778 | unsigned i, max, rw_queues; |
1779 | int ret = 0; |
1780 | |
1781 | for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { |
1782 | if (nvme_alloc_queue(dev, qid: i, depth: dev->q_depth)) { |
1783 | ret = -ENOMEM; |
1784 | break; |
1785 | } |
1786 | } |
1787 | |
1788 | max = min(dev->max_qid, dev->ctrl.queue_count - 1); |
1789 | if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { |
1790 | rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + |
1791 | dev->io_queues[HCTX_TYPE_READ]; |
1792 | } else { |
1793 | rw_queues = max; |
1794 | } |
1795 | |
1796 | for (i = dev->online_queues; i <= max; i++) { |
1797 | bool polled = i > rw_queues; |
1798 | |
1799 | ret = nvme_create_queue(nvmeq: &dev->queues[i], qid: i, polled); |
1800 | if (ret) |
1801 | break; |
1802 | } |
1803 | |
1804 | /* |
1805 | * Ignore failing Create SQ/CQ commands: we can continue with fewer |
1806 | * than the desired number of queues, and even a controller without |
1807 | * I/O queues can still be used to issue admin commands. This might |
1808 | * be useful to upgrade buggy firmware, for example. |
1809 | */ |
1810 | return ret >= 0 ? 0 : ret; |
1811 | } |
1812 | |
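/*
 * CMBSZ reports the CMB size as SZ units of 2^(12 + 4 * SZU) bytes.
 * Illustrative example: SZU == 2 (1 MiB granularity) and SZ == 16
 * describe a 16 MiB controller memory buffer.
 */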
1813 | static u64 nvme_cmb_size_unit(struct nvme_dev *dev) |
1814 | { |
1815 | u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; |
1816 | |
1817 | return 1ULL << (12 + 4 * szu); |
1818 | } |
1819 | |
1820 | static u32 nvme_cmb_size(struct nvme_dev *dev) |
1821 | { |
1822 | return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; |
1823 | } |
1824 | |
1825 | static void nvme_map_cmb(struct nvme_dev *dev) |
1826 | { |
1827 | u64 size, offset; |
1828 | resource_size_t bar_size; |
1829 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
1830 | int bar; |
1831 | |
1832 | if (dev->cmb_size) |
1833 | return; |
1834 | |
1835 | if (NVME_CAP_CMBS(dev->ctrl.cap)) |
1836 | writel(val: NVME_CMBMSC_CRE, addr: dev->bar + NVME_REG_CMBMSC); |
1837 | |
1838 | dev->cmbsz = readl(addr: dev->bar + NVME_REG_CMBSZ); |
1839 | if (!dev->cmbsz) |
1840 | return; |
1841 | dev->cmbloc = readl(addr: dev->bar + NVME_REG_CMBLOC); |
1842 | |
1843 | size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); |
1844 | offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); |
1845 | bar = NVME_CMB_BIR(dev->cmbloc); |
1846 | bar_size = pci_resource_len(pdev, bar); |
1847 | |
1848 | if (offset > bar_size) |
1849 | return; |
1850 | |
1851 | /* |
1852 | * Tell the controller about the host side address mapping the CMB, |
1853 | * and enable CMB decoding for the NVMe 1.4+ scheme: |
1854 | */ |
1855 | if (NVME_CAP_CMBS(dev->ctrl.cap)) { |
1856 | hi_lo_writeq(val: NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | |
1857 | (pci_bus_address(pdev, bar) + offset), |
1858 | addr: dev->bar + NVME_REG_CMBMSC); |
1859 | } |
1860 | |
1861 | /* |
1862 | * Controllers may support a CMB size larger than their BAR, |
1863 | * for example, due to being behind a bridge. Reduce the CMB to |
1864 | * the reported size of the BAR. |
1865 | */ |
1866 | if (size > bar_size - offset) |
1867 | size = bar_size - offset; |
1868 | |
1869 | if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { |
1870 | dev_warn(dev->ctrl.device, |
1871 | "failed to register the CMB\n"); |
1872 | return; |
1873 | } |
1874 | |
1875 | dev->cmb_size = size; |
1876 | dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); |
1877 | |
1878 | if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == |
1879 | (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) |
1880 | pci_p2pmem_publish(pdev, publish: true); |
1881 | |
1882 | nvme_update_attrs(dev); |
1883 | } |
1884 | |
1885 | static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) |
1886 | { |
1887 | u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; |
1888 | u64 dma_addr = dev->host_mem_descs_dma; |
1889 | struct nvme_command c = { }; |
1890 | int ret; |
1891 | |
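/*
 * The Host Memory Buffer is configured via Set Features: dword 11
 * carries the enable/return bits passed in by the caller, dword 12 the
 * buffer size in controller pages, dwords 13/14 the descriptor list
 * address and dword 15 the number of descriptors. (Rough summary of
 * the feature layout; see the NVMe spec for the authoritative field
 * definitions.)
 */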
1892 | c.features.opcode = nvme_admin_set_features; |
1893 | c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); |
1894 | c.features.dword11 = cpu_to_le32(bits); |
1895 | c.features.dword12 = cpu_to_le32(host_mem_size); |
1896 | c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); |
1897 | c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); |
1898 | c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); |
1899 | |
1900 | ret = nvme_submit_sync_cmd(q: dev->ctrl.admin_q, cmd: &c, NULL, bufflen: 0); |
1901 | if (ret) { |
1902 | dev_warn(dev->ctrl.device, |
1903 | "failed to set host mem (err %d, flags %#x).\n", |
1904 | ret, bits); |
1905 | } else |
1906 | dev->hmb = bits & NVME_HOST_MEM_ENABLE; |
1907 | |
1908 | return ret; |
1909 | } |
1910 | |
1911 | static void nvme_free_host_mem(struct nvme_dev *dev) |
1912 | { |
1913 | int i; |
1914 | |
1915 | for (i = 0; i < dev->nr_host_mem_descs; i++) { |
1916 | struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; |
1917 | size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; |
1918 | |
1919 | dma_free_attrs(dev: dev->dev, size, cpu_addr: dev->host_mem_desc_bufs[i], |
1920 | le64_to_cpu(desc->addr), |
1921 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
1922 | } |
1923 | |
1924 | kfree(objp: dev->host_mem_desc_bufs); |
1925 | dev->host_mem_desc_bufs = NULL; |
1926 | dma_free_coherent(dev: dev->dev, |
1927 | size: dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), |
1928 | cpu_addr: dev->host_mem_descs, dma_handle: dev->host_mem_descs_dma); |
1929 | dev->host_mem_descs = NULL; |
1930 | dev->nr_host_mem_descs = 0; |
1931 | } |
1932 | |
1933 | static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, |
1934 | u32 chunk_size) |
1935 | { |
1936 | struct nvme_host_mem_buf_desc *descs; |
1937 | u32 max_entries, len; |
1938 | dma_addr_t descs_dma; |
1939 | int i = 0; |
1940 | void **bufs; |
1941 | u64 size, tmp; |
1942 | |
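/*
 * Round the preferred size up to whole chunks to get the descriptor
 * count, i.e. DIV_ROUND_UP(preferred, chunk_size). Illustrative
 * example: preferred = 128 MiB with chunk_size = 4 MiB yields 32
 * descriptors, possibly capped further by HMMAXD below.
 */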
1943 | tmp = (preferred + chunk_size - 1); |
1944 | do_div(tmp, chunk_size); |
1945 | max_entries = tmp; |
1946 | |
1947 | if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) |
1948 | max_entries = dev->ctrl.hmmaxd; |
1949 | |
1950 | descs = dma_alloc_coherent(dev: dev->dev, size: max_entries * sizeof(*descs), |
1951 | dma_handle: &descs_dma, GFP_KERNEL); |
1952 | if (!descs) |
1953 | goto out; |
1954 | |
1955 | bufs = kcalloc(n: max_entries, size: sizeof(*bufs), GFP_KERNEL); |
1956 | if (!bufs) |
1957 | goto out_free_descs; |
1958 | |
1959 | for (size = 0; size < preferred && i < max_entries; size += len) { |
1960 | dma_addr_t dma_addr; |
1961 | |
1962 | len = min_t(u64, chunk_size, preferred - size); |
1963 | bufs[i] = dma_alloc_attrs(dev: dev->dev, size: len, dma_handle: &dma_addr, GFP_KERNEL, |
1964 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
1965 | if (!bufs[i]) |
1966 | break; |
1967 | |
1968 | descs[i].addr = cpu_to_le64(dma_addr); |
1969 | descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); |
1970 | i++; |
1971 | } |
1972 | |
1973 | if (!size) |
1974 | goto out_free_bufs; |
1975 | |
1976 | dev->nr_host_mem_descs = i; |
1977 | dev->host_mem_size = size; |
1978 | dev->host_mem_descs = descs; |
1979 | dev->host_mem_descs_dma = descs_dma; |
1980 | dev->host_mem_desc_bufs = bufs; |
1981 | return 0; |
1982 | |
1983 | out_free_bufs: |
1984 | while (--i >= 0) { |
1985 | size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; |
1986 | |
1987 | dma_free_attrs(dev: dev->dev, size, cpu_addr: bufs[i], |
1988 | le64_to_cpu(descs[i].addr), |
1989 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
1990 | } |
1991 | |
1992 | kfree(objp: bufs); |
1993 | out_free_descs: |
1994 | dma_free_coherent(dev: dev->dev, size: max_entries * sizeof(*descs), cpu_addr: descs, |
1995 | dma_handle: descs_dma); |
1996 | out: |
1997 | dev->host_mem_descs = NULL; |
1998 | return -ENOMEM; |
1999 | } |
2000 | |
2001 | static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) |
2002 | { |
2003 | u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); |
2004 | u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); |
2005 | u64 chunk_size; |
2006 | |
2007 | /* start big and work our way down */ |
2008 | for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { |
2009 | if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { |
2010 | if (!min || dev->host_mem_size >= min) |
2011 | return 0; |
2012 | nvme_free_host_mem(dev); |
2013 | } |
2014 | } |
2015 | |
2016 | return -ENOMEM; |
2017 | } |
2018 | |
2019 | static int nvme_setup_host_mem(struct nvme_dev *dev) |
2020 | { |
2021 | u64 max = (u64)max_host_mem_size_mb * SZ_1M; |
2022 | u64 preferred = (u64)dev->ctrl.hmpre * 4096; |
2023 | u64 min = (u64)dev->ctrl.hmmin * 4096; |
2024 | u32 enable_bits = NVME_HOST_MEM_ENABLE; |
2025 | int ret; |
2026 | |
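/*
 * HMPRE and HMMIN are reported by the controller in 4 KiB units and
 * max_host_mem_size_mb is a MiB module parameter, hence the
 * conversions above.
 */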
2027 | if (!dev->ctrl.hmpre) |
2028 | return 0; |
2029 | |
2030 | preferred = min(preferred, max); |
2031 | if (min > max) { |
2032 | dev_warn(dev->ctrl.device, |
2033 | "min host memory (%lld MiB) above limit (%d MiB).\n", |
2034 | min >> ilog2(SZ_1M), max_host_mem_size_mb); |
2035 | nvme_free_host_mem(dev); |
2036 | return 0; |
2037 | } |
2038 | |
2039 | /* |
2040 | * If we already have a buffer allocated, check if we can reuse it. |
2041 | */ |
2042 | if (dev->host_mem_descs) { |
2043 | if (dev->host_mem_size >= min) |
2044 | enable_bits |= NVME_HOST_MEM_RETURN; |
2045 | else |
2046 | nvme_free_host_mem(dev); |
2047 | } |
2048 | |
2049 | if (!dev->host_mem_descs) { |
2050 | if (nvme_alloc_host_mem(dev, min, preferred)) { |
2051 | dev_warn(dev->ctrl.device, |
2052 | "failed to allocate host memory buffer.\n"); |
2053 | return 0; /* controller must work without HMB */ |
2054 | } |
2055 | |
2056 | dev_info(dev->ctrl.device, |
2057 | "allocated %lld MiB host memory buffer.\n", |
2058 | dev->host_mem_size >> ilog2(SZ_1M)); |
2059 | } |
2060 | |
2061 | ret = nvme_set_host_mem(dev, bits: enable_bits); |
2062 | if (ret) |
2063 | nvme_free_host_mem(dev); |
2064 | return ret; |
2065 | } |
2066 | |
2067 | static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, |
2068 | char *buf) |
2069 | { |
2070 | struct nvme_dev *ndev = to_nvme_dev(ctrl: dev_get_drvdata(dev)); |
2071 | |
2072 | return sysfs_emit(buf, fmt: "cmbloc : x%08x\ncmbsz : x%08x\n", |
2073 | ndev->cmbloc, ndev->cmbsz); |
2074 | } |
2075 | static DEVICE_ATTR_RO(cmb); |
2076 | |
2077 | static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, |
2078 | char *buf) |
2079 | { |
2080 | struct nvme_dev *ndev = to_nvme_dev(ctrl: dev_get_drvdata(dev)); |
2081 | |
2082 | return sysfs_emit(buf, fmt: "%u\n", ndev->cmbloc); |
2083 | } |
2084 | static DEVICE_ATTR_RO(cmbloc); |
2085 | |
2086 | static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, |
2087 | char *buf) |
2088 | { |
2089 | struct nvme_dev *ndev = to_nvme_dev(ctrl: dev_get_drvdata(dev)); |
2090 | |
2091 | return sysfs_emit(buf, fmt: "%u\n", ndev->cmbsz); |
2092 | } |
2093 | static DEVICE_ATTR_RO(cmbsz); |
2094 | |
2095 | static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, |
2096 | char *buf) |
2097 | { |
2098 | struct nvme_dev *ndev = to_nvme_dev(ctrl: dev_get_drvdata(dev)); |
2099 | |
2100 | return sysfs_emit(buf, fmt: "%d\n", ndev->hmb); |
2101 | } |
2102 | |
2103 | static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, |
2104 | const char *buf, size_t count) |
2105 | { |
2106 | struct nvme_dev *ndev = to_nvme_dev(ctrl: dev_get_drvdata(dev)); |
2107 | bool new; |
2108 | int ret; |
2109 | |
2110 | if (kstrtobool(s: buf, res: &new) < 0) |
2111 | return -EINVAL; |
2112 | |
2113 | if (new == ndev->hmb) |
2114 | return count; |
2115 | |
2116 | if (new) { |
2117 | ret = nvme_setup_host_mem(dev: ndev); |
2118 | } else { |
2119 | ret = nvme_set_host_mem(dev: ndev, bits: 0); |
2120 | if (!ret) |
2121 | nvme_free_host_mem(dev: ndev); |
2122 | } |
2123 | |
2124 | if (ret < 0) |
2125 | return ret; |
2126 | |
2127 | return count; |
2128 | } |
2129 | static DEVICE_ATTR_RW(hmb); |
2130 | |
2131 | static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, |
2132 | struct attribute *a, int n) |
2133 | { |
2134 | struct nvme_ctrl *ctrl = |
2135 | dev_get_drvdata(container_of(kobj, struct device, kobj)); |
2136 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
2137 | |
2138 | if (a == &dev_attr_cmb.attr || |
2139 | a == &dev_attr_cmbloc.attr || |
2140 | a == &dev_attr_cmbsz.attr) { |
2141 | if (!dev->cmbsz) |
2142 | return 0; |
2143 | } |
2144 | if (a == &dev_attr_hmb.attr && !ctrl->hmpre) |
2145 | return 0; |
2146 | |
2147 | return a->mode; |
2148 | } |
2149 | |
2150 | static struct attribute *nvme_pci_attrs[] = { |
2151 | &dev_attr_cmb.attr, |
2152 | &dev_attr_cmbloc.attr, |
2153 | &dev_attr_cmbsz.attr, |
2154 | &dev_attr_hmb.attr, |
2155 | NULL, |
2156 | }; |
2157 | |
2158 | static const struct attribute_group nvme_pci_dev_attrs_group = { |
2159 | .attrs = nvme_pci_attrs, |
2160 | .is_visible = nvme_pci_attrs_are_visible, |
2161 | }; |
2162 | |
2163 | static const struct attribute_group *nvme_pci_dev_attr_groups[] = { |
2164 | &nvme_dev_attrs_group, |
2165 | &nvme_pci_dev_attrs_group, |
2166 | NULL, |
2167 | }; |
2168 | |
2169 | static void nvme_update_attrs(struct nvme_dev *dev) |
2170 | { |
2171 | sysfs_update_group(kobj: &dev->ctrl.device->kobj, grp: &nvme_pci_dev_attrs_group); |
2172 | } |
2173 | |
2174 | /* |
2175 | * nrirqs is the number of interrupts available for write and read |
2176 | * queues. The core already reserved an interrupt for the admin queue. |
2177 | */ |
2178 | static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) |
2179 | { |
2180 | struct nvme_dev *dev = affd->priv; |
2181 | unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; |
2182 | |
2183 | /* |
2184 | * If there is no interrupt available for queues, ensure that |
2185 | * the default queue is set to 1. The affinity set size is |
2186 | * also set to one, but the irq core ignores it for this case. |
2187 | * |
2188 | * If only one interrupt is available or 'write_queues' == 0, combine |
2189 | * write and read queues. |
2190 | * |
2191 | * If 'write_queues' > 0, ensure it leaves room for at least one read |
2192 | * queue. |
2193 | */ |
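/*
 * Illustrative example: with nrirqs == 8 and write_queues == 2 this
 * ends up with 2 default (write) queues and 6 read queues, i.e. two
 * affinity sets of sizes 2 and 6.
 */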
2194 | if (!nrirqs) { |
2195 | nrirqs = 1; |
2196 | nr_read_queues = 0; |
2197 | } else if (nrirqs == 1 || !nr_write_queues) { |
2198 | nr_read_queues = 0; |
2199 | } else if (nr_write_queues >= nrirqs) { |
2200 | nr_read_queues = 1; |
2201 | } else { |
2202 | nr_read_queues = nrirqs - nr_write_queues; |
2203 | } |
2204 | |
2205 | dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; |
2206 | affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; |
2207 | dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; |
2208 | affd->set_size[HCTX_TYPE_READ] = nr_read_queues; |
2209 | affd->nr_sets = nr_read_queues ? 2 : 1; |
2210 | } |
2211 | |
2212 | static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) |
2213 | { |
2214 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
2215 | struct irq_affinity affd = { |
2216 | .pre_vectors = 1, |
2217 | .calc_sets = nvme_calc_irq_sets, |
2218 | .priv = dev, |
2219 | }; |
2220 | unsigned int irq_queues, poll_queues; |
2221 | |
2222 | /* |
2223 | * Poll queues don't need interrupts, but we need at least one I/O queue |
2224 | * left over for non-polled I/O. |
2225 | */ |
2226 | poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); |
2227 | dev->io_queues[HCTX_TYPE_POLL] = poll_queues; |
2228 | |
2229 | /* |
2230 | * Initialize for the single interrupt case, will be updated in |
2231 | * nvme_calc_irq_sets(). |
2232 | */ |
2233 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; |
2234 | dev->io_queues[HCTX_TYPE_READ] = 0; |
2235 | |
2236 | /* |
2237 | * We need interrupts for the admin queue and each non-polled I/O queue, |
2238 | * but some Apple controllers require all queues to use the first |
2239 | * vector. |
2240 | */ |
2241 | irq_queues = 1; |
2242 | if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) |
2243 | irq_queues += (nr_io_queues - poll_queues); |
2244 | return pci_alloc_irq_vectors_affinity(dev: pdev, min_vecs: 1, max_vecs: irq_queues, |
2245 | PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, affd: &affd); |
2246 | } |
2247 | |
2248 | static unsigned int nvme_max_io_queues(struct nvme_dev *dev) |
2249 | { |
2250 | /* |
2251 | * If tags are shared with the admin queue (Apple bug), then |
2252 | * make sure we only use one I/O queue. |
2253 | */ |
2254 | if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) |
2255 | return 1; |
2256 | return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; |
2257 | } |
2258 | |
2259 | static int nvme_setup_io_queues(struct nvme_dev *dev) |
2260 | { |
2261 | struct nvme_queue *adminq = &dev->queues[0]; |
2262 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
2263 | unsigned int nr_io_queues; |
2264 | unsigned long size; |
2265 | int result; |
2266 | |
2267 | /* |
2268 | * Sample the module parameters once at reset time so that we have |
2269 | * stable values to work with. |
2270 | */ |
2271 | dev->nr_write_queues = write_queues; |
2272 | dev->nr_poll_queues = poll_queues; |
2273 | |
2274 | nr_io_queues = dev->nr_allocated_queues - 1; |
2275 | result = nvme_set_queue_count(ctrl: &dev->ctrl, count: &nr_io_queues); |
2276 | if (result < 0) |
2277 | return result; |
2278 | |
2279 | if (nr_io_queues == 0) |
2280 | return 0; |
2281 | |
2282 | /* |
2283 | * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions |
2284 | * from set to unset. If there is a window before the vector is truly |
2285 | * freed, pci_free_irq_vectors() jumping into this window will crash. |
2286 | * Also take the lock to avoid racing with pci_free_irq_vectors() in |
2287 | * the nvme_dev_disable() path. |
2288 | */ |
2289 | result = nvme_setup_io_queues_trylock(dev); |
2290 | if (result) |
2291 | return result; |
2292 | if (test_and_clear_bit(NVMEQ_ENABLED, addr: &adminq->flags)) |
2293 | pci_free_irq(dev: pdev, nr: 0, dev_id: adminq); |
2294 | |
2295 | if (dev->cmb_use_sqes) { |
2296 | result = nvme_cmb_qdepth(dev, nr_io_queues, |
2297 | entry_size: sizeof(struct nvme_command)); |
2298 | if (result > 0) { |
2299 | dev->q_depth = result; |
2300 | dev->ctrl.sqsize = result - 1; |
2301 | } else { |
2302 | dev->cmb_use_sqes = false; |
2303 | } |
2304 | } |
2305 | |
2306 | do { |
2307 | size = db_bar_size(dev, nr_io_queues); |
2308 | result = nvme_remap_bar(dev, size); |
2309 | if (!result) |
2310 | break; |
2311 | if (!--nr_io_queues) { |
2312 | result = -ENOMEM; |
2313 | goto out_unlock; |
2314 | } |
2315 | } while (1); |
2316 | adminq->q_db = dev->dbs; |
2317 | |
2318 | retry: |
2319 | /* Deregister the admin queue's interrupt */ |
2320 | if (test_and_clear_bit(NVMEQ_ENABLED, addr: &adminq->flags)) |
2321 | pci_free_irq(dev: pdev, nr: 0, dev_id: adminq); |
2322 | |
2323 | /* |
2324 | * If we enabled MSI-X early because INTx is not supported, disable |
2325 | * it again before setting up the full range we need. |
2326 | */ |
2327 | pci_free_irq_vectors(dev: pdev); |
2328 | |
2329 | result = nvme_setup_irqs(dev, nr_io_queues); |
2330 | if (result <= 0) { |
2331 | result = -EIO; |
2332 | goto out_unlock; |
2333 | } |
2334 | |
2335 | dev->num_vecs = result; |
2336 | result = max(result - 1, 1); |
2337 | dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; |
2338 | |
2339 | /* |
2340 | * Should investigate if there's a performance win from allocating |
2341 | * more queues than interrupt vectors; it might allow the submission |
2342 | * path to scale better, even if the receive path is limited by the |
2343 | * number of interrupts. |
2344 | */ |
2345 | result = queue_request_irq(nvmeq: adminq); |
2346 | if (result) |
2347 | goto out_unlock; |
2348 | set_bit(NVMEQ_ENABLED, addr: &adminq->flags); |
2349 | mutex_unlock(lock: &dev->shutdown_lock); |
2350 | |
2351 | result = nvme_create_io_queues(dev); |
2352 | if (result || dev->online_queues < 2) |
2353 | return result; |
2354 | |
2355 | if (dev->online_queues - 1 < dev->max_qid) { |
2356 | nr_io_queues = dev->online_queues - 1; |
2357 | nvme_delete_io_queues(dev); |
2358 | result = nvme_setup_io_queues_trylock(dev); |
2359 | if (result) |
2360 | return result; |
2361 | nvme_suspend_io_queues(dev); |
2362 | goto retry; |
2363 | } |
2364 | dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", |
2365 | dev->io_queues[HCTX_TYPE_DEFAULT], |
2366 | dev->io_queues[HCTX_TYPE_READ], |
2367 | dev->io_queues[HCTX_TYPE_POLL]); |
2368 | return 0; |
2369 | out_unlock: |
2370 | mutex_unlock(lock: &dev->shutdown_lock); |
2371 | return result; |
2372 | } |
2373 | |
2374 | static enum rq_end_io_ret nvme_del_queue_end(struct request *req, |
2375 | blk_status_t error) |
2376 | { |
2377 | struct nvme_queue *nvmeq = req->end_io_data; |
2378 | |
2379 | blk_mq_free_request(rq: req); |
2380 | complete(&nvmeq->delete_done); |
2381 | return RQ_END_IO_NONE; |
2382 | } |
2383 | |
2384 | static enum rq_end_io_ret nvme_del_cq_end(struct request *req, |
2385 | blk_status_t error) |
2386 | { |
2387 | struct nvme_queue *nvmeq = req->end_io_data; |
2388 | |
2389 | if (error) |
2390 | set_bit(NVMEQ_DELETE_ERROR, addr: &nvmeq->flags); |
2391 | |
2392 | return nvme_del_queue_end(req, error); |
2393 | } |
2394 | |
2395 | static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) |
2396 | { |
2397 | struct request_queue *q = nvmeq->dev->ctrl.admin_q; |
2398 | struct request *req; |
2399 | struct nvme_command cmd = { }; |
2400 | |
2401 | cmd.delete_queue.opcode = opcode; |
2402 | cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); |
2403 | |
2404 | req = blk_mq_alloc_request(q, opf: nvme_req_op(cmd: &cmd), flags: BLK_MQ_REQ_NOWAIT); |
2405 | if (IS_ERR(ptr: req)) |
2406 | return PTR_ERR(ptr: req); |
2407 | nvme_init_request(req, cmd: &cmd); |
2408 | |
2409 | if (opcode == nvme_admin_delete_cq) |
2410 | req->end_io = nvme_del_cq_end; |
2411 | else |
2412 | req->end_io = nvme_del_queue_end; |
2413 | req->end_io_data = nvmeq; |
2414 | |
2415 | init_completion(x: &nvmeq->delete_done); |
2416 | blk_execute_rq_nowait(rq: req, at_head: false); |
2417 | return 0; |
2418 | } |
2419 | |
2420 | static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) |
2421 | { |
2422 | int nr_queues = dev->online_queues - 1, sent = 0; |
2423 | unsigned long timeout; |
2424 | |
2425 | retry: |
2426 | timeout = NVME_ADMIN_TIMEOUT; |
2427 | while (nr_queues > 0) { |
2428 | if (nvme_delete_queue(nvmeq: &dev->queues[nr_queues], opcode)) |
2429 | break; |
2430 | nr_queues--; |
2431 | sent++; |
2432 | } |
2433 | while (sent) { |
2434 | struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; |
2435 | |
2436 | timeout = wait_for_completion_io_timeout(x: &nvmeq->delete_done, |
2437 | timeout); |
2438 | if (timeout == 0) |
2439 | return false; |
2440 | |
2441 | sent--; |
2442 | if (nr_queues) |
2443 | goto retry; |
2444 | } |
2445 | return true; |
2446 | } |
2447 | |
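/*
 * Delete all submission queues first and only then their completion
 * queues, as the spec requires the SQs associated with a CQ to be
 * deleted before the CQ itself.
 */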
2448 | static void nvme_delete_io_queues(struct nvme_dev *dev) |
2449 | { |
2450 | if (__nvme_delete_io_queues(dev, opcode: nvme_admin_delete_sq)) |
2451 | __nvme_delete_io_queues(dev, opcode: nvme_admin_delete_cq); |
2452 | } |
2453 | |
2454 | static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) |
2455 | { |
2456 | if (dev->io_queues[HCTX_TYPE_POLL]) |
2457 | return 3; |
2458 | if (dev->io_queues[HCTX_TYPE_READ]) |
2459 | return 2; |
2460 | return 1; |
2461 | } |
2462 | |
2463 | static void nvme_pci_update_nr_queues(struct nvme_dev *dev) |
2464 | { |
2465 | blk_mq_update_nr_hw_queues(set: &dev->tagset, nr_hw_queues: dev->online_queues - 1); |
2466 | /* free previously allocated queues that are no longer usable */ |
2467 | nvme_free_queues(dev, lowest: dev->online_queues); |
2468 | } |
2469 | |
2470 | static int nvme_pci_enable(struct nvme_dev *dev) |
2471 | { |
2472 | int result = -ENOMEM; |
2473 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
2474 | |
2475 | if (pci_enable_device_mem(dev: pdev)) |
2476 | return result; |
2477 | |
2478 | pci_set_master(dev: pdev); |
2479 | |
2480 | if (readl(addr: dev->bar + NVME_REG_CSTS) == -1) { |
2481 | result = -ENODEV; |
2482 | goto disable; |
2483 | } |
2484 | |
2485 | /* |
2486 | * Some devices and/or platforms don't advertise or work with INTx |
2487 | * interrupts. Pre-enable a single MSI-X or MSI vector for setup. We'll |
2488 | * adjust this later. |
2489 | */ |
2490 | result = pci_alloc_irq_vectors(dev: pdev, min_vecs: 1, max_vecs: 1, PCI_IRQ_ALL_TYPES); |
2491 | if (result < 0) |
2492 | goto disable; |
2493 | |
2494 | dev->ctrl.cap = lo_hi_readq(addr: dev->bar + NVME_REG_CAP); |
2495 | |
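/*
 * CAP.MQES is a zero's based count of the maximum queue entries
 * supported, hence the "+ 1" before clamping to io_queue_depth.
 */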
2496 | dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, |
2497 | io_queue_depth); |
2498 | dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); |
2499 | dev->dbs = dev->bar + 4096; |
2500 | |
2501 | /* |
2502 | * Some Apple controllers require a non-standard SQE size. |
2503 | * Interestingly they also seem to ignore the CC:IOSQES register |
2504 | * so we don't bother updating it here. |
2505 | */ |
2506 | if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) |
2507 | dev->io_sqes = 7; |
2508 | else |
2509 | dev->io_sqes = NVME_NVM_IOSQES; |
2510 | |
2511 | /* |
2512 | * Temporary fix for the Apple controller found in the MacBook8,1 and |
2513 | * some MacBook7,1 to avoid controller resets and data loss. |
2514 | */ |
2515 | if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { |
2516 | dev->q_depth = 2; |
2517 | dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " |
2518 | "set queue depth=%u to work around controller resets\n", |
2519 | dev->q_depth); |
2520 | } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && |
2521 | (pdev->device == 0xa821 || pdev->device == 0xa822) && |
2522 | NVME_CAP_MQES(dev->ctrl.cap) == 0) { |
2523 | dev->q_depth = 64; |
2524 | dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " |
2525 | "set queue depth=%u\n", dev->q_depth); |
2526 | } |
2527 | |
2528 | /* |
2529 | * Controllers with the shared tags quirk need the I/O queue to be |
2530 | * big enough so that we get 32 tags for the admin queue. |
2531 | */ |
2532 | if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && |
2533 | (dev->q_depth < (NVME_AQ_DEPTH + 2))) { |
2534 | dev->q_depth = NVME_AQ_DEPTH + 2; |
2535 | dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", |
2536 | dev->q_depth); |
2537 | } |
2538 | dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ |
2539 | |
2540 | nvme_map_cmb(dev); |
2541 | |
2542 | pci_save_state(dev: pdev); |
2543 | |
2544 | result = nvme_pci_configure_admin_queue(dev); |
2545 | if (result) |
2546 | goto free_irq; |
2547 | return result; |
2548 | |
2549 | free_irq: |
2550 | pci_free_irq_vectors(dev: pdev); |
2551 | disable: |
2552 | pci_disable_device(dev: pdev); |
2553 | return result; |
2554 | } |
2555 | |
2556 | static void nvme_dev_unmap(struct nvme_dev *dev) |
2557 | { |
2558 | if (dev->bar) |
2559 | iounmap(addr: dev->bar); |
2560 | pci_release_mem_regions(to_pci_dev(dev->dev)); |
2561 | } |
2562 | |
2563 | static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) |
2564 | { |
2565 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
2566 | u32 csts; |
2567 | |
2568 | if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) |
2569 | return true; |
2570 | if (pdev->error_state != pci_channel_io_normal) |
2571 | return true; |
2572 | |
2573 | csts = readl(addr: dev->bar + NVME_REG_CSTS); |
2574 | return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); |
2575 | } |
2576 | |
2577 | static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) |
2578 | { |
2579 | enum nvme_ctrl_state state = nvme_ctrl_state(ctrl: &dev->ctrl); |
2580 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
2581 | bool dead; |
2582 | |
2583 | mutex_lock(&dev->shutdown_lock); |
2584 | dead = nvme_pci_ctrl_is_dead(dev); |
2585 | if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) { |
2586 | if (pci_is_enabled(pdev)) |
2587 | nvme_start_freeze(ctrl: &dev->ctrl); |
2588 | /* |
2589 | * Give the controller a chance to complete all entered requests |
2590 | * if doing a safe shutdown. |
2591 | */ |
2592 | if (!dead && shutdown) |
2593 | nvme_wait_freeze_timeout(ctrl: &dev->ctrl, NVME_IO_TIMEOUT); |
2594 | } |
2595 | |
2596 | nvme_quiesce_io_queues(ctrl: &dev->ctrl); |
2597 | |
2598 | if (!dead && dev->ctrl.queue_count > 0) { |
2599 | nvme_delete_io_queues(dev); |
2600 | nvme_disable_ctrl(ctrl: &dev->ctrl, shutdown); |
2601 | nvme_poll_irqdisable(nvmeq: &dev->queues[0]); |
2602 | } |
2603 | nvme_suspend_io_queues(dev); |
2604 | nvme_suspend_queue(dev, qid: 0); |
2605 | pci_free_irq_vectors(dev: pdev); |
2606 | if (pci_is_enabled(pdev)) |
2607 | pci_disable_device(dev: pdev); |
2608 | nvme_reap_pending_cqes(dev); |
2609 | |
2610 | nvme_cancel_tagset(ctrl: &dev->ctrl); |
2611 | nvme_cancel_admin_tagset(ctrl: &dev->ctrl); |
2612 | |
2613 | /* |
2614 | * The driver will not be starting up queues again if shutting down, so |
2615 | * we must flush all entered requests to their failed completion to |
2616 | * avoid deadlocking the blk-mq hot-cpu notifier. |
2617 | */ |
2618 | if (shutdown) { |
2619 | nvme_unquiesce_io_queues(ctrl: &dev->ctrl); |
2620 | if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) |
2621 | nvme_unquiesce_admin_queue(ctrl: &dev->ctrl); |
2622 | } |
2623 | mutex_unlock(lock: &dev->shutdown_lock); |
2624 | } |
2625 | |
2626 | static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) |
2627 | { |
2628 | if (!nvme_wait_reset(ctrl: &dev->ctrl)) |
2629 | return -EBUSY; |
2630 | nvme_dev_disable(dev, shutdown); |
2631 | return 0; |
2632 | } |
2633 | |
2634 | static int nvme_setup_prp_pools(struct nvme_dev *dev) |
2635 | { |
2636 | dev->prp_page_pool = dma_pool_create(name: "prp list page", dev: dev->dev, |
2637 | NVME_CTRL_PAGE_SIZE, |
2638 | NVME_CTRL_PAGE_SIZE, allocation: 0); |
2639 | if (!dev->prp_page_pool) |
2640 | return -ENOMEM; |
2641 | |
2642 | /* Optimisation for I/Os between 4k and 128k */ |
2643 | dev->prp_small_pool = dma_pool_create(name: "prp list 256", dev: dev->dev, |
2644 | size: 256, align: 256, allocation: 0); |
2645 | if (!dev->prp_small_pool) { |
2646 | dma_pool_destroy(pool: dev->prp_page_pool); |
2647 | return -ENOMEM; |
2648 | } |
2649 | return 0; |
2650 | } |
2651 | |
2652 | static void nvme_release_prp_pools(struct nvme_dev *dev) |
2653 | { |
2654 | dma_pool_destroy(pool: dev->prp_page_pool); |
2655 | dma_pool_destroy(pool: dev->prp_small_pool); |
2656 | } |
2657 | |
2658 | static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) |
2659 | { |
2660 | size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; |
2661 | |
2662 | dev->iod_mempool = mempool_create_node(min_nr: 1, |
2663 | alloc_fn: mempool_kmalloc, free_fn: mempool_kfree, |
2664 | pool_data: (void *)alloc_size, GFP_KERNEL, |
2665 | nid: dev_to_node(dev: dev->dev)); |
2666 | if (!dev->iod_mempool) |
2667 | return -ENOMEM; |
2668 | return 0; |
2669 | } |
2670 | |
2671 | static void nvme_free_tagset(struct nvme_dev *dev) |
2672 | { |
2673 | if (dev->tagset.tags) |
2674 | nvme_remove_io_tag_set(ctrl: &dev->ctrl); |
2675 | dev->ctrl.tagset = NULL; |
2676 | } |
2677 | |
2678 | /* pairs with nvme_pci_alloc_dev */ |
2679 | static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) |
2680 | { |
2681 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
2682 | |
2683 | nvme_free_tagset(dev); |
2684 | put_device(dev: dev->dev); |
2685 | kfree(objp: dev->queues); |
2686 | kfree(objp: dev); |
2687 | } |
2688 | |
2689 | static void nvme_reset_work(struct work_struct *work) |
2690 | { |
2691 | struct nvme_dev *dev = |
2692 | container_of(work, struct nvme_dev, ctrl.reset_work); |
2693 | bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); |
2694 | int result; |
2695 | |
2696 | if (nvme_ctrl_state(ctrl: &dev->ctrl) != NVME_CTRL_RESETTING) { |
2697 | dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", |
2698 | dev->ctrl.state); |
2699 | result = -ENODEV; |
2700 | goto out; |
2701 | } |
2702 | |
2703 | /* |
2704 | * If we're called to reset a live controller, first shut it down before |
2705 | * moving on. |
2706 | */ |
2707 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) |
2708 | nvme_dev_disable(dev, shutdown: false); |
2709 | nvme_sync_queues(ctrl: &dev->ctrl); |
2710 | |
2711 | mutex_lock(&dev->shutdown_lock); |
2712 | result = nvme_pci_enable(dev); |
2713 | if (result) |
2714 | goto out_unlock; |
2715 | nvme_unquiesce_admin_queue(ctrl: &dev->ctrl); |
2716 | mutex_unlock(lock: &dev->shutdown_lock); |
2717 | |
2718 | /* |
2719 | * Use the CONNECTING state, introduced by the nvme-fc/rdma transports, |
2720 | * to mark the initialization procedure here. |
2721 | */ |
2722 | if (!nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_CONNECTING)) { |
2723 | dev_warn(dev->ctrl.device, |
2724 | "failed to mark controller CONNECTING\n"); |
2725 | result = -EBUSY; |
2726 | goto out; |
2727 | } |
2728 | |
2729 | result = nvme_init_ctrl_finish(ctrl: &dev->ctrl, was_suspended: was_suspend); |
2730 | if (result) |
2731 | goto out; |
2732 | |
2733 | nvme_dbbuf_dma_alloc(dev); |
2734 | |
2735 | result = nvme_setup_host_mem(dev); |
2736 | if (result < 0) |
2737 | goto out; |
2738 | |
2739 | result = nvme_setup_io_queues(dev); |
2740 | if (result) |
2741 | goto out; |
2742 | |
2743 | /* |
2744 | * Freeze and update the number of I/O queues as those might have |
2745 | * changed. If there are no I/O queues left after this reset, keep the |
2746 | * controller around but remove all namespaces. |
2747 | */ |
2748 | if (dev->online_queues > 1) { |
2749 | nvme_dbbuf_set(dev); |
2750 | nvme_unquiesce_io_queues(ctrl: &dev->ctrl); |
2751 | nvme_wait_freeze(ctrl: &dev->ctrl); |
2752 | nvme_pci_update_nr_queues(dev); |
2753 | nvme_unfreeze(ctrl: &dev->ctrl); |
2754 | } else { |
2755 | dev_warn(dev->ctrl.device, "IO queues lost\n"); |
2756 | nvme_mark_namespaces_dead(ctrl: &dev->ctrl); |
2757 | nvme_unquiesce_io_queues(ctrl: &dev->ctrl); |
2758 | nvme_remove_namespaces(ctrl: &dev->ctrl); |
2759 | nvme_free_tagset(dev); |
2760 | } |
2761 | |
2762 | /* |
2763 | * If only the admin queue is live, keep it for further investigation |
2764 | * or recovery. |
2765 | */ |
2766 | if (!nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_LIVE)) { |
2767 | dev_warn(dev->ctrl.device, |
2768 | "failed to mark controller live state\n"); |
2769 | result = -ENODEV; |
2770 | goto out; |
2771 | } |
2772 | |
2773 | nvme_start_ctrl(ctrl: &dev->ctrl); |
2774 | return; |
2775 | |
2776 | out_unlock: |
2777 | mutex_unlock(lock: &dev->shutdown_lock); |
2778 | out: |
2779 | /* |
2780 | * Set state to deleting now to avoid blocking nvme_wait_reset(), which |
2781 | * may be holding this pci_dev's device lock. |
2782 | */ |
2783 | dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", |
2784 | result); |
2785 | nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_DELETING); |
2786 | nvme_dev_disable(dev, shutdown: true); |
2787 | nvme_sync_queues(ctrl: &dev->ctrl); |
2788 | nvme_mark_namespaces_dead(ctrl: &dev->ctrl); |
2789 | nvme_unquiesce_io_queues(ctrl: &dev->ctrl); |
2790 | nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_DEAD); |
2791 | } |
2792 | |
2793 | static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) |
2794 | { |
2795 | *val = readl(addr: to_nvme_dev(ctrl)->bar + off); |
2796 | return 0; |
2797 | } |
2798 | |
2799 | static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) |
2800 | { |
2801 | writel(val, addr: to_nvme_dev(ctrl)->bar + off); |
2802 | return 0; |
2803 | } |
2804 | |
2805 | static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) |
2806 | { |
2807 | *val = lo_hi_readq(addr: to_nvme_dev(ctrl)->bar + off); |
2808 | return 0; |
2809 | } |
2810 | |
2811 | static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) |
2812 | { |
2813 | struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); |
2814 | |
2815 | return snprintf(buf, size, fmt: "%s\n", dev_name(dev: &pdev->dev)); |
2816 | } |
2817 | |
2818 | static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) |
2819 | { |
2820 | struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); |
2821 | struct nvme_subsystem *subsys = ctrl->subsys; |
2822 | |
2823 | dev_err(ctrl->device, |
2824 | "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", |
2825 | pdev->vendor, pdev->device, |
2826 | nvme_strlen(subsys->model, sizeof(subsys->model)), |
2827 | subsys->model, nvme_strlen(subsys->firmware_rev, |
2828 | sizeof(subsys->firmware_rev)), |
2829 | subsys->firmware_rev); |
2830 | } |
2831 | |
2832 | static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) |
2833 | { |
2834 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
2835 | |
2836 | return dma_pci_p2pdma_supported(dev: dev->dev); |
2837 | } |
2838 | |
2839 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { |
2840 | .name = "pcie", |
2841 | .module = THIS_MODULE, |
2842 | .flags = NVME_F_METADATA_SUPPORTED, |
2843 | .dev_attr_groups = nvme_pci_dev_attr_groups, |
2844 | .reg_read32 = nvme_pci_reg_read32, |
2845 | .reg_write32 = nvme_pci_reg_write32, |
2846 | .reg_read64 = nvme_pci_reg_read64, |
2847 | .free_ctrl = nvme_pci_free_ctrl, |
2848 | .submit_async_event = nvme_pci_submit_async_event, |
2849 | .get_address = nvme_pci_get_address, |
2850 | .print_device_info = nvme_pci_print_device_info, |
2851 | .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, |
2852 | }; |
2853 | |
2854 | static int nvme_dev_map(struct nvme_dev *dev) |
2855 | { |
2856 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
2857 | |
2858 | if (pci_request_mem_regions(pdev, name: "nvme")) |
2859 | return -ENODEV; |
2860 | |
2861 | if (nvme_remap_bar(dev, size: NVME_REG_DBS + 4096)) |
2862 | goto release; |
2863 | |
2864 | return 0; |
2865 | release: |
2866 | pci_release_mem_regions(pdev); |
2867 | return -ENODEV; |
2868 | } |
2869 | |
2870 | static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) |
2871 | { |
2872 | if (pdev->vendor == 0x144d && pdev->device == 0xa802) { |
2873 | /* |
2874 | * Several Samsung devices seem to drop off the PCIe bus |
2875 | * randomly when APST is on and uses the deepest sleep state. |
2876 | * This has been observed on a Samsung "SM951 NVMe SAMSUNG |
2877 | * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD |
2878 | * 950 PRO 256GB", but it seems to be restricted to two Dell |
2879 | * laptops. |
2880 | */ |
2881 | if (dmi_match(f: DMI_SYS_VENDOR, str: "Dell Inc.") && |
2882 | (dmi_match(f: DMI_PRODUCT_NAME, str: "XPS 15 9550") || |
2883 | dmi_match(f: DMI_PRODUCT_NAME, str: "Precision 5510"))) |
2884 | return NVME_QUIRK_NO_DEEPEST_PS; |
2885 | } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { |
2886 | /* |
2887 | * Samsung SSD 960 EVO drops off the PCIe bus after system |
2888 | * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as |
2889 | * within a few minutes after bootup on a Coffee Lake board - |
2890 | * ASUS PRIME Z370-A. |
2891 | */ |
2892 | if (dmi_match(f: DMI_BOARD_VENDOR, str: "ASUSTeK COMPUTER INC.") && |
2893 | (dmi_match(f: DMI_BOARD_NAME, str: "PRIME B350M-A") || |
2894 | dmi_match(f: DMI_BOARD_NAME, str: "PRIME Z370-A"))) |
2895 | return NVME_QUIRK_NO_APST; |
2896 | } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || |
2897 | pdev->device == 0xa808 || pdev->device == 0xa809)) || |
2898 | (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { |
2899 | /* |
2900 | * Force host managed nvme power settings for the lowest |
2901 | * idle power with quick resume latency on Samsung and |
2902 | * Toshiba SSDs, based on suspend behavior observed on a |
2903 | * Coffee Lake board in the LENOVO C640. |
2904 | */ |
2905 | if ((dmi_match(f: DMI_BOARD_VENDOR, str: "LENOVO")) && |
2906 | dmi_match(f: DMI_BOARD_NAME, str: "LNVNB161216")) |
2907 | return NVME_QUIRK_SIMPLE_SUSPEND; |
2908 | } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 || |
2909 | pdev->device == 0x500f)) { |
2910 | /* |
2911 | * Exclude some Kingston NV1 and A2000 devices from |
2912 | * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a |
2913 | * lot of energy with s2idle sleep on some TUXEDO platforms. |
2914 | */ |
2915 | if (dmi_match(f: DMI_BOARD_NAME, str: "NS5X_NS7XAU") || |
2916 | dmi_match(f: DMI_BOARD_NAME, str: "NS5x_7xAU") || |
2917 | dmi_match(f: DMI_BOARD_NAME, str: "NS5x_7xPU") || |
2918 | dmi_match(f: DMI_BOARD_NAME, str: "PH4PRX1_PH6PRX1")) |
2919 | return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; |
2920 | } |
2921 | |
2922 | return 0; |
2923 | } |
2924 | |
2925 | static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, |
2926 | const struct pci_device_id *id) |
2927 | { |
2928 | unsigned long quirks = id->driver_data; |
2929 | int node = dev_to_node(dev: &pdev->dev); |
2930 | struct nvme_dev *dev; |
2931 | int ret = -ENOMEM; |
2932 | |
2933 | dev = kzalloc_node(size: sizeof(*dev), GFP_KERNEL, node); |
2934 | if (!dev) |
2935 | return ERR_PTR(error: -ENOMEM); |
2936 | INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); |
2937 | mutex_init(&dev->shutdown_lock); |
2938 | |
2939 | dev->nr_write_queues = write_queues; |
2940 | dev->nr_poll_queues = poll_queues; |
2941 | dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; |
2942 | dev->queues = kcalloc_node(n: dev->nr_allocated_queues, |
2943 | size: sizeof(struct nvme_queue), GFP_KERNEL, node); |
2944 | if (!dev->queues) |
2945 | goto out_free_dev; |
2946 | |
2947 | dev->dev = get_device(dev: &pdev->dev); |
2948 | |
2949 | quirks |= check_vendor_combination_bug(pdev); |
2950 | if (!noacpi && |
2951 | !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) && |
2952 | acpi_storage_d3(dev: &pdev->dev)) { |
2953 | /* |
2954 | * Some systems use a BIOS workaround to ask for D3 on |
2955 | * platforms that support kernel managed suspend. |
2956 | */ |
2957 | dev_info(&pdev->dev, |
2958 | "platform quirk: setting simple suspend\n"); |
2959 | quirks |= NVME_QUIRK_SIMPLE_SUSPEND; |
2960 | } |
2961 | ret = nvme_init_ctrl(ctrl: &dev->ctrl, dev: &pdev->dev, ops: &nvme_pci_ctrl_ops, |
2962 | quirks); |
2963 | if (ret) |
2964 | goto out_put_device; |
2965 | |
2966 | if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) |
2967 | dma_set_mask_and_coherent(dev: &pdev->dev, DMA_BIT_MASK(48)); |
2968 | else |
2969 | dma_set_mask_and_coherent(dev: &pdev->dev, DMA_BIT_MASK(64)); |
2970 | dma_set_min_align_mask(dev: &pdev->dev, NVME_CTRL_PAGE_SIZE - 1); |
2971 | dma_set_max_seg_size(dev: &pdev->dev, size: 0xffffffff); |
2972 | |
2973 | /* |
2974 | * Limit the max command size to prevent iod->sg allocations from going |
2975 | * over a single page. |
2976 | */ |
2977 | dev->ctrl.max_hw_sectors = min_t(u32, |
2978 | NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); |
2979 | dev->ctrl.max_segments = NVME_MAX_SEGS; |
2980 | |
2981 | /* |
2982 | * There is no support for SGLs for metadata (yet), so we are limited to |
2983 | * a single integrity segment for the separate metadata pointer. |
2984 | */ |
2985 | dev->ctrl.max_integrity_segments = 1; |
2986 | return dev; |
2987 | |
2988 | out_put_device: |
2989 | put_device(dev: dev->dev); |
2990 | kfree(objp: dev->queues); |
2991 | out_free_dev: |
2992 | kfree(objp: dev); |
2993 | return ERR_PTR(error: ret); |
2994 | } |
2995 | |
2996 | static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
2997 | { |
2998 | struct nvme_dev *dev; |
2999 | int result = -ENOMEM; |
3000 | |
3001 | dev = nvme_pci_alloc_dev(pdev, id); |
3002 | if (IS_ERR(ptr: dev)) |
3003 | return PTR_ERR(ptr: dev); |
3004 | |
3005 | result = nvme_dev_map(dev); |
3006 | if (result) |
3007 | goto out_uninit_ctrl; |
3008 | |
3009 | result = nvme_setup_prp_pools(dev); |
3010 | if (result) |
3011 | goto out_dev_unmap; |
3012 | |
3013 | result = nvme_pci_alloc_iod_mempool(dev); |
3014 | if (result) |
3015 | goto out_release_prp_pools; |
3016 | |
3017 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
3018 | |
3019 | result = nvme_pci_enable(dev); |
3020 | if (result) |
3021 | goto out_release_iod_mempool; |
3022 | |
3023 | result = nvme_alloc_admin_tag_set(ctrl: &dev->ctrl, set: &dev->admin_tagset, |
3024 | ops: &nvme_mq_admin_ops, cmd_size: sizeof(struct nvme_iod)); |
3025 | if (result) |
3026 | goto out_disable; |
3027 | |
3028 | /* |
3029 | * Mark the controller as connecting before sending admin commands to |
3030 | * allow the timeout handler to do the right thing. |
3031 | */ |
3032 | if (!nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_CONNECTING)) { |
3033 | dev_warn(dev->ctrl.device, |
3034 | "failed to mark controller CONNECTING\n"); |
3035 | result = -EBUSY; |
3036 | goto out_disable; |
3037 | } |
3038 | |
3039 | result = nvme_init_ctrl_finish(ctrl: &dev->ctrl, was_suspended: false); |
3040 | if (result) |
3041 | goto out_disable; |
3042 | |
3043 | nvme_dbbuf_dma_alloc(dev); |
3044 | |
3045 | result = nvme_setup_host_mem(dev); |
3046 | if (result < 0) |
3047 | goto out_disable; |
3048 | |
3049 | result = nvme_setup_io_queues(dev); |
3050 | if (result) |
3051 | goto out_disable; |
3052 | |
3053 | if (dev->online_queues > 1) { |
3054 | nvme_alloc_io_tag_set(ctrl: &dev->ctrl, set: &dev->tagset, ops: &nvme_mq_ops, |
3055 | nr_maps: nvme_pci_nr_maps(dev), cmd_size: sizeof(struct nvme_iod)); |
3056 | nvme_dbbuf_set(dev); |
3057 | } |
3058 | |
3059 | if (!dev->ctrl.tagset) |
3060 | dev_warn(dev->ctrl.device, "IO queues not created\n"); |
3061 | |
3062 | if (!nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_LIVE)) { |
3063 | dev_warn(dev->ctrl.device, |
3064 | "failed to mark controller live state\n"); |
3065 | result = -ENODEV; |
3066 | goto out_disable; |
3067 | } |
3068 | |
3069 | pci_set_drvdata(pdev, data: dev); |
3070 | |
3071 | nvme_start_ctrl(ctrl: &dev->ctrl); |
3072 | nvme_put_ctrl(ctrl: &dev->ctrl); |
3073 | flush_work(work: &dev->ctrl.scan_work); |
3074 | return 0; |
3075 | |
3076 | out_disable: |
3077 | nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_DELETING); |
3078 | nvme_dev_disable(dev, shutdown: true); |
3079 | nvme_free_host_mem(dev); |
3080 | nvme_dev_remove_admin(dev); |
3081 | nvme_dbbuf_dma_free(dev); |
3082 | nvme_free_queues(dev, lowest: 0); |
3083 | out_release_iod_mempool: |
3084 | mempool_destroy(pool: dev->iod_mempool); |
3085 | out_release_prp_pools: |
3086 | nvme_release_prp_pools(dev); |
3087 | out_dev_unmap: |
3088 | nvme_dev_unmap(dev); |
3089 | out_uninit_ctrl: |
3090 | nvme_uninit_ctrl(ctrl: &dev->ctrl); |
3091 | nvme_put_ctrl(ctrl: &dev->ctrl); |
3092 | return result; |
3093 | } |
3094 | |
3095 | static void nvme_reset_prepare(struct pci_dev *pdev) |
3096 | { |
3097 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3098 | |
3099 | /* |
3100 | * We don't need to check the return value from waiting for the reset |
3101 | * state as the pci_dev device lock is held, making it impossible to race |
3102 | * with ->remove(). |
3103 | */ |
3104 | nvme_disable_prepare_reset(dev, shutdown: false); |
3105 | nvme_sync_queues(ctrl: &dev->ctrl); |
3106 | } |
3107 | |
3108 | static void nvme_reset_done(struct pci_dev *pdev) |
3109 | { |
3110 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3111 | |
3112 | if (!nvme_try_sched_reset(ctrl: &dev->ctrl)) |
3113 | flush_work(work: &dev->ctrl.reset_work); |
3114 | } |
3115 | |
3116 | static void nvme_shutdown(struct pci_dev *pdev) |
3117 | { |
3118 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3119 | |
3120 | nvme_disable_prepare_reset(dev, shutdown: true); |
3121 | } |
3122 | |
3123 | /* |
3124 | * The driver's remove may be called on a device in a partially initialized |
3125 | * state. This function must not have any dependencies on the device state in |
3126 | * order to proceed. |
3127 | */ |
3128 | static void nvme_remove(struct pci_dev *pdev) |
3129 | { |
3130 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3131 | |
3132 | nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_DELETING); |
3133 | pci_set_drvdata(pdev, NULL); |
3134 | |
3135 | if (!pci_device_is_present(pdev)) { |
3136 | nvme_change_ctrl_state(ctrl: &dev->ctrl, new_state: NVME_CTRL_DEAD); |
3137 | nvme_dev_disable(dev, shutdown: true); |
3138 | } |
3139 | |
3140 | flush_work(work: &dev->ctrl.reset_work); |
3141 | nvme_stop_ctrl(ctrl: &dev->ctrl); |
3142 | nvme_remove_namespaces(ctrl: &dev->ctrl); |
3143 | nvme_dev_disable(dev, shutdown: true); |
3144 | nvme_free_host_mem(dev); |
3145 | nvme_dev_remove_admin(dev); |
3146 | nvme_dbbuf_dma_free(dev); |
3147 | nvme_free_queues(dev, lowest: 0); |
3148 | mempool_destroy(pool: dev->iod_mempool); |
3149 | nvme_release_prp_pools(dev); |
3150 | nvme_dev_unmap(dev); |
3151 | nvme_uninit_ctrl(ctrl: &dev->ctrl); |
3152 | } |
3153 | |
3154 | #ifdef CONFIG_PM_SLEEP |
3155 | static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) |
3156 | { |
3157 | return nvme_get_features(dev: ctrl, fid: NVME_FEAT_POWER_MGMT, dword11: 0, NULL, buflen: 0, result: ps); |
3158 | } |
3159 | |
3160 | static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) |
3161 | { |
3162 | return nvme_set_features(dev: ctrl, fid: NVME_FEAT_POWER_MGMT, dword11: ps, NULL, buflen: 0, NULL); |
3163 | } |
3164 | |
3165 | static int nvme_resume(struct device *dev) |
3166 | { |
3167 | struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); |
3168 | struct nvme_ctrl *ctrl = &ndev->ctrl; |
3169 | |
3170 | if (ndev->last_ps == U32_MAX || |
3171 | nvme_set_power_state(ctrl, ps: ndev->last_ps) != 0) |
3172 | goto reset; |
3173 | if (ctrl->hmpre && nvme_setup_host_mem(dev: ndev)) |
3174 | goto reset; |
3175 | |
3176 | return 0; |
3177 | reset: |
3178 | return nvme_try_sched_reset(ctrl); |
3179 | } |
3180 | |
3181 | static int nvme_suspend(struct device *dev) |
3182 | { |
3183 | struct pci_dev *pdev = to_pci_dev(dev); |
3184 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
3185 | struct nvme_ctrl *ctrl = &ndev->ctrl; |
3186 | int ret = -EBUSY; |
3187 | |
3188 | ndev->last_ps = U32_MAX; |
3189 | |
3190 | /* |
3191 | * The platform does not remove power for a kernel managed suspend so |
3192 | * use host managed nvme power settings for lowest idle power if |
3193 | * possible. This should have quicker resume latency than a full device |
3194 | * shutdown. But if the firmware is involved after the suspend or the |
3195 | * device does not support any non-default power states, shut down the |
3196 | * device fully. |
3197 | * |
3198 | * If ASPM is not enabled for the device, shut down the device and allow |
3199 | * the PCI bus layer to put it into D3 in order to take the PCIe link |
3200 | * down, so as to allow the platform to achieve its minimum low-power |
3201 | * state (which may not be possible if the link is up). |
3202 | */ |
3203 | if (pm_suspend_via_firmware() || !ctrl->npss || |
3204 | !pcie_aspm_enabled(pdev) || |
3205 | (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) |
3206 | return nvme_disable_prepare_reset(dev: ndev, shutdown: true); |
3207 | |
3208 | nvme_start_freeze(ctrl); |
3209 | nvme_wait_freeze(ctrl); |
3210 | nvme_sync_queues(ctrl); |
3211 | |
3212 | if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) |
3213 | goto unfreeze; |
3214 | |
3215 | /* |
3216 | * Host memory access may not be successful in a system suspend state, |
3217 | * but the specification allows the controller to access memory in a |
3218 | * non-operational power state. |
3219 | */ |
3220 | if (ndev->hmb) { |
3221 | ret = nvme_set_host_mem(dev: ndev, bits: 0); |
3222 | if (ret < 0) |
3223 | goto unfreeze; |
3224 | } |
3225 | |
3226 | ret = nvme_get_power_state(ctrl, ps: &ndev->last_ps); |
3227 | if (ret < 0) |
3228 | goto unfreeze; |
3229 | |
3230 | /* |
3231 | * A saved state prevents pci pm from generically controlling the |
3232 | * device's power. If we're using protocol specific settings, we don't |
3233 | * want pci interfering. |
3234 | */ |
3235 | pci_save_state(dev: pdev); |
3236 | |
3237 | ret = nvme_set_power_state(ctrl, ps: ctrl->npss); |
3238 | if (ret < 0) |
3239 | goto unfreeze; |
3240 | |
3241 | if (ret) { |
3242 | /* discard the saved state */ |
3243 | pci_load_saved_state(dev: pdev, NULL); |
3244 | |
3245 | /* |
3246 | * Clearing npss forces a controller reset on resume. The |
3247 | * correct value will be rediscovered then. |
3248 | */ |
3249 | ret = nvme_disable_prepare_reset(dev: ndev, shutdown: true); |
3250 | ctrl->npss = 0; |
3251 | } |
3252 | unfreeze: |
3253 | nvme_unfreeze(ctrl); |
3254 | return ret; |
3255 | } |
3256 | |
3257 | static int nvme_simple_suspend(struct device *dev) |
3258 | { |
3259 | struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); |
3260 | |
3261 | return nvme_disable_prepare_reset(dev: ndev, shutdown: true); |
3262 | } |
3263 | |
3264 | static int nvme_simple_resume(struct device *dev) |
3265 | { |
3266 | struct pci_dev *pdev = to_pci_dev(dev); |
3267 | struct nvme_dev *ndev = pci_get_drvdata(pdev); |
3268 | |
3269 | return nvme_try_sched_reset(ctrl: &ndev->ctrl); |
3270 | } |
3271 | |
3272 | static const struct dev_pm_ops nvme_dev_pm_ops = { |
3273 | .suspend = nvme_suspend, |
3274 | .resume = nvme_resume, |
3275 | .freeze = nvme_simple_suspend, |
3276 | .thaw = nvme_simple_resume, |
3277 | .poweroff = nvme_simple_suspend, |
3278 | .restore = nvme_simple_resume, |
3279 | }; |
3280 | #endif /* CONFIG_PM_SLEEP */ |
3281 | |
3282 | static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, |
3283 | pci_channel_state_t state) |
3284 | { |
3285 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3286 | |
3287 | /* |
3288 | * A frozen channel requires a reset. When detected, this method will |
3289 | * shut down the controller to quiesce. The controller will be restarted |
3290 | * after the slot reset through the driver's slot_reset callback. |
3291 | */ |
3292 | switch (state) { |
3293 | case pci_channel_io_normal: |
3294 | return PCI_ERS_RESULT_CAN_RECOVER; |
3295 | case pci_channel_io_frozen: |
3296 | dev_warn(dev->ctrl.device, |
3297 | "frozen state error detected, reset controller\n"); |
3298 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { |
3299 | nvme_dev_disable(dev, true); |
3300 | return PCI_ERS_RESULT_DISCONNECT; |
3301 | } |
3302 | nvme_dev_disable(dev, false); |
3303 | return PCI_ERS_RESULT_NEED_RESET; |
3304 | case pci_channel_io_perm_failure: |
3305 | dev_warn(dev->ctrl.device, |
3306 | "failure state error detected, request disconnect\n"); |
3307 | return PCI_ERS_RESULT_DISCONNECT; |
3308 | } |
3309 | return PCI_ERS_RESULT_NEED_RESET; |
3310 | } |
3311 | |
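| /* |
| * Runs after the PCI core has reset the slot: restore the saved config |
| * space and schedule a controller reset to rebuild the queues. |
| */ |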
3312 | static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) |
3313 | { |
3314 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3315 | |
3316 | dev_info(dev->ctrl.device, "restart after slot reset\n"); |
3317 | pci_restore_state(pdev); |
3318 | if (!nvme_try_sched_reset(&dev->ctrl)) |
3319 | nvme_unquiesce_io_queues(&dev->ctrl); |
3320 | return PCI_ERS_RESULT_RECOVERED; |
3321 | } |
3322 | |
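| /* |
| * Wait for the reset scheduled from nvme_slot_reset() to finish before |
| * declaring recovery complete. |
| */ |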
3323 | static void nvme_error_resume(struct pci_dev *pdev) |
3324 | { |
3325 | struct nvme_dev *dev = pci_get_drvdata(pdev); |
3326 | |
3327 | flush_work(&dev->ctrl.reset_work); |
3328 | } |
3329 | |
3330 | static const struct pci_error_handlers nvme_err_handler = { |
3331 | .error_detected = nvme_error_detected, |
3332 | .slot_reset = nvme_slot_reset, |
3333 | .resume = nvme_error_resume, |
3334 | .reset_prepare = nvme_reset_prepare, |
3335 | .reset_done = nvme_reset_done, |
3336 | }; |
3337 | |
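| /* |
| * driver_data carries NVME_QUIRK_* flags that are applied at probe time. |
| * The PCI_CLASS_STORAGE_EXPRESS entry at the end catches any NVMe class |
| * device that is not listed explicitly. |
| */ |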
3338 | static const struct pci_device_id nvme_id_table[] = { |
3339 | { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ |
3340 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
3341 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
3342 | { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ |
3343 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
3344 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
3345 | { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ |
3346 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
3347 | NVME_QUIRK_DEALLOCATE_ZEROES | |
3348 | NVME_QUIRK_IGNORE_DEV_SUBNQN | |
3349 | NVME_QUIRK_BOGUS_NID, }, |
3350 | { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ |
3351 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
3352 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
3353 | { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ |
3354 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
3355 | NVME_QUIRK_MEDIUM_PRIO_SQ | |
3356 | NVME_QUIRK_NO_TEMP_THRESH_CHANGE | |
3357 | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3358 | { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ |
3359 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
3360 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ |
3361 | .driver_data = NVME_QUIRK_IDENTIFY_CNS | |
3362 | NVME_QUIRK_DISABLE_WRITE_ZEROES | |
3363 | NVME_QUIRK_BOGUS_NID, }, |
3364 | { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ |
3365 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3366 | { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ |
3367 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
3368 | NVME_QUIRK_BOGUS_NID, }, |
3369 | { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ |
3370 | .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | |
3371 | NVME_QUIRK_BOGUS_NID, }, |
3372 | { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ |
3373 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | |
3374 | NVME_QUIRK_NO_NS_DESC_LIST, }, |
3375 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ |
3376 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
3377 | { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ |
3378 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
3379 | { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ |
3380 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
3381 | { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ |
3382 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
3383 | { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ |
3384 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | |
3385 | NVME_QUIRK_DISABLE_WRITE_ZEROES | |
3386 | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
3387 | { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ |
3388 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3389 | { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ |
3390 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | |
3391 | NVME_QUIRK_BOGUS_NID, }, |
3392 | { PCI_DEVICE(0x1987, 0x5019), /* Phison E19 */ |
3393 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3394 | { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */ |
3395 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3396 | { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ |
3397 | .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | |
3398 | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
3399 | { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ |
3400 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3401 | { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ |
3402 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | |
3403 | NVME_QUIRK_BOGUS_NID, }, |
3404 | { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */ |
3405 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3406 | { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ |
3407 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
3408 | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
3409 | { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ |
3410 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, |
3411 | { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ |
3412 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3413 | { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ |
3414 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3415 | { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ |
3416 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3417 | { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */ |
3418 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3419 | { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ |
3420 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3421 | { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ |
3422 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3423 | { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ |
3424 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES | |
3425 | NVME_QUIRK_BOGUS_NID, }, |
3426 | { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ |
3427 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3428 | { PCI_DEVICE(0x144d, 0xa802), /* Samsung SM953 */ |
3429 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3430 | { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ |
3431 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3432 | { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ |
3433 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3434 | { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ |
3435 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, |
3436 | { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ |
3437 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, |
3438 | { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */ |
3439 | .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, |
3440 | { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ |
3441 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3442 | { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ |
3443 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3444 | { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ |
3445 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3446 | { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ |
3447 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3448 | { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ |
3449 | .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, |
3450 | { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */ |
3451 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3452 | { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */ |
3453 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3454 | { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ |
3455 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3456 | { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ |
3457 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3458 | { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ |
3459 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3460 | { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */ |
3461 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3462 | { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ |
3463 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3464 | { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ |
3465 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3466 | { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ |
3467 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, |
3468 | { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ |
3469 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, |
3470 | { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ |
3471 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3472 | { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ |
3473 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3474 | { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */ |
3475 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3476 | { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ |
3477 | .driver_data = NVME_QUIRK_BOGUS_NID | |
3478 | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
3479 | { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ |
3480 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3481 | { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ |
3482 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3483 | { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ |
3484 | .driver_data = NVME_QUIRK_BOGUS_NID, }, |
3485 | { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), |
3486 | .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, |
3487 | { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), |
3488 | .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, |
3489 | { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), |
3490 | .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, |
3491 | { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), |
3492 | .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, |
3493 | { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), |
3494 | .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, |
3495 | { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), |
3496 | .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, |
3497 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), |
3498 | .driver_data = NVME_QUIRK_SINGLE_VECTOR }, |
3499 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, |
3500 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), |
3501 | .driver_data = NVME_QUIRK_SINGLE_VECTOR | |
3502 | NVME_QUIRK_128_BYTES_SQES | |
3503 | NVME_QUIRK_SHARED_TAGS | |
3504 | NVME_QUIRK_SKIP_CID_GEN | |
3505 | NVME_QUIRK_IDENTIFY_CNS }, |
3506 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
3507 | { 0, } |
3508 | }; |
3509 | MODULE_DEVICE_TABLE(pci, nvme_id_table); |
3510 | |
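| /* PROBE_PREFER_ASYNCHRONOUS: probe controllers asynchronously so slow |
| * device initialization does not hold up boot. */ |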
3511 | static struct pci_driver nvme_driver = { |
3512 | .name = "nvme", |
3513 | .id_table = nvme_id_table, |
3514 | .probe = nvme_probe, |
3515 | .remove = nvme_remove, |
3516 | .shutdown = nvme_shutdown, |
3517 | .driver = { |
3518 | .probe_type = PROBE_PREFER_ASYNCHRONOUS, |
3519 | #ifdef CONFIG_PM_SLEEP |
3520 | .pm = &nvme_dev_pm_ops, |
3521 | #endif |
3522 | }, |
3523 | .sriov_configure = pci_sriov_configure_simple, |
3524 | .err_handler = &nvme_err_handler, |
3525 | }; |
3526 | |
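| /* |
| * The BUILD_BUG_ONs are compile-time sanity checks: the admin commands |
| * used here must be exactly one 64-byte submission queue entry, and the |
| * scatter/gather limits must fit the per-request allocations. |
| */ |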
3527 | static int __init nvme_init(void) |
3528 | { |
3529 | BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); |
3530 | BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); |
3531 | BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); |
3532 | BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); |
3533 | BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); |
3534 | BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); |
3535 | BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); |
3536 | |
3537 | return pci_register_driver(&nvme_driver); |
3538 | } |
3539 | |
3540 | static void __exit nvme_exit(void) |
3541 | { |
3542 | pci_unregister_driver(&nvme_driver); |
3543 | flush_workqueue(nvme_wq); |
3544 | } |
3545 | |
3546 | MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); |
3547 | MODULE_LICENSE("GPL"); |
3548 | MODULE_VERSION("1.0"); |
3549 | MODULE_DESCRIPTION("NVMe host PCIe transport driver"); |
3550 | module_init(nvme_init); |
3551 | module_exit(nvme_exit); |
3552 |