// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>

#include "sd.h"

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

static unsigned int virtscsi_poll_queues;
module_param(virtscsi_poll_queues, uint, 0644);
MODULE_PARM_DESC(virtscsi_poll_queues,
		 "The number of dedicated virtqueues for polling I/O");

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;
	int io_queues[HCTX_MAX_TYPES];

	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

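/*
 * Regular commands live in per-command private data (see .cmd_size in the
 * host template); this cache/mempool only backs the TMF commands built in
 * the error handlers, where a plain allocation could fail under memory
 * pressure and the mempool guarantees forward progress.
 */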
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (resid)
		scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}

/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		fallthrough;
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (resp->sense_len) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
	}

	scsi_done(sc);
}

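/*
 * virtscsi_vq_done - drain a virtqueue, running @fn on each completed buffer
 *
 * Callbacks are disabled while draining so completions arriving mid-loop
 * do not raise redundant interrupts; virtqueue_enable_cb() returns false
 * if more buffers appeared in the meantime, in which case the loop runs
 * again so none are missed.
 */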
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);

static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync.  */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

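/*
 * Transport reset events signal hot-plug or hot-unplug of a LUN.  The
 * 8-byte lun field mirrors the request header encoding below: byte 1
 * holds the target id and bytes 2-3 hold the LUN.
 */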
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		if (lun == 0) {
			scsi_scan_target(&shost->shost_gendev, 0, target,
					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		} else {
			scsi_add_device(shost, 0, target, lun);
		}
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	 * "Capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(sdev);

	scsi_device_put(sdev);
}

static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int result, inquiry_len, inq_result_len = 256;
	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);

	if (!inq_result)
		return -ENOMEM;

	shost_for_each_device(sdev, shost) {
		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;

		memset(scsi_cmd, 0, sizeof(scsi_cmd));
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) inquiry_len;

		memset(inq_result, 0, inq_result_len);

		result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
					  inq_result, inquiry_len,
					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (result == 0 && inq_result[0] >> 5) {
			/* PQ indicates the LUN is not attached */
			scsi_remove_device(sdev);
		} else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
			/*
			 * If all LUNs of a virtio-scsi device are unplugged
			 * it will respond with BAD TARGET on any INQUIRY
			 * command.
			 * Remove the device in this case as well.
			 */
			scsi_remove_device(sdev);
		}
	}

	kfree(inq_result);
	return 0;
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		int ret;

		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						 VIRTIO_SCSI_T_EVENTS_MISSED);
		ret = virtscsi_rescan_hotunplug(vscsi);
		if (ret)
			return;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
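	/* Resubmit the buffer so the device can post further events. */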
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

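/*
 * Build the scatterlist for a request.  Per the virtio spec all
 * device-readable buffers must precede all device-writable ones, so the
 * layout is: request header, (WRITE protection SGLs), data-out payload,
 * then response header, (READ protection SGLs), data-in payload.  At
 * most six entries are ever needed, hence sgs[6] below.
 */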
static int __virtscsi_add_cmd(struct virtqueue *vq,
			      struct virtio_scsi_cmd *cmd,
			      size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
{
	bool needs_kick;
	unsigned long flags;

	spin_lock_irqsave(&vq->vq_lock, flags);
	needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @kick	: whether to kick the virtqueue immediately
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size,
			    bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

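/*
 * The 8-byte virtio-scsi LUN field: byte 0 is fixed at 1, byte 1 is the
 * target id, and bytes 2-3 carry the LUN in SAM flat addressing (hence
 * the 0x40 flag ORed into the high byte).  The tag is the scsi_cmnd
 * pointer itself, which is unique while the command is in flight.
 */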
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->q->disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif

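/*
 * Each blk-mq hardware queue maps 1:1 onto a request virtqueue, so the
 * hwq index recovered from the command's unique tag picks the vq.
 */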
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}

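/*
 * virtscsi_tmf - send a task management function on the control vq and
 * wait for the device's reply.  @cmd comes from virtscsi_cmd_pool and is
 * always returned to it here.
 */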
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			     sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, scsi_done() will do nothing, because the
	 * command timed out and hence SCMD_STATE_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					   VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
	 * may have transfer limits which come from the host SCSI
	 * controller or something on the host side other than the
	 * target itself.
	 *
	 * To make this work properly, the hypervisor can adjust the
	 * target's VPD information to advertise these limits.  But
	 * for that to work, the guest has to look at the VPD pages,
	 * which we won't do by default if it is an SPC-2 device, even
	 * if it does actually support it.
	 *
	 * So, set the blist to always try to read the VPD pages.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev: Virtscsi target whose queue depth to change
 * @qdepth: New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}

static void virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	int i, qoff;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = vscsi->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(map);
		else
			blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
	}
}

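/*
 * Poll-mode completion path: called by the block layer for queues in the
 * HCTX_TYPE_POLL map, which were created without a virtqueue callback,
 * so completions are only ever reaped here.
 */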
static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
	unsigned long flags;
	unsigned int len;
	int found = 0;
	void *buf;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);

	while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
		virtscsi_complete_cmd(vscsi, buf);
		found++;
	}

	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);

	return found;
}

static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
}

/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}

static const struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.mq_poll = virtscsi_mq_poll,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};

#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}

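/*
 * Virtqueue layout: vq 0 is control, vq 1 is event, and vqs 2..n are
 * request queues (hence VIRTIO_SCSI_VQ_BASE and .pre_vectors = 2).  The
 * trailing request vqs, if any, are reserved for polling and get no
 * callback.
 */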
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs, num_poll_vqs, num_req_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_req_vqs = vscsi->num_queues;
	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
				  GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
			     num_req_vqs - 1);
	vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
	vscsi->io_queues[HCTX_TYPE_READ] = 0;
	vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vscsi->io_queues[HCTX_TYPE_DEFAULT],
		 vscsi->io_queues[HCTX_TYPE_READ],
		 vscsi->io_queues[HCTX_TYPE_POLL]);

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	for (; i < num_vqs; i++) {
		callbacks[i] = NULL;
		names[i] = "request_poll";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ?: 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	shost->nr_maps = 1;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	if (vscsi->io_queues[HCTX_TYPE_POLL])
		shost->nr_maps = HCTX_TYPE_POLL + 1;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init virtio_scsi_init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	mempool_destroy(virtscsi_cmd_pool);
	virtscsi_cmd_pool = NULL;
	kmem_cache_destroy(virtscsi_cmd_cache);
	virtscsi_cmd_cache = NULL;
	return ret;
}

static void __exit virtio_scsi_fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(virtio_scsi_init);
module_exit(virtio_scsi_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");