// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDUSE: vDPA Device in Userspace
 *
 * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
 *
 * Author: Xie Yongji <xieyongji@bytedance.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dma-map-ops.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_blk.h>
#include <linux/mod_devicetable.h>

#include "iova_domain.h"

#define DRV_AUTHOR "Yongji Xie <xieyongji@bytedance.com>"
#define DRV_DESC "vDPA Device in Userspace"
#define DRV_LICENSE "GPL v2"

#define VDUSE_DEV_MAX (1U << MINORBITS)
#define VDUSE_MAX_BOUNCE_SIZE (1024 * 1024 * 1024)
#define VDUSE_MIN_BOUNCE_SIZE (1024 * 1024)
#define VDUSE_BOUNCE_SIZE (64 * 1024 * 1024)
/* 128 MB reserved for virtqueue creation */
#define VDUSE_IOVA_SIZE (VDUSE_MAX_BOUNCE_SIZE + 128 * 1024 * 1024)
#define VDUSE_MSG_DEFAULT_TIMEOUT 30

#define IRQ_UNBOUND -1

struct vduse_virtqueue {
	u16 index;
	u16 num_max;
	u32 num;
	u64 desc_addr;
	u64 driver_addr;
	u64 device_addr;
	struct vdpa_vq_state state;
	bool ready;
	bool kicked;
	spinlock_t kick_lock;
	spinlock_t irq_lock;
	struct eventfd_ctx *kickfd;
	struct vdpa_callback cb;
	struct work_struct inject;
	struct work_struct kick;
	int irq_effective_cpu;
	struct cpumask irq_affinity;
	struct kobject kobj;
};

struct vduse_dev;

struct vduse_vdpa {
	struct vdpa_device vdpa;
	struct vduse_dev *dev;
};

struct vduse_umem {
	unsigned long iova;
	unsigned long npages;
	struct page **pages;
	struct mm_struct *mm;
};

struct vduse_dev {
	struct vduse_vdpa *vdev;
	struct device *dev;
	struct vduse_virtqueue **vqs;
	struct vduse_iova_domain *domain;
	char *name;
	struct mutex lock;
	spinlock_t msg_lock;
	u64 msg_unique;
	u32 msg_timeout;
	wait_queue_head_t waitq;
	struct list_head send_list;
	struct list_head recv_list;
	struct vdpa_callback config_cb;
	struct work_struct inject;
	spinlock_t irq_lock;
	struct rw_semaphore rwsem;
	int minor;
	bool broken;
	bool connected;
	u64 api_version;
	u64 device_features;
	u64 driver_features;
	u32 device_id;
	u32 vendor_id;
	u32 generation;
	u32 config_size;
	void *config;
	u8 status;
	u32 vq_num;
	u32 vq_align;
	struct vduse_umem *umem;
	struct mutex mem_lock;
	unsigned int bounce_size;
	struct mutex domain_lock;
};

struct vduse_dev_msg {
	struct vduse_dev_request req;
	struct vduse_dev_response resp;
	struct list_head list;
	wait_queue_head_t waitq;
	bool completed;
};

struct vduse_control {
	u64 api_version;
};

static DEFINE_MUTEX(vduse_lock);
static DEFINE_IDR(vduse_idr);

static dev_t vduse_major;
static struct cdev vduse_ctrl_cdev;
static struct cdev vduse_cdev;
static struct workqueue_struct *vduse_irq_wq;
static struct workqueue_struct *vduse_irq_bound_wq;

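/*
 * Only virtio block devices may be created for now: the requested
 * device type is checked against this list in vduse_validate_config().
 */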
static u32 allowed_device_id[] = {
	VIRTIO_ID_BLOCK,
};

static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
{
	struct vduse_vdpa *vdev = container_of(vdpa, struct vduse_vdpa, vdpa);

	return vdev->dev;
}

static inline struct vduse_dev *dev_to_vduse(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_vduse(vdpa);
}

static struct vduse_dev_msg *vduse_find_msg(struct list_head *head,
					    uint32_t request_id)
{
	struct vduse_dev_msg *msg;

	list_for_each_entry(msg, head, list) {
		if (msg->req.request_id == request_id) {
			list_del(&msg->list);
			return msg;
		}
	}

	return NULL;
}

static struct vduse_dev_msg *vduse_dequeue_msg(struct list_head *head)
{
	struct vduse_dev_msg *msg = NULL;

	if (!list_empty(head)) {
		msg = list_first_entry(head, struct vduse_dev_msg, list);
		list_del(&msg->list);
	}

	return msg;
}

static void vduse_enqueue_msg(struct list_head *head,
			      struct vduse_dev_msg *msg)
{
	list_add_tail(&msg->list, head);
}

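/*
 * Fail every in-flight request and mark the device broken. Called with
 * msg_lock held. Once broken, new requests fail with -EIO and poll()
 * reports EPOLLERR.
 */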
static void vduse_dev_broken(struct vduse_dev *dev)
{
	struct vduse_dev_msg *msg, *tmp;

	if (unlikely(dev->broken))
		return;

	list_splice_init(&dev->recv_list, &dev->send_list);
	list_for_each_entry_safe(msg, tmp, &dev->send_list, list) {
		list_del(&msg->list);
		msg->completed = 1;
		msg->resp.result = VDUSE_REQ_RESULT_FAILED;
		wake_up(&msg->waitq);
	}
	dev->broken = true;
	wake_up(&dev->waitq);
}

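/*
 * Send a request to the userspace daemon and wait (killable) for its
 * response. A zero msg_timeout means wait forever; otherwise a timed-out
 * request marks the whole device broken.
 */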
static int vduse_dev_msg_sync(struct vduse_dev *dev,
			      struct vduse_dev_msg *msg)
{
	int ret;

	if (unlikely(dev->broken))
		return -EIO;

	init_waitqueue_head(&msg->waitq);
	spin_lock(&dev->msg_lock);
	if (unlikely(dev->broken)) {
		spin_unlock(&dev->msg_lock);
		return -EIO;
	}
	msg->req.request_id = dev->msg_unique++;
	vduse_enqueue_msg(&dev->send_list, msg);
	wake_up(&dev->waitq);
	spin_unlock(&dev->msg_lock);
	if (dev->msg_timeout)
		ret = wait_event_killable_timeout(msg->waitq, msg->completed,
						  (long)dev->msg_timeout * HZ);
	else
		ret = wait_event_killable(msg->waitq, msg->completed);

	spin_lock(&dev->msg_lock);
	if (!msg->completed) {
		list_del(&msg->list);
		msg->resp.result = VDUSE_REQ_RESULT_FAILED;
		/* Mark the device as malfunctioning when there is a timeout */
		if (!ret)
			vduse_dev_broken(dev);
	}
	ret = (msg->resp.result == VDUSE_REQ_RESULT_OK) ? 0 : -EIO;
	spin_unlock(&dev->msg_lock);

	return ret;
}

static int vduse_dev_get_vq_state_packed(struct vduse_dev *dev,
					 struct vduse_virtqueue *vq,
					 struct vdpa_vq_state_packed *packed)
{
	struct vduse_dev_msg msg = { 0 };
	int ret;

	msg.req.type = VDUSE_GET_VQ_STATE;
	msg.req.vq_state.index = vq->index;

	ret = vduse_dev_msg_sync(dev, &msg);
	if (ret)
		return ret;

	packed->last_avail_counter =
		msg.resp.vq_state.packed.last_avail_counter & 0x0001;
	packed->last_avail_idx =
		msg.resp.vq_state.packed.last_avail_idx & 0x7FFF;
	packed->last_used_counter =
		msg.resp.vq_state.packed.last_used_counter & 0x0001;
	packed->last_used_idx =
		msg.resp.vq_state.packed.last_used_idx & 0x7FFF;

	return 0;
}

static int vduse_dev_get_vq_state_split(struct vduse_dev *dev,
					struct vduse_virtqueue *vq,
					struct vdpa_vq_state_split *split)
{
	struct vduse_dev_msg msg = { 0 };
	int ret;

	msg.req.type = VDUSE_GET_VQ_STATE;
	msg.req.vq_state.index = vq->index;

	ret = vduse_dev_msg_sync(dev, &msg);
	if (ret)
		return ret;

	split->avail_index = msg.resp.vq_state.split.avail_index;

	return 0;
}

static int vduse_dev_set_status(struct vduse_dev *dev, u8 status)
{
	struct vduse_dev_msg msg = { 0 };

	msg.req.type = VDUSE_SET_STATUS;
	msg.req.s.status = status;

	return vduse_dev_msg_sync(dev, &msg);
}

static int vduse_dev_update_iotlb(struct vduse_dev *dev,
				  u64 start, u64 last)
{
	struct vduse_dev_msg msg = { 0 };

	if (last < start)
		return -EINVAL;

	msg.req.type = VDUSE_UPDATE_IOTLB;
	msg.req.iova.start = start;
	msg.req.iova.last = last;

	return vduse_dev_msg_sync(dev, &msg);
}

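/*
 * read() on a /dev/vduse/<name> fd: hand the oldest pending request to
 * the userspace daemon and park it on recv_list until the matching
 * response arrives via write().
 */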
static ssize_t vduse_dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vduse_dev *dev = file->private_data;
	struct vduse_dev_msg *msg;
	int size = sizeof(struct vduse_dev_request);
	ssize_t ret;

	if (iov_iter_count(to) < size)
		return -EINVAL;

	spin_lock(&dev->msg_lock);
	while (1) {
		msg = vduse_dequeue_msg(&dev->send_list);
		if (msg)
			break;

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			goto unlock;

		spin_unlock(&dev->msg_lock);
		ret = wait_event_interruptible_exclusive(dev->waitq,
					!list_empty(&dev->send_list));
		if (ret)
			return ret;

		spin_lock(&dev->msg_lock);
	}
	spin_unlock(&dev->msg_lock);
	ret = copy_to_iter(&msg->req, size, to);
	spin_lock(&dev->msg_lock);
	if (ret != size) {
		ret = -EFAULT;
		vduse_enqueue_msg(&dev->send_list, msg);
		goto unlock;
	}
	vduse_enqueue_msg(&dev->recv_list, msg);
unlock:
	spin_unlock(&dev->msg_lock);

	return ret;
}

static bool is_mem_zero(const char *ptr, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (ptr[i])
			return false;
	}
	return true;
}

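/*
 * write() on a /dev/vduse/<name> fd: complete a previously read request
 * by matching the response's request_id against recv_list and waking up
 * the kernel-side waiter.
 */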
static ssize_t vduse_dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vduse_dev *dev = file->private_data;
	struct vduse_dev_response resp;
	struct vduse_dev_msg *msg;
	size_t ret;

	ret = copy_from_iter(&resp, sizeof(resp), from);
	if (ret != sizeof(resp))
		return -EINVAL;

	if (!is_mem_zero((const char *)resp.reserved, sizeof(resp.reserved)))
		return -EINVAL;

	spin_lock(&dev->msg_lock);
	msg = vduse_find_msg(&dev->recv_list, resp.request_id);
	if (!msg) {
		ret = -ENOENT;
		goto unlock;
	}

	memcpy(&msg->resp, &resp, sizeof(resp));
	msg->completed = 1;
	wake_up(&msg->waitq);
unlock:
	spin_unlock(&dev->msg_lock);

	return ret;
}

static __poll_t vduse_dev_poll(struct file *file, poll_table *wait)
{
	struct vduse_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);

	spin_lock(&dev->msg_lock);

	if (unlikely(dev->broken))
		mask |= EPOLLERR;
	if (!list_empty(&dev->send_list))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!list_empty(&dev->recv_list))
		mask |= EPOLLOUT | EPOLLWRNORM;

	spin_unlock(&dev->msg_lock);

	return mask;
}

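/*
 * Bring the device back to its initial state: clear status and driver
 * features, bump the config generation, drop all callbacks and kickfds,
 * and flush any injection work still in flight.
 */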
static void vduse_dev_reset(struct vduse_dev *dev)
{
	int i;
	struct vduse_iova_domain *domain = dev->domain;

	/* The coherent mappings are handled in vduse_dev_free_coherent() */
	if (domain && domain->bounce_map)
		vduse_domain_reset_bounce_map(domain);

	down_write(&dev->rwsem);

	dev->status = 0;
	dev->driver_features = 0;
	dev->generation++;
	spin_lock(&dev->irq_lock);
	dev->config_cb.callback = NULL;
	dev->config_cb.private = NULL;
	spin_unlock(&dev->irq_lock);
	flush_work(&dev->inject);

	for (i = 0; i < dev->vq_num; i++) {
		struct vduse_virtqueue *vq = dev->vqs[i];

		vq->ready = false;
		vq->desc_addr = 0;
		vq->driver_addr = 0;
		vq->device_addr = 0;
		vq->num = 0;
		memset(&vq->state, 0, sizeof(vq->state));

		spin_lock(&vq->kick_lock);
		vq->kicked = false;
		if (vq->kickfd)
			eventfd_ctx_put(vq->kickfd);
		vq->kickfd = NULL;
		spin_unlock(&vq->kick_lock);

		spin_lock(&vq->irq_lock);
		vq->cb.callback = NULL;
		vq->cb.private = NULL;
		vq->cb.trigger = NULL;
		spin_unlock(&vq->irq_lock);
		flush_work(&vq->inject);
		flush_work(&vq->kick);
	}

	up_write(&dev->rwsem);
}

static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vduse_vq_kick(struct vduse_virtqueue *vq)
{
	spin_lock(&vq->kick_lock);
	if (!vq->ready)
		goto unlock;

	if (vq->kickfd)
		eventfd_signal(vq->kickfd);
	else
		vq->kicked = true;
unlock:
	spin_unlock(&vq->kick_lock);
}

static void vduse_vq_kick_work(struct work_struct *work)
{
	struct vduse_virtqueue *vq = container_of(work,
					struct vduse_virtqueue, kick);

	vduse_vq_kick(vq);
}

static void vduse_vdpa_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	if (!eventfd_signal_allowed()) {
		schedule_work(&vq->kick);
		return;
	}
	vduse_vq_kick(vq);
}

static void vduse_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
				 struct vdpa_callback *cb)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	spin_lock(&vq->irq_lock);
	vq->cb.callback = cb->callback;
	vq->cb.private = cb->private;
	vq->cb.trigger = cb->trigger;
	spin_unlock(&vq->irq_lock);
}

static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	vq->num = num;
}

static u16 vduse_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	if (vq->num)
		return vq->num;
	else
		return vq->num_max;
}

static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				    u16 idx, bool ready)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	vq->ready = ready;
}

static bool vduse_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	return vq->ready;
}

static int vduse_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				   const struct vdpa_vq_state *state)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		vq->state.packed.last_avail_counter =
				state->packed.last_avail_counter;
		vq->state.packed.last_avail_idx = state->packed.last_avail_idx;
		vq->state.packed.last_used_counter =
				state->packed.last_used_counter;
		vq->state.packed.last_used_idx = state->packed.last_used_idx;
	} else
		vq->state.split.avail_index = state->split.avail_index;

	return 0;
}

static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				   struct vdpa_vq_state *state)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	struct vduse_virtqueue *vq = dev->vqs[idx];

	if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		return vduse_dev_get_vq_state_packed(dev, vq, &state->packed);

	return vduse_dev_get_vq_state_split(dev, vq, &state->split);
}

static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->vq_align;
}

static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->device_features;
}

static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	dev->driver_features = features;
	return 0;
}

static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->driver_features;
}

static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
				     struct vdpa_callback *cb)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	spin_lock(&dev->irq_lock);
	dev->config_cb.callback = cb->callback;
	dev->config_cb.private = cb->private;
	spin_unlock(&dev->irq_lock);
}

static u16 vduse_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	u16 num_max = 0;
	int i;

	for (i = 0; i < dev->vq_num; i++)
		if (num_max < dev->vqs[i]->num_max)
			num_max = dev->vqs[i]->num_max;

	return num_max;
}

static u32 vduse_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->device_id;
}

static u32 vduse_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->vendor_id;
}

static u8 vduse_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->status;
}

static void vduse_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	if (vduse_dev_set_status(dev, status))
		return;

	dev->status = status;
}

static size_t vduse_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->config_size;
}

static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
				  void *buf, unsigned int len)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	/* Initialize the buffer in case of partial copy. */
	memset(buf, 0, len);

	if (offset > dev->config_size)
		return;

	if (len > dev->config_size - offset)
		len = dev->config_size - offset;

	memcpy(buf, dev->config + offset, len);
}

static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
				  const void *buf, unsigned int len)
{
	/* Now we only support read-only configuration space */
}

static int vduse_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	int ret = vduse_dev_set_status(dev, 0);

	vduse_dev_reset(dev);

	return ret;
}

static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return dev->generation;
}

static int vduse_vdpa_set_vq_affinity(struct vdpa_device *vdpa, u16 idx,
				      const struct cpumask *cpu_mask)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	if (cpu_mask)
		cpumask_copy(&dev->vqs[idx]->irq_affinity, cpu_mask);
	else
		cpumask_setall(&dev->vqs[idx]->irq_affinity);

	return 0;
}

static const struct cpumask *
vduse_vdpa_get_vq_affinity(struct vdpa_device *vdpa, u16 idx)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	return &dev->vqs[idx]->irq_affinity;
}

static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
			      unsigned int asid,
			      struct vhost_iotlb *iotlb)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);
	int ret;

	ret = vduse_domain_set_map(dev->domain, iotlb);
	if (ret)
		return ret;

	ret = vduse_dev_update_iotlb(dev, 0ULL, ULLONG_MAX);
	if (ret) {
		vduse_domain_clear_map(dev->domain, iotlb);
		return ret;
	}

	return 0;
}

static void vduse_vdpa_free(struct vdpa_device *vdpa)
{
	struct vduse_dev *dev = vdpa_to_vduse(vdpa);

	dev->vdev = NULL;
}

static const struct vdpa_config_ops vduse_vdpa_config_ops = {
	.set_vq_address = vduse_vdpa_set_vq_address,
	.kick_vq = vduse_vdpa_kick_vq,
	.set_vq_cb = vduse_vdpa_set_vq_cb,
	.set_vq_num = vduse_vdpa_set_vq_num,
	.get_vq_size = vduse_vdpa_get_vq_size,
	.set_vq_ready = vduse_vdpa_set_vq_ready,
	.get_vq_ready = vduse_vdpa_get_vq_ready,
	.set_vq_state = vduse_vdpa_set_vq_state,
	.get_vq_state = vduse_vdpa_get_vq_state,
	.get_vq_align = vduse_vdpa_get_vq_align,
	.get_device_features = vduse_vdpa_get_device_features,
	.set_driver_features = vduse_vdpa_set_driver_features,
	.get_driver_features = vduse_vdpa_get_driver_features,
	.set_config_cb = vduse_vdpa_set_config_cb,
	.get_vq_num_max = vduse_vdpa_get_vq_num_max,
	.get_device_id = vduse_vdpa_get_device_id,
	.get_vendor_id = vduse_vdpa_get_vendor_id,
	.get_status = vduse_vdpa_get_status,
	.set_status = vduse_vdpa_set_status,
	.get_config_size = vduse_vdpa_get_config_size,
	.get_config = vduse_vdpa_get_config,
	.set_config = vduse_vdpa_set_config,
	.get_generation = vduse_vdpa_get_generation,
	.set_vq_affinity = vduse_vdpa_set_vq_affinity,
	.get_vq_affinity = vduse_vdpa_get_vq_affinity,
	.reset = vduse_vdpa_reset,
	.set_map = vduse_vdpa_set_map,
	.free = vduse_vdpa_free,
};

static void vduse_dev_sync_single_for_device(struct device *dev,
					     dma_addr_t dma_addr, size_t size,
					     enum dma_data_direction dir)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;

	vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
}

static void vduse_dev_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dma_addr, size_t size,
					  enum dma_data_direction dir)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;

	vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
}

static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;

	return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
}

static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;

	return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
}

static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addr, gfp_t flag,
				      unsigned long attrs)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;
	unsigned long iova;
	void *addr;

	*dma_addr = DMA_MAPPING_ERROR;
	addr = vduse_domain_alloc_coherent(domain, size,
					   (dma_addr_t *)&iova, flag, attrs);
	if (!addr)
		return NULL;

	*dma_addr = (dma_addr_t)iova;

	return addr;
}

static void vduse_dev_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;

	vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
}

static size_t vduse_dev_max_mapping_size(struct device *dev)
{
	struct vduse_dev *vdev = dev_to_vduse(dev);
	struct vduse_iova_domain *domain = vdev->domain;

	return domain->bounce_size;
}

static const struct dma_map_ops vduse_dev_dma_ops = {
	.sync_single_for_device = vduse_dev_sync_single_for_device,
	.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
	.map_page = vduse_dev_map_page,
	.unmap_page = vduse_dev_unmap_page,
	.alloc = vduse_dev_alloc_coherent,
	.free = vduse_dev_free_coherent,
	.max_mapping_size = vduse_dev_max_mapping_size,
};

static unsigned int perm_to_file_flags(u8 perm)
{
	unsigned int flags = 0;

	switch (perm) {
	case VDUSE_ACCESS_WO:
		flags |= O_WRONLY;
		break;
	case VDUSE_ACCESS_RO:
		flags |= O_RDONLY;
		break;
	case VDUSE_ACCESS_RW:
		flags |= O_RDWR;
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags;
}

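/*
 * Bind (or unbind, with VDUSE_EVENTFD_DEASSIGN) an eventfd that the
 * kernel signals whenever the virtio driver kicks this virtqueue. A
 * kick that arrived while no eventfd was assigned is replayed here.
 */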
static int vduse_kickfd_setup(struct vduse_dev *dev,
			      struct vduse_vq_eventfd *eventfd)
{
	struct eventfd_ctx *ctx = NULL;
	struct vduse_virtqueue *vq;
	u32 index;

	if (eventfd->index >= dev->vq_num)
		return -EINVAL;

	index = array_index_nospec(eventfd->index, dev->vq_num);
	vq = dev->vqs[index];
	if (eventfd->fd >= 0) {
		ctx = eventfd_ctx_fdget(eventfd->fd);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	} else if (eventfd->fd != VDUSE_EVENTFD_DEASSIGN)
		return 0;

	spin_lock(&vq->kick_lock);
	if (vq->kickfd)
		eventfd_ctx_put(vq->kickfd);
	vq->kickfd = ctx;
	if (vq->ready && vq->kicked && vq->kickfd) {
		eventfd_signal(vq->kickfd);
		vq->kicked = false;
	}
	spin_unlock(&vq->kick_lock);

	return 0;
}

static bool vduse_dev_is_ready(struct vduse_dev *dev)
{
	int i;

	for (i = 0; i < dev->vq_num; i++)
		if (!dev->vqs[i]->num_max)
			return false;

	return true;
}

static void vduse_dev_irq_inject(struct work_struct *work)
{
	struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);

	spin_lock_bh(&dev->irq_lock);
	if (dev->config_cb.callback)
		dev->config_cb.callback(dev->config_cb.private);
	spin_unlock_bh(&dev->irq_lock);
}

static void vduse_vq_irq_inject(struct work_struct *work)
{
	struct vduse_virtqueue *vq = container_of(work,
					struct vduse_virtqueue, inject);

	spin_lock_bh(&vq->irq_lock);
	if (vq->ready && vq->cb.callback)
		vq->cb.callback(vq->cb.private);
	spin_unlock_bh(&vq->irq_lock);
}

static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
{
	bool signal = false;

	if (!vq->cb.trigger)
		return false;

	spin_lock_irq(&vq->irq_lock);
	if (vq->ready && vq->cb.trigger) {
		eventfd_signal(vq->cb.trigger);
		signal = true;
	}
	spin_unlock_irq(&vq->irq_lock);

	return signal;
}

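/*
 * Defer an interrupt injection to a workqueue: the unbound queue for
 * IRQ_UNBOUND, or a per-CPU bound queue when the callback has been
 * pinned to a specific CPU. Rejected unless the driver set DRIVER_OK.
 */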
static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
				    struct work_struct *irq_work,
				    int irq_effective_cpu)
{
	int ret = -EINVAL;

	down_read(&dev->rwsem);
	if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto unlock;

	ret = 0;
	if (irq_effective_cpu == IRQ_UNBOUND)
		queue_work(vduse_irq_wq, irq_work);
	else
		queue_work_on(irq_effective_cpu,
			      vduse_irq_bound_wq, irq_work);
unlock:
	up_read(&dev->rwsem);

	return ret;
}

static int vduse_dev_dereg_umem(struct vduse_dev *dev,
				u64 iova, u64 size)
{
	int ret;

	mutex_lock(&dev->mem_lock);
	ret = -ENOENT;
	if (!dev->umem)
		goto unlock;

	ret = -EINVAL;
	if (!dev->domain)
		goto unlock;

	if (dev->umem->iova != iova || size != dev->domain->bounce_size)
		goto unlock;

	vduse_domain_remove_user_bounce_pages(dev->domain);
	unpin_user_pages_dirty_lock(dev->umem->pages,
				    dev->umem->npages, true);
	atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
	mmdrop(dev->umem->mm);
	vfree(dev->umem->pages);
	kfree(dev->umem);
	dev->umem = NULL;
	ret = 0;
unlock:
	mutex_unlock(&dev->mem_lock);
	return ret;
}

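/*
 * Register a userspace memory region as the backend of the bounce
 * buffer: the whole bounce range must be covered by one page-aligned
 * region, whose pages are pinned and accounted against RLIMIT_MEMLOCK.
 */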
static int vduse_dev_reg_umem(struct vduse_dev *dev,
			      u64 iova, u64 uaddr, u64 size)
{
	struct page **page_list = NULL;
	struct vduse_umem *umem = NULL;
	long pinned = 0;
	unsigned long npages, lock_limit;
	int ret;

	if (!dev->domain || !dev->domain->bounce_map ||
	    size != dev->domain->bounce_size ||
	    iova != 0 || uaddr & ~PAGE_MASK)
		return -EINVAL;

	mutex_lock(&dev->mem_lock);
	ret = -EEXIST;
	if (dev->umem)
		goto unlock;

	ret = -ENOMEM;
	npages = size >> PAGE_SHIFT;
	page_list = __vmalloc(array_size(npages, sizeof(struct page *)),
			      GFP_KERNEL_ACCOUNT);
	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!page_list || !umem)
		goto unlock;

	mmap_read_lock(current->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&current->mm->pinned_vm) > lock_limit)
		goto out;

	pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
				page_list);
	if (pinned != npages) {
		ret = pinned < 0 ? pinned : -ENOMEM;
		goto out;
	}

	ret = vduse_domain_add_user_bounce_pages(dev->domain,
						 page_list, pinned);
	if (ret)
		goto out;

	atomic64_add(npages, &current->mm->pinned_vm);

	umem->pages = page_list;
	umem->npages = pinned;
	umem->iova = iova;
	umem->mm = current->mm;
	mmgrab(current->mm);

	dev->umem = umem;
out:
	if (ret && pinned > 0)
		unpin_user_pages(page_list, pinned);

	mmap_read_unlock(current->mm);
unlock:
	if (ret) {
		vfree(page_list);
		kfree(umem);
	}
	mutex_unlock(&dev->mem_lock);
	return ret;
}

static void vduse_vq_update_effective_cpu(struct vduse_virtqueue *vq)
{
	int curr_cpu = vq->irq_effective_cpu;

	while (true) {
		curr_cpu = cpumask_next(curr_cpu, &vq->irq_affinity);
		if (cpu_online(curr_cpu))
			break;

		if (curr_cpu >= nr_cpu_ids)
			curr_cpu = IRQ_UNBOUND;
	}

	vq->irq_effective_cpu = curr_cpu;
}

static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct vduse_dev *dev = file->private_data;
	void __user *argp = (void __user *)arg;
	int ret;

	if (unlikely(dev->broken))
		return -EPERM;

	switch (cmd) {
	case VDUSE_IOTLB_GET_FD: {
		struct vduse_iotlb_entry entry;
		struct vhost_iotlb_map *map;
		struct vdpa_map_file *map_file;
		struct file *f = NULL;

		ret = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof(entry)))
			break;

		ret = -EINVAL;
		if (entry.start > entry.last)
			break;

		mutex_lock(&dev->domain_lock);
		if (!dev->domain) {
			mutex_unlock(&dev->domain_lock);
			break;
		}
		spin_lock(&dev->domain->iotlb_lock);
		map = vhost_iotlb_itree_first(dev->domain->iotlb,
					      entry.start, entry.last);
		if (map) {
			map_file = (struct vdpa_map_file *)map->opaque;
			f = get_file(map_file->file);
			entry.offset = map_file->offset;
			entry.start = map->start;
			entry.last = map->last;
			entry.perm = map->perm;
		}
		spin_unlock(&dev->domain->iotlb_lock);
		mutex_unlock(&dev->domain_lock);
		ret = -EINVAL;
		if (!f)
			break;

		ret = -EFAULT;
		if (copy_to_user(argp, &entry, sizeof(entry))) {
			fput(f);
			break;
		}
		ret = receive_fd(f, NULL, perm_to_file_flags(entry.perm));
		fput(f);
		break;
	}
	case VDUSE_DEV_GET_FEATURES:
		/*
		 * Just mirror what driver wrote here.
		 * The driver is expected to check FEATURE_OK later.
		 */
		ret = put_user(dev->driver_features, (u64 __user *)argp);
		break;
	case VDUSE_DEV_SET_CONFIG: {
		struct vduse_config_data config;
		unsigned long size = offsetof(struct vduse_config_data,
					      buffer);

		ret = -EFAULT;
		if (copy_from_user(&config, argp, size))
			break;

		ret = -EINVAL;
		if (config.offset > dev->config_size ||
		    config.length == 0 ||
		    config.length > dev->config_size - config.offset)
			break;

		ret = -EFAULT;
		if (copy_from_user(dev->config + config.offset, argp + size,
				   config.length))
			break;

		ret = 0;
		break;
	}
	case VDUSE_DEV_INJECT_CONFIG_IRQ:
		ret = vduse_dev_queue_irq_work(dev, &dev->inject, IRQ_UNBOUND);
		break;
	case VDUSE_VQ_SETUP: {
		struct vduse_vq_config config;
		u32 index;

		ret = -EFAULT;
		if (copy_from_user(&config, argp, sizeof(config)))
			break;

		ret = -EINVAL;
		if (config.index >= dev->vq_num)
			break;

		if (!is_mem_zero((const char *)config.reserved,
				 sizeof(config.reserved)))
			break;

		index = array_index_nospec(config.index, dev->vq_num);
		dev->vqs[index]->num_max = config.max_size;
		ret = 0;
		break;
	}
	case VDUSE_VQ_GET_INFO: {
		struct vduse_vq_info vq_info;
		struct vduse_virtqueue *vq;
		u32 index;

		ret = -EFAULT;
		if (copy_from_user(&vq_info, argp, sizeof(vq_info)))
			break;

		ret = -EINVAL;
		if (vq_info.index >= dev->vq_num)
			break;

		index = array_index_nospec(vq_info.index, dev->vq_num);
		vq = dev->vqs[index];
		vq_info.desc_addr = vq->desc_addr;
		vq_info.driver_addr = vq->driver_addr;
		vq_info.device_addr = vq->device_addr;
		vq_info.num = vq->num;

		if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
			vq_info.packed.last_avail_counter =
				vq->state.packed.last_avail_counter;
			vq_info.packed.last_avail_idx =
				vq->state.packed.last_avail_idx;
			vq_info.packed.last_used_counter =
				vq->state.packed.last_used_counter;
			vq_info.packed.last_used_idx =
				vq->state.packed.last_used_idx;
		} else
			vq_info.split.avail_index =
				vq->state.split.avail_index;

		vq_info.ready = vq->ready;

		ret = -EFAULT;
		if (copy_to_user(argp, &vq_info, sizeof(vq_info)))
			break;

		ret = 0;
		break;
	}
	case VDUSE_VQ_SETUP_KICKFD: {
		struct vduse_vq_eventfd eventfd;

		ret = -EFAULT;
		if (copy_from_user(&eventfd, argp, sizeof(eventfd)))
			break;

		ret = vduse_kickfd_setup(dev, &eventfd);
		break;
	}
	case VDUSE_VQ_INJECT_IRQ: {
		u32 index;

		ret = -EFAULT;
		if (get_user(index, (u32 __user *)argp))
			break;

		ret = -EINVAL;
		if (index >= dev->vq_num)
			break;

		ret = 0;
		index = array_index_nospec(index, dev->vq_num);
		if (!vduse_vq_signal_irqfd(dev->vqs[index])) {
			vduse_vq_update_effective_cpu(dev->vqs[index]);
			ret = vduse_dev_queue_irq_work(dev,
						&dev->vqs[index]->inject,
						dev->vqs[index]->irq_effective_cpu);
		}
		break;
	}
	case VDUSE_IOTLB_REG_UMEM: {
		struct vduse_iova_umem umem;

		ret = -EFAULT;
		if (copy_from_user(&umem, argp, sizeof(umem)))
			break;

		ret = -EINVAL;
		if (!is_mem_zero((const char *)umem.reserved,
				 sizeof(umem.reserved)))
			break;

		mutex_lock(&dev->domain_lock);
		ret = vduse_dev_reg_umem(dev, umem.iova,
					 umem.uaddr, umem.size);
		mutex_unlock(&dev->domain_lock);
		break;
	}
	case VDUSE_IOTLB_DEREG_UMEM: {
		struct vduse_iova_umem umem;

		ret = -EFAULT;
		if (copy_from_user(&umem, argp, sizeof(umem)))
			break;

		ret = -EINVAL;
		if (!is_mem_zero((const char *)umem.reserved,
				 sizeof(umem.reserved)))
			break;
		mutex_lock(&dev->domain_lock);
		ret = vduse_dev_dereg_umem(dev, umem.iova,
					   umem.size);
		mutex_unlock(&dev->domain_lock);
		break;
	}
	case VDUSE_IOTLB_GET_INFO: {
		struct vduse_iova_info info;
		struct vhost_iotlb_map *map;

		ret = -EFAULT;
		if (copy_from_user(&info, argp, sizeof(info)))
			break;

		ret = -EINVAL;
		if (info.start > info.last)
			break;

		if (!is_mem_zero((const char *)info.reserved,
				 sizeof(info.reserved)))
			break;

		mutex_lock(&dev->domain_lock);
		if (!dev->domain) {
			mutex_unlock(&dev->domain_lock);
			break;
		}
		spin_lock(&dev->domain->iotlb_lock);
		map = vhost_iotlb_itree_first(dev->domain->iotlb,
					      info.start, info.last);
		if (map) {
			info.start = map->start;
			info.last = map->last;
			info.capability = 0;
			if (dev->domain->bounce_map && map->start == 0 &&
			    map->last == dev->domain->bounce_size - 1)
				info.capability |= VDUSE_IOVA_CAP_UMEM;
		}
		spin_unlock(&dev->domain->iotlb_lock);
		mutex_unlock(&dev->domain_lock);
		if (!map)
			break;

		ret = -EFAULT;
		if (copy_to_user(argp, &info, sizeof(info)))
			break;

		ret = 0;
		break;
	}
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	return ret;
}

static int vduse_dev_release(struct inode *inode, struct file *file)
{
	struct vduse_dev *dev = file->private_data;

	mutex_lock(&dev->domain_lock);
	if (dev->domain)
		vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
	mutex_unlock(&dev->domain_lock);
	spin_lock(&dev->msg_lock);
	/* Make sure the inflight messages can be processed after reconnection */
	list_splice_init(&dev->recv_list, &dev->send_list);
	spin_unlock(&dev->msg_lock);
	dev->connected = false;

	return 0;
}

static struct vduse_dev *vduse_dev_get_from_minor(int minor)
{
	struct vduse_dev *dev;

	mutex_lock(&vduse_lock);
	dev = idr_find(&vduse_idr, minor);
	mutex_unlock(&vduse_lock);

	return dev;
}

static int vduse_dev_open(struct inode *inode, struct file *file)
{
	int ret;
	struct vduse_dev *dev = vduse_dev_get_from_minor(iminor(inode));

	if (!dev)
		return -ENODEV;

	ret = -EBUSY;
	mutex_lock(&dev->lock);
	if (dev->connected)
		goto unlock;

	ret = 0;
	dev->connected = true;
	file->private_data = dev;
unlock:
	mutex_unlock(&dev->lock);

	return ret;
}

static const struct file_operations vduse_dev_fops = {
	.owner = THIS_MODULE,
	.open = vduse_dev_open,
	.release = vduse_dev_release,
	.read_iter = vduse_dev_read_iter,
	.write_iter = vduse_dev_write_iter,
	.poll = vduse_dev_poll,
	.unlocked_ioctl = vduse_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

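/*
 * For illustration, a minimal sketch of the userspace side of the
 * /dev/vduse/<name> message protocol (not kernel code; the request and
 * response layouts live in <uapi/linux/vduse.h>, error handling is
 * omitted):
 *
 *	struct vduse_dev_request req;
 *	struct vduse_dev_response resp;
 *
 *	for (;;) {
 *		// Blocks until the kernel queues a request
 *		// (see vduse_dev_read_iter() above).
 *		read(dev_fd, &req, sizeof(req));
 *
 *		memset(&resp, 0, sizeof(resp));
 *		resp.request_id = req.request_id;
 *		resp.result = VDUSE_REQ_RESULT_OK;
 *		// ... handle VDUSE_GET_VQ_STATE, VDUSE_SET_STATUS and
 *		// VDUSE_UPDATE_IOTLB, filling in resp as needed ...
 *
 *		// Completes the matching message on recv_list
 *		// (see vduse_dev_write_iter() above).
 *		write(dev_fd, &resp, sizeof(resp));
 *	}
 */

/*
 * Each virtqueue is exposed as a "vqN" kobject under the vduse device,
 * with an irq_cb_affinity attribute selecting the CPUs eligible to run
 * its interrupt-injection work (see vduse_vq_update_effective_cpu()).
 */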
static ssize_t irq_cb_affinity_show(struct vduse_virtqueue *vq, char *buf)
{
	return sprintf(buf, "%*pb\n", cpumask_pr_args(&vq->irq_affinity));
}

static ssize_t irq_cb_affinity_store(struct vduse_virtqueue *vq,
				     const char *buf, size_t count)
{
	cpumask_var_t new_value;
	int ret;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	ret = cpumask_parse(buf, new_value);
	if (ret)
		goto free_mask;

	ret = -EINVAL;
	if (!cpumask_intersects(new_value, cpu_online_mask))
		goto free_mask;

	cpumask_copy(&vq->irq_affinity, new_value);
	ret = count;
free_mask:
	free_cpumask_var(new_value);
	return ret;
}

struct vq_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct vduse_virtqueue *vq, char *buf);
	ssize_t (*store)(struct vduse_virtqueue *vq, const char *buf,
			 size_t count);
};

static struct vq_sysfs_entry irq_cb_affinity_attr = __ATTR_RW(irq_cb_affinity);

static struct attribute *vq_attrs[] = {
	&irq_cb_affinity_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vq);

static ssize_t vq_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	struct vduse_virtqueue *vq = container_of(kobj,
					struct vduse_virtqueue, kobj);
	struct vq_sysfs_entry *entry = container_of(attr,
					struct vq_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(vq, buf);
}

static ssize_t vq_attr_store(struct kobject *kobj, struct attribute *attr,
			     const char *buf, size_t count)
{
	struct vduse_virtqueue *vq = container_of(kobj,
					struct vduse_virtqueue, kobj);
	struct vq_sysfs_entry *entry = container_of(attr,
					struct vq_sysfs_entry, attr);

	if (!entry->store)
		return -EIO;

	return entry->store(vq, buf, count);
}

static const struct sysfs_ops vq_sysfs_ops = {
	.show = vq_attr_show,
	.store = vq_attr_store,
};

static void vq_release(struct kobject *kobj)
{
	struct vduse_virtqueue *vq = container_of(kobj,
					struct vduse_virtqueue, kobj);
	kfree(vq);
}

static const struct kobj_type vq_type = {
	.release = vq_release,
	.sysfs_ops = &vq_sysfs_ops,
	.default_groups = vq_groups,
};

static char *vduse_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
}

static const struct class vduse_class = {
	.name = "vduse",
	.devnode = vduse_devnode,
};

static void vduse_dev_deinit_vqs(struct vduse_dev *dev)
{
	int i;

	if (!dev->vqs)
		return;

	for (i = 0; i < dev->vq_num; i++)
		kobject_put(&dev->vqs[i]->kobj);
	kfree(dev->vqs);
}

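/*
 * Allocate the virtqueue array and register one "vqN" kobject per
 * queue; on failure, drop the kobjects already added, which frees the
 * corresponding queues via vq_release().
 */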
static int vduse_dev_init_vqs(struct vduse_dev *dev, u32 vq_align, u32 vq_num)
{
	int ret, i;

	dev->vq_align = vq_align;
	dev->vq_num = vq_num;
	dev->vqs = kcalloc(dev->vq_num, sizeof(*dev->vqs), GFP_KERNEL);
	if (!dev->vqs)
		return -ENOMEM;

	for (i = 0; i < vq_num; i++) {
		dev->vqs[i] = kzalloc(sizeof(*dev->vqs[i]), GFP_KERNEL);
		if (!dev->vqs[i]) {
			ret = -ENOMEM;
			goto err;
		}

		dev->vqs[i]->index = i;
		dev->vqs[i]->irq_effective_cpu = IRQ_UNBOUND;
		INIT_WORK(&dev->vqs[i]->inject, vduse_vq_irq_inject);
		INIT_WORK(&dev->vqs[i]->kick, vduse_vq_kick_work);
		spin_lock_init(&dev->vqs[i]->kick_lock);
		spin_lock_init(&dev->vqs[i]->irq_lock);
		cpumask_setall(&dev->vqs[i]->irq_affinity);

		kobject_init(&dev->vqs[i]->kobj, &vq_type);
		ret = kobject_add(&dev->vqs[i]->kobj,
				  &dev->dev->kobj, "vq%d", i);
		if (ret) {
			kfree(dev->vqs[i]);
			goto err;
		}
	}

	return 0;
err:
	while (i--)
		kobject_put(&dev->vqs[i]->kobj);
	kfree(dev->vqs);
	dev->vqs = NULL;
	return ret;
}

static struct vduse_dev *vduse_dev_create(void)
{
	struct vduse_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	mutex_init(&dev->lock);
	mutex_init(&dev->mem_lock);
	mutex_init(&dev->domain_lock);
	spin_lock_init(&dev->msg_lock);
	INIT_LIST_HEAD(&dev->send_list);
	INIT_LIST_HEAD(&dev->recv_list);
	spin_lock_init(&dev->irq_lock);
	init_rwsem(&dev->rwsem);

	INIT_WORK(&dev->inject, vduse_dev_irq_inject);
	init_waitqueue_head(&dev->waitq);

	return dev;
}

static void vduse_dev_destroy(struct vduse_dev *dev)
{
	kfree(dev);
}

static struct vduse_dev *vduse_find_dev(const char *name)
{
	struct vduse_dev *dev;
	int id;

	idr_for_each_entry(&vduse_idr, dev, id)
		if (!strcmp(dev->name, name))
			return dev;

	return NULL;
}

static int vduse_destroy_dev(char *name)
{
	struct vduse_dev *dev = vduse_find_dev(name);

	if (!dev)
		return -EINVAL;

	mutex_lock(&dev->lock);
	if (dev->vdev || dev->connected) {
		mutex_unlock(&dev->lock);
		return -EBUSY;
	}
	dev->connected = true;
	mutex_unlock(&dev->lock);

	vduse_dev_reset(dev);
	device_destroy(&vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
	idr_remove(&vduse_idr, dev->minor);
	kvfree(dev->config);
	vduse_dev_deinit_vqs(dev);
	if (dev->domain)
		vduse_domain_destroy(dev->domain);
	kfree(dev->name);
	vduse_dev_destroy(dev);
	module_put(THIS_MODULE);

	return 0;
}

static bool device_is_allowed(u32 device_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(allowed_device_id); i++)
		if (allowed_device_id[i] == device_id)
			return true;

	return false;
}

static bool features_is_valid(u64 features)
{
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return false;

	/* Now we only support read-only configuration space */
	if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
		return false;

	return true;
}

static bool vduse_validate_config(struct vduse_dev_config *config)
{
	if (!is_mem_zero((const char *)config->reserved,
			 sizeof(config->reserved)))
		return false;

	if (config->vq_align > PAGE_SIZE)
		return false;

	if (config->config_size > PAGE_SIZE)
		return false;

	if (config->vq_num > 0xffff)
		return false;

	if (!config->name[0])
		return false;

	if (!device_is_allowed(config->device_id))
		return false;

	if (!features_is_valid(config->features))
		return false;

	return true;
}

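/*
 * Per-device sysfs knobs: msg_timeout bounds vduse_dev_msg_sync() in
 * seconds (0 means wait forever), and bounce_size may only be tuned
 * before the IOVA domain has been created.
 */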
static ssize_t msg_timeout_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct vduse_dev *dev = dev_get_drvdata(device);

	return sysfs_emit(buf, "%u\n", dev->msg_timeout);
}

static ssize_t msg_timeout_store(struct device *device,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct vduse_dev *dev = dev_get_drvdata(device);
	int ret;

	ret = kstrtouint(buf, 10, &dev->msg_timeout);
	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(msg_timeout);

static ssize_t bounce_size_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct vduse_dev *dev = dev_get_drvdata(device);

	return sysfs_emit(buf, "%u\n", dev->bounce_size);
}

static ssize_t bounce_size_store(struct device *device,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct vduse_dev *dev = dev_get_drvdata(device);
	unsigned int bounce_size;
	int ret;

	ret = -EPERM;
	mutex_lock(&dev->domain_lock);
	if (dev->domain)
		goto unlock;

	ret = kstrtouint(buf, 10, &bounce_size);
	if (ret < 0)
		goto unlock;

	ret = -EINVAL;
	if (bounce_size > VDUSE_MAX_BOUNCE_SIZE ||
	    bounce_size < VDUSE_MIN_BOUNCE_SIZE)
		goto unlock;

	dev->bounce_size = bounce_size & PAGE_MASK;
	ret = count;
unlock:
	mutex_unlock(&dev->domain_lock);
	return ret;
}

static DEVICE_ATTR_RW(bounce_size);

static struct attribute *vduse_dev_attrs[] = {
	&dev_attr_msg_timeout.attr,
	&dev_attr_bounce_size.attr,
	NULL
};

ATTRIBUTE_GROUPS(vduse_dev);

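/*
 * Instantiate a VDUSE device from a VDUSE_CREATE_DEV request: allocate
 * a minor, create the /dev/vduse/<name> char device and its per-queue
 * kobjects, and pin the module until vduse_destroy_dev().
 */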
static int vduse_create_dev(struct vduse_dev_config *config,
			    void *config_buf, u64 api_version)
{
	int ret;
	struct vduse_dev *dev;

	ret = -EEXIST;
	if (vduse_find_dev(config->name))
		goto err;

	ret = -ENOMEM;
	dev = vduse_dev_create();
	if (!dev)
		goto err;

	dev->api_version = api_version;
	dev->device_features = config->features;
	dev->device_id = config->device_id;
	dev->vendor_id = config->vendor_id;
	dev->name = kstrdup(config->name, GFP_KERNEL);
	if (!dev->name)
		goto err_str;

	dev->bounce_size = VDUSE_BOUNCE_SIZE;
	dev->config = config_buf;
	dev->config_size = config->config_size;

	ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto err_idr;

	dev->minor = ret;
	dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
	dev->dev = device_create_with_groups(&vduse_class, NULL,
				MKDEV(MAJOR(vduse_major), dev->minor),
				dev, vduse_dev_groups, "%s", config->name);
	if (IS_ERR(dev->dev)) {
		ret = PTR_ERR(dev->dev);
		goto err_dev;
	}

	ret = vduse_dev_init_vqs(dev, config->vq_align, config->vq_num);
	if (ret)
		goto err_vqs;

	__module_get(THIS_MODULE);

	return 0;
err_vqs:
	device_destroy(&vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
err_dev:
	idr_remove(&vduse_idr, dev->minor);
err_idr:
	kfree(dev->name);
err_str:
	vduse_dev_destroy(dev);
err:
	return ret;
}

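/*
 * ioctl handler for /dev/vduse/control: negotiate the API version and
 * create/destroy VDUSE devices. Serialized by vduse_lock.
 */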
static long vduse_ioctl(struct file *file, unsigned int cmd,
			unsigned long arg)
{
	int ret;
	void __user *argp = (void __user *)arg;
	struct vduse_control *control = file->private_data;

	mutex_lock(&vduse_lock);
	switch (cmd) {
	case VDUSE_GET_API_VERSION:
		ret = put_user(control->api_version, (u64 __user *)argp);
		break;
	case VDUSE_SET_API_VERSION: {
		u64 api_version;

		ret = -EFAULT;
		if (get_user(api_version, (u64 __user *)argp))
			break;

		ret = -EINVAL;
		if (api_version > VDUSE_API_VERSION)
			break;

		ret = 0;
		control->api_version = api_version;
		break;
	}
	case VDUSE_CREATE_DEV: {
		struct vduse_dev_config config;
		unsigned long size = offsetof(struct vduse_dev_config, config);
		void *buf;

		ret = -EFAULT;
		if (copy_from_user(&config, argp, size))
			break;

		ret = -EINVAL;
		if (!vduse_validate_config(&config))
			break;

		buf = vmemdup_user(argp + size, config.config_size);
		if (IS_ERR(buf)) {
			ret = PTR_ERR(buf);
			break;
		}
		config.name[VDUSE_NAME_MAX - 1] = '\0';
		ret = vduse_create_dev(&config, buf, control->api_version);
		if (ret)
			kvfree(buf);
		break;
	}
	case VDUSE_DESTROY_DEV: {
		char name[VDUSE_NAME_MAX];

		ret = -EFAULT;
		if (copy_from_user(name, argp, VDUSE_NAME_MAX))
			break;

		name[VDUSE_NAME_MAX - 1] = '\0';
		ret = vduse_destroy_dev(name);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&vduse_lock);

	return ret;
}

static int vduse_release(struct inode *inode, struct file *file)
{
	struct vduse_control *control = file->private_data;

	kfree(control);
	return 0;
}

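/* Each open of /dev/vduse/control gets its own negotiated API version. */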
static int vduse_open(struct inode *inode, struct file *file)
{
	struct vduse_control *control;

	control = kmalloc(sizeof(struct vduse_control), GFP_KERNEL);
	if (!control)
		return -ENOMEM;

	control->api_version = VDUSE_API_VERSION;
	file->private_data = control;

	return 0;
}

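/* File operations for the /dev/vduse/control character device. */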
static const struct file_operations vduse_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = vduse_open,
	.release = vduse_release,
	.unlocked_ioctl = vduse_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

struct vduse_mgmt_dev {
	struct vdpa_mgmt_dev mgmt_dev;
	struct device dev;
};

static struct vduse_mgmt_dev *vduse_mgmt;

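/*
 * Allocate the vdpa_device wrapper for a VDUSE device and hook up the
 * custom DMA ops so DMA mappings are redirected to the userspace-backed
 * IOVA domain.
 */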
static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
{
	struct vduse_vdpa *vdev;
	int ret;

	if (dev->vdev)
		return -EEXIST;

	vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
				 &vduse_vdpa_config_ops, 1, 1, name, true);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	dev->vdev = vdev;
	vdev->dev = dev;
	vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
	ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
	if (ret) {
		put_device(&vdev->vdpa.dev);
		return ret;
	}
	set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
	vdev->vdpa.dma_dev = &vdev->vdpa.dev;
	vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;

	return 0;
}

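/*
 * Management-device callback: attach a fully initialized VDUSE device
 * to the vDPA bus. The IOVA domain is created lazily here so that the
 * bounce_size attribute can still be tuned beforehand.
 */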
static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			const struct vdpa_dev_set_config *config)
{
	struct vduse_dev *dev;
	int ret;

	mutex_lock(&vduse_lock);
	dev = vduse_find_dev(name);
	if (!dev || !vduse_dev_is_ready(dev)) {
		mutex_unlock(&vduse_lock);
		return -EINVAL;
	}
	ret = vduse_dev_init_vdpa(dev, name);
	mutex_unlock(&vduse_lock);
	if (ret)
		return ret;

	mutex_lock(&dev->domain_lock);
	if (!dev->domain)
		dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
						  dev->bounce_size);
	mutex_unlock(&dev->domain_lock);
	if (!dev->domain) {
		put_device(&dev->vdev->vdpa.dev);
		return -ENOMEM;
	}

	ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
	if (ret) {
		put_device(&dev->vdev->vdpa.dev);
		mutex_lock(&dev->domain_lock);
		vduse_domain_destroy(dev->domain);
		dev->domain = NULL;
		mutex_unlock(&dev->domain_lock);
		return ret;
	}

	return 0;
}

static void vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = {
	.dev_add = vdpa_dev_add,
	.dev_del = vdpa_dev_del,
};

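/* Device types the VDUSE management device advertises: virtio-blk only. */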
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static void vduse_mgmtdev_release(struct device *dev)
{
	struct vduse_mgmt_dev *mgmt_dev;

	mgmt_dev = container_of(dev, struct vduse_mgmt_dev, dev);
	kfree(mgmt_dev);
}

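/*
 * Register the "vduse" vDPA management device so VDUSE devices can be
 * added to the vDPA bus via the vdpa netlink interface.
 */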
static int vduse_mgmtdev_init(void)
{
	int ret;

	vduse_mgmt = kzalloc(sizeof(*vduse_mgmt), GFP_KERNEL);
	if (!vduse_mgmt)
		return -ENOMEM;

	ret = dev_set_name(&vduse_mgmt->dev, "vduse");
	if (ret) {
		kfree(vduse_mgmt);
		return ret;
	}

	vduse_mgmt->dev.release = vduse_mgmtdev_release;

	ret = device_register(&vduse_mgmt->dev);
	if (ret)
		goto dev_reg_err;

	vduse_mgmt->mgmt_dev.id_table = id_table;
	vduse_mgmt->mgmt_dev.ops = &vdpa_dev_mgmtdev_ops;
	vduse_mgmt->mgmt_dev.device = &vduse_mgmt->dev;
	ret = vdpa_mgmtdev_register(&vduse_mgmt->mgmt_dev);
	if (ret)
		device_unregister(&vduse_mgmt->dev);

	return ret;

dev_reg_err:
	put_device(&vduse_mgmt->dev);
	return ret;
}

static void vduse_mgmtdev_exit(void)
{
	vdpa_mgmtdev_unregister(&vduse_mgmt->mgmt_dev);
	device_unregister(&vduse_mgmt->dev);
}

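/*
 * Module init: register the vduse class, carve out a chrdev region for
 * /dev/vduse/control (minor 0) plus the per-device nodes, create the
 * IRQ injection workqueues and register the management device.
 */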
static int vduse_init(void)
{
	int ret;
	struct device *dev;

	ret = class_register(&vduse_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
	if (ret)
		goto err_chardev_region;

	/* /dev/vduse/control */
	cdev_init(&vduse_ctrl_cdev, &vduse_ctrl_fops);
	vduse_ctrl_cdev.owner = THIS_MODULE;
	ret = cdev_add(&vduse_ctrl_cdev, vduse_major, 1);
	if (ret)
		goto err_ctrl_cdev;

	dev = device_create(&vduse_class, NULL, vduse_major, NULL, "control");
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto err_device;
	}

	/* /dev/vduse/$DEVICE */
	cdev_init(&vduse_cdev, &vduse_dev_fops);
	vduse_cdev.owner = THIS_MODULE;
	ret = cdev_add(&vduse_cdev, MKDEV(MAJOR(vduse_major), 1),
		       VDUSE_DEV_MAX - 1);
	if (ret)
		goto err_cdev;

	ret = -ENOMEM;
	vduse_irq_wq = alloc_workqueue("vduse-irq",
				       WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
	if (!vduse_irq_wq)
		goto err_wq;

	vduse_irq_bound_wq = alloc_workqueue("vduse-irq-bound", WQ_HIGHPRI, 0);
	if (!vduse_irq_bound_wq)
		goto err_bound_wq;

	ret = vduse_domain_init();
	if (ret)
		goto err_domain;

	ret = vduse_mgmtdev_init();
	if (ret)
		goto err_mgmtdev;

	return 0;
err_mgmtdev:
	vduse_domain_exit();
err_domain:
	destroy_workqueue(vduse_irq_bound_wq);
err_bound_wq:
	destroy_workqueue(vduse_irq_wq);
err_wq:
	cdev_del(&vduse_cdev);
err_cdev:
	device_destroy(&vduse_class, vduse_major);
err_device:
	cdev_del(&vduse_ctrl_cdev);
err_ctrl_cdev:
	unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
err_chardev_region:
	class_unregister(&vduse_class);
	return ret;
}
module_init(vduse_init);

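/* Module exit: tear everything down in reverse order of vduse_init(). */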
static void vduse_exit(void)
{
	vduse_mgmtdev_exit();
	vduse_domain_exit();
	destroy_workqueue(vduse_irq_bound_wq);
	destroy_workqueue(vduse_irq_wq);
	cdev_del(&vduse_cdev);
	device_destroy(&vduse_class, vduse_major);
	cdev_del(&vduse_ctrl_cdev);
	unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
	class_unregister(&vduse_class);
}
module_exit(vduse_exit);

MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);