// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>

#include "dma-iommu.h"

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2

struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};

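/*
 * One IOVA range mapped to a physical range, tracked in the per-domain
 * interval tree. iova.start and iova.last are inclusive bounds.
 */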
struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
	bool				bypass;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};

struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[] __counted_by(len);
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

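/*
 * Every request ends with a virtio_iommu_req_tail, whose status field is
 * written by the device. Convert that status into an errno.
 */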
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}

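/*
 * A request buffer is laid out as a device-readable part followed by a
 * device-writable part. The writable part is the tail, preceded, for PROBE
 * requests, by the properties area that the device fills in:
 *
 *   |<---------- readable ---------->|<-------- writable -------->|
 *   [ head | payload                 | (probe properties) | tail  ]
 *
 * Return the offset of the first byte the device may write.
 */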
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(struct_size(req, buf, len), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

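	/*
	 * Split the buffer into two descriptors: the device-readable part
	 * (head and payload) goes in the out sg, the device-writable part
	 * (probe properties and tail) in the in sg.
	 */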
	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}

/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= end;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}

/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @end: end of the range
 *
 * On success, returns the number of unmapped bytes
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * Fill the domain with identity mappings, skipping the device's reserved
 * regions.
 */
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;

	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
			/* No overlap */
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}

/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}

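/*
 * Parse a RESV_MEM property reported by the device during probe, and add the
 * corresponding reserved region to the endpoint's list, kept sorted by start
 * address.
 */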
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI,
						 GFP_KERNEL);
		break;
	}
	if (!region)
		return -ENOMEM;

	/* Keep the list sorted */
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}

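/*
 * Send a PROBE request for the endpoint and parse the properties that the
 * device returns. Only RESV_MEM properties are handled so far; unknown
 * property types are skipped.
 */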
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}

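/* Decode a fault event reported by the device and log it (ratelimited). */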
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason	= fault->reason;
	u32 flags	= le32_to_cpu(fault->flags);
	u32 endpoint	= le32_to_cpu(fault->endpoint);
	u64 address	= le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);
	return 0;
}

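/*
 * Event virtqueue callback: handle incoming fault events and give the
 * buffers back to the device.
 */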
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u > %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}

/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}

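/*
 * Finish initializing a domain on first attach, once the owning viommu is
 * known: allocate a domain ID, inherit the device's geometry and page sizes,
 * and set up bypass or identity mappings for IOMMU_DOMAIN_IDENTITY.
 */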
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id		= (unsigned int)ret;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	vdomain->map_flags	= viommu->map_flags;
	vdomain->viommu		= viommu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		if (virtio_has_feature(viommu->vdev,
				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
			vdomain->bypass = true;
			return 0;
		}

		ret = viommu_domain_map_identity(vdev, vdomain);
		if (ret) {
			ida_free(&viommu->domain_ids, vdomain->id);
			vdomain->viommu = NULL;
			return ret;
		}
	}

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings */
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}

static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		ret = -EINVAL;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	if (vdomain->bypass)
		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

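/*
 * Detach the endpoint from its domain: send one DETACH request per endpoint
 * ID, then drop the driver's reference to the domain.
 */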
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
	int i;
	struct virtio_iommu_req_detach req;
	struct viommu_domain *vdomain = vdev->vdomain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);

	if (!vdomain)
		return;

	req = (struct virtio_iommu_req_detach) {
		.head.type	= VIRTIO_IOMMU_T_DETACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
	}
	vdomain->nr_endpoints--;
	vdev->vdomain = NULL;
}

static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	if (vdomain->nr_endpoints) {
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(iova),
			.phys_start	= cpu_to_le64(paddr),
			.virt_end	= cpu_to_le64(end),
			.flags		= cpu_to_le32(flags),
		};

		ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
		if (ret) {
			viommu_del_mappings(vdomain, iova, end);
			return ret;
		}
	}
	if (mapped)
		*mapped = size;

	return 0;
}

static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

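/*
 * Translate an IOVA by walking the mapping interval tree. Return 0 when the
 * address isn't mapped.
 */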
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}

static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static int viommu_iotlb_sync_map(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * May be called before the viommu is initialized, e.g. while creating
	 * direct mappings.
	 */
	if (!vdomain->nr_endpoints)
		return 0;
	return viommu_sync_req(vdomain->viommu);
}

static void viommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/*
	 * May be called before the viommu is initialized, e.g. while creating
	 * direct mappings.
	 */
	if (!vdomain->nr_endpoints)
		return;
	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI,
					      GFP_KERNEL);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return device_match_fwnode(dev->parent, data);
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
	/* First clear the DMA ops in case we're switching from a DMA domain */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	viommu_detach_dev(vdev);
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev,
			   const struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;
	default:
		return false;
	}
}

static struct iommu_ops viommu_ops = {
	.capable		= viommu_capable,
	.domain_alloc		= viommu_domain_alloc,
	.probe_device		= viommu_probe_device,
	.probe_finalize		= viommu_probe_finalize,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.of_xlate		= viommu_of_xlate,
	.owner			= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= viommu_attach_dev,
		.map_pages		= viommu_map_pages,
		.unmap_pages		= viommu_unmap_pages,
		.iova_to_phys		= viommu_iova_to_phys,
		.flush_iotlb_all	= viommu_flush_iotlb_all,
		.iotlb_sync		= viommu_iotlb_sync,
		.iotlb_sync_map		= viommu_iotlb_sync_map,
		.free			= viommu_domain_free,
	}
};

static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

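/*
 * Allocate the event structures and post them all as receive buffers on the
 * event virtqueue, so the device can report faults.
 */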
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");