// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

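/*
 * Build the per-region table when user space opens the device: record each
 * MC region's physical address, size and type, and mark a region
 * mmap-capable only if it is page-aligned in both address and size (DPRC
 * regions are never mmap'ed).
 */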
static int vfio_fsl_mc_open_device(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
				!(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

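/*
 * Reset the device. A DPRC container is reset in place, non-recursively so
 * that child objects are preserved; any other object is reset by opening it
 * over the MC command portal, issuing a reset, and closing it again.
 */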
static int vfio_fsl_mc_reset_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret = 0;

	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		return dprc_reset_container(mc_dev->mc_io, 0,
					    mc_dev->mc_handle,
					    mc_dev->obj_desc.id,
					    DPRC_RESET_OPTION_NON_RECURSIVE);
	} else {
		u16 token;

		ret = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
				      mc_dev->obj_desc.type,
				      &token);
		if (ret)
			goto out;
		ret = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);
		if (ret) {
			fsl_mc_obj_close(mc_dev->mc_io, 0, token);
			goto out;
		}
		ret = fsl_mc_obj_close(mc_dev->mc_io, 0, token);
	}
out:
	return ret;
}

static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
	int ret;

	vfio_fsl_mc_regions_cleanup(vdev);

	/* reset the device before cleaning up the interrupts */
	ret = vfio_fsl_mc_reset_device(vdev);

	if (ret)
		dev_warn(&mc_cont->dev,
			 "VFIO_FSL_MC: reset device has failed (%d)\n", ret);

	vfio_fsl_mc_irqs_cleanup(vdev);

	fsl_mc_cleanup_irq_pool(mc_cont);
}

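/*
 * Handle the standard VFIO device ioctls: report device, region and IRQ
 * info, wire up interrupt eventfds (VFIO_DEVICE_SET_IRQS) under the igate
 * mutex, and reset the device on VFIO_DEVICE_RESET.
 */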
static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
			      unsigned int cmd, unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		return vfio_fsl_mc_reset_device(vdev);
	}
	default:
		return -ENOTTY;
	}
}

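/*
 * read() on a region returns the 64-byte MC command portal contents: the
 * full response (eight 64-bit words) is read back from the portal and
 * copied to user space. Only whole-portal reads (count == 64, offset == 0)
 * are supported; the region is ioremap'ed lazily on first access.
 */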
static ssize_t vfio_fsl_mc_read(struct vfio_device *core_vdev, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS	5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS	500

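/*
 * Submit one command through the MC portal: parameter words 7..1 are
 * written first with relaxed ordering, then the header word is written
 * last, which submits the command to the MC firmware. The status field of
 * the header is then polled until it leaves MC_CMD_STATUS_READY or the
 * 5 second timeout expires.
 */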
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write the command parameters into the portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last */
	writeq(cmd_data[0], ioaddr);

	/*
	 * Wait for the response before returning to user space.
	 * This could be optimized in the future to prepare the response
	 * before returning to user space and so avoid the read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

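/*
 * write() on a region submits one MC command: the 64-byte command (header
 * plus seven parameter words) is copied from user space and sent through
 * vfio_fsl_mc_send_command(). As with read(), only whole-portal writes
 * (count == 64, offset == 0) are accepted.
 */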
static ssize_t vfio_fsl_mc_write(struct vfio_device *core_vdev,
				 const char __user *buf, size_t count,
				 loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}

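/*
 * Map one region's MMIO into the caller's VMA. The low bits of vm_pgoff
 * select the page offset within the region; the mapping is forced to
 * non-cacheable unless the hardware marks the region both cacheable and
 * shareable.
 */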
static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
			    struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_fsl_mc_ops;
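
/*
 * Bus notifier for devices inside a DPRC bound to vfio-fsl-mc: when a
 * child object is added, set its driver_override so it also binds to
 * vfio-fsl-mc; warn if a child ends up bound to some other driver while
 * the container is user-owned.
 */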
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

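/*
 * One-time device setup. A DPRC registers the bus notifier above and opens
 * its own MC command portal via dprc_setup(); non-DPRC objects simply
 * borrow the mc_io of their parent container.
 */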
static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

	/* Non-dprc devices share mc_io from parent */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;

	/* open DPRC, allocate a MC portal */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}
	return 0;

out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	return ret;
}

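/*
 * For a DPRC, enumerate its child objects (without allocating interrupts);
 * the bus notifier then overrides each child's driver so the whole
 * container is claimed by vfio-fsl-mc.
 */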
static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
{
	int ret;

	/* non dprc devices do not scan for other devices */
	if (!is_fsl_mc_bus_dprc(mc_dev))
		return 0;
	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev,
			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		dprc_remove_devices(mc_dev, NULL, 0);
		return ret;
	}
	return 0;
}

static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (!is_fsl_mc_bus_dprc(mc_dev))
		return;

	dprc_cleanup(mc_dev);
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}

static int vfio_fsl_mc_init_dev(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(core_vdev->dev);
	int ret;

	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);

	if (is_fsl_mc_bus_dprc(mc_dev))
		ret = vfio_assign_device_set(core_vdev, &mc_dev->dev);
	else
		ret = vfio_assign_device_set(core_vdev, mc_dev->dev.parent);

	if (ret)
		return ret;

	/* device_set is released by vfio core if @init fails */
	return vfio_fsl_mc_init_device(vdev);
}

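/*
 * Driver probe/remove: allocate the vfio_device, register it with the VFIO
 * core, and scan the container; teardown unwinds in the reverse order.
 */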
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_fsl_mc_device, vdev, dev,
				 &vfio_fsl_mc_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_put_vdev;
	}

	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	dev_set_drvdata(dev, vdev);
	return 0;

out_group_dev:
	vfio_unregister_group_dev(&vdev->vdev);
out_put_vdev:
	vfio_put_device(&vdev->vdev);
	return ret;
}

static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);

	vfio_fsl_uninit_device(vdev);
	mutex_destroy(&vdev->igate);
}

static void vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct device *dev = &mc_dev->dev;
	struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	dprc_remove_devices(mc_dev, NULL, 0);
	vfio_put_device(&vdev->vdev);
}

static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name = "vfio-fsl-mc",
	.init = vfio_fsl_mc_init_dev,
	.release = vfio_fsl_mc_release_dev,
	.open_device = vfio_fsl_mc_open_device,
	.close_device = vfio_fsl_mc_close_device,
	.ioctl = vfio_fsl_mc_ioctl,
	.read = vfio_fsl_mc_read,
	.write = vfio_fsl_mc_write,
	.mmap = vfio_fsl_mc_mmap,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe = vfio_fsl_mc_probe,
	.remove = vfio_fsl_mc_remove,
	.driver = {
		.name = "vfio-fsl-mc",
	},
	.driver_managed_dma = true,
};

module_fsl_mc_driver(vfio_fsl_mc_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
612 | |