// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>

#define UIO_MAX_DEVICES (1U << MINORBITS)

static int uio_major;
static struct cdev *uio_cdev;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);

/*
 * attributes
 */

struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
	if (unlikely(!mem->name))
		mem->name = "";

	return sprintf(buf, "%s\n", mem->name);
}

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->size);
}

static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs);
}

struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *map_attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	&offset_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
ATTRIBUTE_GROUPS(map);

static void map_release(struct kobject *kobj)
{
	struct uio_map *map = to_map(kobj);
	kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct uio_map *map = to_map(kobj);
	struct uio_mem *mem = map->mem;
	struct map_sysfs_entry *entry;

	entry = container_of(attr, struct map_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(mem, buf);
}

static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};

static struct kobj_type map_attr_type = {
	.release = map_release,
	.sysfs_ops = &map_sysfs_ops,
	.default_groups = map_groups,
};

struct uio_portio {
	struct kobject kobj;
	struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)

static ssize_t portio_name_show(struct uio_port *port, char *buf)
{
	if (unlikely(!port->name))
		port->name = "";

	return sprintf(buf, "%s\n", port->name);
}

static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->start);
}

static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->size);
}

static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
{
	const char *porttypes[] = {"none", "x86", "gpio", "other"};

	if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
		return -EINVAL;

	return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
}

struct portio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_port *, char *);
	ssize_t (*store)(struct uio_port *, const char *, size_t);
};

static struct portio_sysfs_entry portio_name_attribute =
	__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
	__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
	__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
	__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);

static struct attribute *portio_attrs[] = {
	&portio_name_attribute.attr,
	&portio_start_attribute.attr,
	&portio_size_attribute.attr,
	&portio_porttype_attribute.attr,
	NULL,
};
ATTRIBUTE_GROUPS(portio);

static void portio_release(struct kobject *kobj)
{
	struct uio_portio *portio = to_portio(kobj);
	kfree(portio);
}

static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct uio_portio *portio = to_portio(kobj);
	struct uio_port *port = portio->port;
	struct portio_sysfs_entry *entry;

	entry = container_of(attr, struct portio_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(port, buf);
}

static const struct sysfs_ops portio_sysfs_ops = {
	.show = portio_type_show,
};

static struct kobj_type portio_attr_type = {
	.release = portio_release,
	.sysfs_ops = &portio_sysfs_ops,
	.default_groups = portio_groups,
};

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		dev_err(dev, "the device has been unregistered\n");
		goto out;
	}

	ret = sprintf(buf, "%s\n", idev->info->name);

out:
	mutex_unlock(&idev->info_lock);
	return ret;
}
static DEVICE_ATTR_RO(name);

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		dev_err(dev, "the device has been unregistered\n");
		goto out;
	}

	ret = sprintf(buf, "%s\n", idev->info->version);

out:
	mutex_unlock(&idev->info_lock);
	return ret;
}
static DEVICE_ATTR_RO(version);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
static DEVICE_ATTR_RO(event);

static struct attribute *uio_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_version.attr,
	&dev_attr_event.attr,
	NULL,
};
ATTRIBUTE_GROUPS(uio);

/* UIO class infrastructure */
static struct class uio_class = {
	.name = "uio",
	.dev_groups = uio_groups,
};

static bool uio_class_registered;

/*
 * device functions
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi, pi;
	int map_found = 0;
	int portio_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;
	struct uio_port *port;
	struct uio_portio *portio;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							&idev->dev.kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err_map;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err_map;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret)
			goto err_map_kobj;
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err_map_kobj;
	}

	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
		port = &idev->info->port[pi];
		if (port->size == 0)
			break;
		if (!portio_found) {
			portio_found = 1;
			idev->portio_dir = kobject_create_and_add("portio",
							&idev->dev.kobj);
			if (!idev->portio_dir) {
				ret = -ENOMEM;
				goto err_portio;
			}
		}
		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
		if (!portio) {
			ret = -ENOMEM;
			goto err_portio;
		}
		kobject_init(&portio->kobj, &portio_attr_type);
		portio->port = port;
		port->portio = portio;
		ret = kobject_add(&portio->kobj, idev->portio_dir,
							"port%d", pi);
		if (ret)
			goto err_portio_kobj;
		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
		if (ret)
			goto err_portio_kobj;
	}

	return 0;

err_portio:
	pi--;
err_portio_kobj:
	for (; pi >= 0; pi--) {
		port = &idev->info->port[pi];
		portio = port->portio;
		kobject_put(&portio->kobj);
	}
	kobject_put(idev->portio_dir);
err_map:
	mi--;
err_map_kobj:
	for (; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
	int i;
	struct uio_mem *mem;
	struct uio_port *port;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		mem = &idev->info->mem[i];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);

	for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
		port = &idev->info->port[i];
		if (port->size == 0)
			break;
		kobject_put(&port->portio->kobj);
	}
	kobject_put(idev->portio_dir);
}

static int uio_get_minor(struct uio_device *idev)
{
	int retval;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
	if (retval >= 0) {
		idev->minor = retval;
		retval = 0;
	} else if (retval == -ENOSPC) {
		dev_err(&idev->dev, "too many uio devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval;
}

static void uio_free_minor(unsigned long minor)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, minor);
	mutex_unlock(&minor_lock);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
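
/*
 * For reference, a driver can also raise events from outside a hardware
 * interrupt, e.g. from a timer callback. This is only a minimal sketch;
 * the names my_info and my_timer_fn are hypothetical, not from an
 * in-tree driver:
 *
 *	static struct uio_info my_info;
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		uio_event_notify(&my_info);
 *		mod_timer(t, jiffies + HZ);
 *	}
 */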

/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret;

	ret = idev->info->handler(irq, idev->info);
	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}

struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	mutex_lock(&minor_lock);
	idev = idr_find(&uio_idr, iminor(inode));
	if (!idev) {
		ret = -ENODEV;
		mutex_unlock(&minor_lock);
		goto out;
	}
	get_device(&idev->dev);
	mutex_unlock(&minor_lock);

	if (!try_module_get(idev->owner)) {
		ret = -ENODEV;
		goto err_module_get;
	}

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener) {
		ret = -ENOMEM;
		goto err_alloc_listener;
	}

	listener->dev = idev;
	listener->event_count = atomic_read(&idev->event);
	filep->private_data = listener;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		mutex_unlock(&idev->info_lock);
		ret = -EINVAL;
		goto err_infoopen;
	}

	if (idev->info->open)
		ret = idev->info->open(idev->info, inode);
	mutex_unlock(&idev->info_lock);
	if (ret)
		goto err_infoopen;

	return 0;

err_infoopen:
	kfree(listener);

err_alloc_listener:
	module_put(idev->owner);

err_module_get:
	put_device(&idev->dev);

out:
	return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	mutex_lock(&idev->info_lock);
	if (idev->info && idev->info->release)
		ret = idev->info->release(idev->info, inode);
	mutex_unlock(&idev->info_lock);

	module_put(idev->owner);
	kfree(listener);
	put_device(&idev->dev);
	return ret;
}

static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	__poll_t ret = 0;

	mutex_lock(&idev->info_lock);
	if (!idev->info || !idev->info->irq)
		ret = -EIO;
	mutex_unlock(&idev->info_lock);

	if (ret)
		return ret;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read(&idev->event))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}
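
/*
 * A minimal userspace sketch of waiting for an event with poll(2); a
 * POLLIN result means at least one interrupt occurred since the last
 * read(). The device path and handle_event() are illustrative
 * assumptions:
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_event(fd);
 */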

static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval = 0;
	s32 event_count;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		mutex_lock(&idev->info_lock);
		if (!idev->info || !idev->info->irq) {
			retval = -EIO;
			mutex_unlock(&idev->info_lock);
			break;
		}
		mutex_unlock(&idev->info_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			__set_current_state(TASK_RUNNING);
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
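
/*
 * From userspace the event counter is consumed with a read of exactly
 * sizeof(s32) bytes; the read blocks until the count changes unless
 * O_NONBLOCK is set. A minimal sketch (fd refers to an open /dev/uioX,
 * an assumption carried over from the poll example above):
 *
 *	int32_t count;
 *
 *	if (read(fd, &count, sizeof(count)) == sizeof(count))
 *		printf("%d events so far\n", count);
 */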

static ssize_t uio_write(struct file *filep, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	ssize_t retval;
	s32 irq_on;

	if (count != sizeof(s32))
		return -EINVAL;

	if (copy_from_user(&irq_on, buf, count))
		return -EFAULT;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		retval = -EINVAL;
		goto out;
	}

	if (!idev->info->irq) {
		retval = -EIO;
		goto out;
	}

	if (!idev->info->irqcontrol) {
		retval = -ENOSYS;
		goto out;
	}

	retval = idev->info->irqcontrol(idev->info, irq_on);

out:
	mutex_unlock(&idev->info_lock);
	return retval ? retval : sizeof(s32);
}
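
/*
 * If the driver implements irqcontrol(), userspace can mask or unmask
 * the interrupt by writing an s32 to the device. The value's meaning is
 * driver defined; by common convention 1 enables and 0 disables. Sketch:
 *
 *	int32_t irq_on = 1;
 *
 *	write(fd, &irq_on, sizeof(irq_on));
 */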

static int uio_find_mem_index(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (idev->info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
{
	struct uio_device *idev = vmf->vma->vm_private_data;
	struct page *page;
	unsigned long offset;
	void *addr;
	vm_fault_t ret = 0;
	int mi;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	mi = uio_find_mem_index(vmf->vma);
	if (mi < 0) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;

out:
	mutex_unlock(&idev->info_lock);

	return ret;
}

static const struct vm_operations_struct uio_logical_vm_ops = {
	.fault = uio_vma_fault,
};

static int uio_mmap_logical(struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &uio_logical_vm_ops;
	return 0;
}

static const struct vm_operations_struct uio_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	int mi = uio_find_mem_index(vma);
	struct uio_mem *mem;

	if (mi < 0)
		return -EINVAL;
	mem = idev->info->mem + mi;

	if (mem->addr & ~PAGE_MASK)
		return -ENODEV;
	if (vma->vm_end - vma->vm_start > mem->size)
		return -EINVAL;

	vma->vm_ops = &uio_physical_vm_ops;
	if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * We cannot use the vm_iomap_memory() helper here,
	 * because vma->vm_pgoff is the map index we looked
	 * up above in uio_find_mem_index(), rather than an
	 * actual page offset into the mmap.
	 *
	 * So we just do the physical mmap without a page
	 * offset.
	 */
	return remap_pfn_range(vma,
			       vma->vm_start,
			       mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int uio_mmap_dma_coherent(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	struct uio_mem *mem;
	void *addr;
	int ret = 0;
	int mi;

	mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	mem = idev->info->mem + mi;

	if (mem->addr & ~PAGE_MASK)
		return -ENODEV;
	if (mem->dma_addr & ~PAGE_MASK)
		return -ENODEV;
	if (!mem->dma_device)
		return -ENODEV;
	if (vma->vm_end - vma->vm_start > mem->size)
		return -EINVAL;

	dev_warn(mem->dma_device,
		 "use of UIO_MEM_DMA_COHERENT is highly discouraged");

	/*
	 * UIO uses offset to index into the maps for a device.
	 * We need to clear vm_pgoff for dma_mmap_coherent.
	 */
	vma->vm_pgoff = 0;

	addr = (void *)(uintptr_t)mem->addr;
	ret = dma_mmap_coherent(mem->dma_device,
				vma,
				addr,
				mem->dma_addr,
				vma->vm_end - vma->vm_start);
	vma->vm_pgoff = mi;

	return ret;
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	vma->vm_private_data = idev;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		ret = -EINVAL;
		goto out;
	}

	mi = uio_find_mem_index(vma);
	if (mi < 0) {
		ret = -EINVAL;
		goto out;
	}

	requested_pages = vma_pages(vma);
	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
			+ idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages) {
		ret = -EINVAL;
		goto out;
	}

	if (idev->info->mmap) {
		ret = idev->info->mmap(idev->info, vma);
		goto out;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_IOVA:
	case UIO_MEM_PHYS:
		ret = uio_mmap_physical(vma);
		break;
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		ret = uio_mmap_logical(vma);
		break;
	case UIO_MEM_DMA_COHERENT:
		ret = uio_mmap_dma_coherent(vma);
		break;
	default:
		ret = -EINVAL;
	}

out:
	mutex_unlock(&idev->info_lock);
	return ret;
}
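
/*
 * Userspace selects mapping N by passing N * page-size as the mmap
 * offset; the offset is a map index, not a byte offset into the region.
 * A minimal sketch for map 1 (fd and len are assumptions):
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 1 * getpagesize());
 */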

static const struct file_operations uio_fops = {
	.owner = THIS_MODULE,
	.open = uio_open,
	.release = uio_release,
	.read = uio_read,
	.write = uio_write,
	.mmap = uio_mmap,
	.poll = uio_poll,
	.fasync = uio_fasync,
	.llseek = noop_llseek,
};

static int uio_major_init(void)
{
	static const char name[] = "uio";
	struct cdev *cdev = NULL;
	dev_t uio_dev = 0;
	int result;

	result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
	if (result)
		goto out;

	result = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto out_unregister;

	cdev->owner = THIS_MODULE;
	cdev->ops = &uio_fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
	if (result)
		goto out_put;

	uio_major = MAJOR(uio_dev);
	uio_cdev = cdev;
	return 0;
out_put:
	kobject_put(&cdev->kobj);
out_unregister:
	unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
	return result;
}

static void uio_major_cleanup(void)
{
	unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
	cdev_del(uio_cdev);
}

static int init_uio_class(void)
{
	int ret;

	/* This is the first time in here, set everything up properly */
	ret = uio_major_init();
	if (ret)
		goto exit;

	ret = class_register(&uio_class);
	if (ret) {
		printk(KERN_ERR "class_register failed for uio\n");
		goto err_class_register;
	}

	uio_class_registered = true;

	return 0;

err_class_register:
	uio_major_cleanup();
exit:
	return ret;
}

static void release_uio_class(void)
{
	uio_class_registered = false;
	class_unregister(&uio_class);
	uio_major_cleanup();
}

static void uio_device_release(struct device *dev)
{
	struct uio_device *idev = dev_get_drvdata(dev);

	kfree(idev);
}

/**
 * __uio_register_device - register a new userspace IO device
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!uio_class_registered)
		return -EPROBE_DEFER;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->owner = owner;
	idev->info = info;
	mutex_init(&idev->info_lock);
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret) {
		kfree(idev);
		return ret;
	}

	device_initialize(&idev->dev);
	idev->dev.devt = MKDEV(uio_major, idev->minor);
	idev->dev.class = &uio_class;
	idev->dev.parent = parent;
	idev->dev.release = uio_device_release;
	dev_set_drvdata(&idev->dev, idev);

	ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
	if (ret)
		goto err_device_create;

	ret = device_add(&idev->dev);
	if (ret)
		goto err_device_create;

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
		/*
		 * Note that we deliberately don't use devm_request_irq
		 * here. The parent module can unregister the UIO device
		 * and call pci_disable_msi, which requires that this
		 * irq has been freed. However, the device may have open
		 * FDs at the time of unregister and therefore may not be
		 * freed until they are released.
		 */
		ret = request_irq(info->irq, uio_interrupt,
				  info->irq_flags, info->name, idev);
		if (ret) {
			info->uio_dev = NULL;
			goto err_request_irq;
		}
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_del(&idev->dev);
err_device_create:
	uio_free_minor(idev->minor);
	put_device(&idev->dev);
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
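
/*
 * A minimal registration sketch for driver authors. Drivers normally
 * call this through the uio_register_device() wrapper from
 * <linux/uio_driver.h>, which supplies THIS_MODULE; the names my_uio
 * and pdev below are hypothetical:
 *
 *	static struct uio_info my_uio = {
 *		.name = "my_uio",
 *		.version = "0.1",
 *		.irq = UIO_IRQ_NONE,
 *	};
 *
 *	ret = uio_register_device(&pdev->dev, &my_uio);
 */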

static void devm_uio_unregister_device(struct device *dev, void *res)
{
	uio_unregister_device(*(struct uio_info **)res);
}

/**
 * __devm_uio_register_device - Resource managed uio_register_device()
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __devm_uio_register_device(struct module *owner,
			       struct device *parent,
			       struct uio_info *info)
{
	struct uio_info **ptr;
	int ret;

	ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = info;
	ret = __uio_register_device(owner, parent, info);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	devres_add(parent, ptr);

	return 0;
}
EXPORT_SYMBOL_GPL(__devm_uio_register_device);
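
/*
 * With the resource-managed variant, unregistration is tied to the
 * parent's devres list, so a probe routine can simply do (a sketch
 * reusing the hypothetical pdev and my_uio names from above):
 *
 *	ret = devm_uio_register_device(&pdev->dev, &my_uio);
 */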

/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;
	unsigned long minor;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;
	minor = idev->minor;

	mutex_lock(&idev->info_lock);
	uio_dev_del_attributes(idev);

	if (info->irq && info->irq != UIO_IRQ_CUSTOM)
		free_irq(info->irq, idev);

	idev->info = NULL;
	mutex_unlock(&idev->info_lock);

	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);

	uio_free_minor(minor);
	device_unregister(&idev->dev);
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
	return init_uio_class();
}

static void __exit uio_exit(void)
{
	release_uio_class();
	idr_destroy(&uio_idr);
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");