1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Framework for buffer objects that can be shared across devices/subsystems. |
4 | * |
5 | * Copyright(C) 2011 Linaro Limited. All rights reserved. |
6 | * Author: Sumit Semwal <sumit.semwal@ti.com> |
7 | * |
 * Many thanks to the linaro-mm-sig list, and especially
9 | * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and |
10 | * Daniel Vetter <daniel@ffwll.ch> for their support in creation and |
11 | * refining of this idea. |
12 | */ |
13 | |
14 | #include <linux/fs.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/dma-buf.h> |
17 | #include <linux/dma-fence.h> |
18 | #include <linux/dma-fence-unwrap.h> |
19 | #include <linux/anon_inodes.h> |
20 | #include <linux/export.h> |
21 | #include <linux/debugfs.h> |
22 | #include <linux/module.h> |
23 | #include <linux/seq_file.h> |
24 | #include <linux/sync_file.h> |
25 | #include <linux/poll.h> |
26 | #include <linux/dma-resv.h> |
27 | #include <linux/mm.h> |
28 | #include <linux/mount.h> |
29 | #include <linux/pseudo_fs.h> |
30 | |
31 | #include <uapi/linux/dma-buf.h> |
32 | #include <uapi/linux/magic.h> |
33 | |
34 | #include "dma-buf-sysfs-stats.h" |
35 | |
36 | static inline int is_dma_buf_file(struct file *); |
37 | |
38 | struct dma_buf_list { |
39 | struct list_head head; |
40 | struct mutex lock; |
41 | }; |
42 | |
43 | static struct dma_buf_list db_list; |
44 | |
45 | static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) |
46 | { |
47 | struct dma_buf *dmabuf; |
48 | char name[DMA_BUF_NAME_LEN]; |
49 | size_t ret = 0; |
50 | |
51 | dmabuf = dentry->d_fsdata; |
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
59 | } |
60 | |
61 | static void dma_buf_release(struct dentry *dentry) |
62 | { |
63 | struct dma_buf *dmabuf; |
64 | |
65 | dmabuf = dentry->d_fsdata; |
66 | if (unlikely(!dmabuf)) |
67 | return; |
68 | |
69 | BUG_ON(dmabuf->vmapping_counter); |
70 | |
71 | /* |
72 | * If you hit this BUG() it could mean: |
73 | * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else |
74 | * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback |
75 | */ |
76 | BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); |
77 | |
78 | dma_buf_stats_teardown(dmabuf); |
79 | dmabuf->ops->release(dmabuf); |
80 | |
81 | if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) |
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
88 | } |
89 | |
90 | static int dma_buf_file_release(struct inode *inode, struct file *file) |
91 | { |
92 | struct dma_buf *dmabuf; |
93 | |
94 | if (!is_dma_buf_file(file)) |
95 | return -EINVAL; |
96 | |
97 | dmabuf = file->private_data; |
98 | if (dmabuf) { |
99 | mutex_lock(&db_list.lock); |
		list_del(&dmabuf->list_node);
		mutex_unlock(&db_list.lock);
102 | } |
103 | |
104 | return 0; |
105 | } |
106 | |
107 | static const struct dentry_operations dma_buf_dentry_ops = { |
108 | .d_dname = dmabuffs_dname, |
109 | .d_release = dma_buf_release, |
110 | }; |
111 | |
112 | static struct vfsmount *dma_buf_mnt; |
113 | |
114 | static int dma_buf_fs_init_context(struct fs_context *fc) |
115 | { |
116 | struct pseudo_fs_context *ctx; |
117 | |
118 | ctx = init_pseudo(fc, DMA_BUF_MAGIC); |
119 | if (!ctx) |
120 | return -ENOMEM; |
121 | ctx->dops = &dma_buf_dentry_ops; |
122 | return 0; |
123 | } |
124 | |
125 | static struct file_system_type dma_buf_fs_type = { |
126 | .name = "dmabuf" , |
127 | .init_fs_context = dma_buf_fs_init_context, |
128 | .kill_sb = kill_anon_super, |
129 | }; |
130 | |
131 | static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) |
132 | { |
133 | struct dma_buf *dmabuf; |
134 | |
135 | if (!is_dma_buf_file(file)) |
136 | return -EINVAL; |
137 | |
138 | dmabuf = file->private_data; |
139 | |
140 | /* check if buffer supports mmap */ |
141 | if (!dmabuf->ops->mmap) |
142 | return -EINVAL; |
143 | |
144 | /* check for overflowing the buffer's size */ |
145 | if (vma->vm_pgoff + vma_pages(vma) > |
146 | dmabuf->size >> PAGE_SHIFT) |
147 | return -EINVAL; |
148 | |
149 | return dmabuf->ops->mmap(dmabuf, vma); |
150 | } |
151 | |
152 | static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) |
153 | { |
154 | struct dma_buf *dmabuf; |
155 | loff_t base; |
156 | |
157 | if (!is_dma_buf_file(file)) |
158 | return -EBADF; |
159 | |
160 | dmabuf = file->private_data; |
161 | |
	/*
	 * Only support discovering the end of the buffer, but also
	 * allow SEEK_SET to maintain the idiomatic SEEK_END(0),
	 * SEEK_CUR(0) pattern.
	 */
165 | if (whence == SEEK_END) |
166 | base = dmabuf->size; |
167 | else if (whence == SEEK_SET) |
168 | base = 0; |
169 | else |
170 | return -EINVAL; |
171 | |
172 | if (offset != 0) |
173 | return -EINVAL; |
174 | |
175 | return base + offset; |
176 | } |
177 | |
178 | /** |
179 | * DOC: implicit fence polling |
180 | * |
181 | * To support cross-device and cross-driver synchronization of buffer access |
182 | * implicit fences (represented internally in the kernel with &struct dma_fence) |
183 | * can be attached to a &dma_buf. The glue for that and a few related things are |
184 | * provided in the &dma_resv structure. |
185 | * |
186 | * Userspace can query the state of these implicitly tracked fences using poll() |
187 | * and related system calls: |
188 | * |
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
190 | * most recent write or exclusive fence. |
191 | * |
192 | * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of |
193 | * all attached fences, shared and exclusive ones. |
194 | * |
195 | * Note that this only signals the completion of the respective fences, i.e. the |
196 | * DMA transfers are complete. Cache flushing and any other necessary |
197 | * preparations before CPU access can begin still need to happen. |
198 | * |
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using dma_buf_export_sync_file().
201 | */ |
202 | |
203 | static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
204 | { |
205 | struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; |
206 | struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); |
207 | unsigned long flags; |
208 | |
209 | spin_lock_irqsave(&dcb->poll->lock, flags); |
210 | wake_up_locked_poll(dcb->poll, dcb->active); |
211 | dcb->active = 0; |
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
213 | dma_fence_put(fence); |
214 | /* Paired with get_file in dma_buf_poll */ |
215 | fput(dmabuf->file); |
216 | } |
217 | |
218 | static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, |
219 | struct dma_buf_poll_cb_t *dcb) |
220 | { |
221 | struct dma_resv_iter cursor; |
222 | struct dma_fence *fence; |
223 | int r; |
224 | |
225 | dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), |
226 | fence) { |
227 | dma_fence_get(fence); |
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
229 | if (!r) |
230 | return true; |
231 | dma_fence_put(fence); |
232 | } |
233 | |
234 | return false; |
235 | } |
236 | |
237 | static __poll_t dma_buf_poll(struct file *file, poll_table *poll) |
238 | { |
239 | struct dma_buf *dmabuf; |
240 | struct dma_resv *resv; |
241 | __poll_t events; |
242 | |
243 | dmabuf = file->private_data; |
244 | if (!dmabuf || !dmabuf->resv) |
245 | return EPOLLERR; |
246 | |
247 | resv = dmabuf->resv; |
248 | |
	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);
256 | |
257 | if (events & EPOLLOUT) { |
258 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out; |
259 | |
260 | /* Check that callback isn't busy */ |
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
275 | else |
276 | events &= ~EPOLLOUT; |
277 | } |
278 | } |
279 | |
280 | if (events & EPOLLIN) { |
281 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in; |
282 | |
283 | /* Check that callback isn't busy */ |
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
298 | else |
299 | events &= ~EPOLLIN; |
300 | } |
301 | } |
302 | |
	dma_resv_unlock(resv);
304 | return events; |
305 | } |
306 | |
307 | /** |
 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
 * It supports changing the name of the dma-buf if the same piece of
 * memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in] dmabuf buffer that will be renamed.
 * @buf: [in] A piece of userspace memory that contains the name of
 * the dma-buf.
 *
 * Returns 0 on success, or a negative errno if the name cannot be
 * copied from userspace.
318 | * |
319 | */ |
320 | static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) |
321 | { |
322 | char *name = strndup_user(buf, DMA_BUF_NAME_LEN); |
323 | |
	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);
331 | |
332 | return 0; |
333 | } |
334 | |
335 | #if IS_ENABLED(CONFIG_SYNC_FILE) |
336 | static long dma_buf_export_sync_file(struct dma_buf *dmabuf, |
337 | void __user *user_data) |
338 | { |
339 | struct dma_buf_export_sync_file arg; |
340 | enum dma_resv_usage usage; |
341 | struct dma_fence *fence = NULL; |
342 | struct sync_file *sync_file; |
343 | int fd, ret; |
344 | |
	if (copy_from_user(&arg, user_data, sizeof(arg)))
346 | return -EFAULT; |
347 | |
348 | if (arg.flags & ~DMA_BUF_SYNC_RW) |
349 | return -EINVAL; |
350 | |
351 | if ((arg.flags & DMA_BUF_SYNC_RW) == 0) |
352 | return -EINVAL; |
353 | |
354 | fd = get_unused_fd_flags(O_CLOEXEC); |
355 | if (fd < 0) |
356 | return fd; |
357 | |
	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
360 | if (ret) |
361 | goto err_put_fd; |
362 | |
363 | if (!fence) |
364 | fence = dma_fence_get_stub(); |
365 | |
366 | sync_file = sync_file_create(fence); |
367 | |
368 | dma_fence_put(fence); |
369 | |
370 | if (!sync_file) { |
371 | ret = -ENOMEM; |
372 | goto err_put_fd; |
373 | } |
374 | |
375 | arg.fd = fd; |
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
377 | ret = -EFAULT; |
378 | goto err_put_file; |
379 | } |
380 | |
	fd_install(fd, sync_file->file);
382 | |
383 | return 0; |
384 | |
385 | err_put_file: |
386 | fput(sync_file->file); |
387 | err_put_fd: |
388 | put_unused_fd(fd); |
389 | return ret; |
390 | } |
391 | |
392 | static long dma_buf_import_sync_file(struct dma_buf *dmabuf, |
393 | const void __user *user_data) |
394 | { |
395 | struct dma_buf_import_sync_file arg; |
396 | struct dma_fence *fence, *f; |
397 | enum dma_resv_usage usage; |
398 | struct dma_fence_unwrap iter; |
399 | unsigned int num_fences; |
400 | int ret = 0; |
401 | |
	if (copy_from_user(&arg, user_data, sizeof(arg)))
403 | return -EFAULT; |
404 | |
405 | if (arg.flags & ~DMA_BUF_SYNC_RW) |
406 | return -EINVAL; |
407 | |
408 | if ((arg.flags & DMA_BUF_SYNC_RW) == 0) |
409 | return -EINVAL; |
410 | |
	fence = sync_file_get_fence(arg.fd);
412 | if (!fence) |
413 | return -EINVAL; |
414 | |
415 | usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE : |
416 | DMA_RESV_USAGE_READ; |
417 | |
418 | num_fences = 0; |
419 | dma_fence_unwrap_for_each(f, &iter, fence) |
420 | ++num_fences; |
421 | |
422 | if (num_fences > 0) { |
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
432 | } |
433 | |
434 | dma_fence_put(fence); |
435 | |
436 | return ret; |
437 | } |
438 | #endif |
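
/*
 * Illustrative userspace sketch (an assumption): snapshotting the fences a
 * reader would have to wait for as a sync_file fd, matching
 * dma_buf_export_sync_file() above. dmabuf_fd is a placeholder for a valid
 * dma-buf file descriptor.
 *
 *	struct dma_buf_export_sync_file arg = {
 *		.flags = DMA_BUF_SYNC_READ,	// fences blocking a read
 *	};
 *
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) == 0) {
 *		// arg.fd is now a sync_file; wait on it (e.g. with
 *		// libsync's sync_wait()), then close(arg.fd).
 *	}
 */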
439 | |
440 | static long dma_buf_ioctl(struct file *file, |
441 | unsigned int cmd, unsigned long arg) |
442 | { |
443 | struct dma_buf *dmabuf; |
444 | struct dma_buf_sync sync; |
445 | enum dma_data_direction direction; |
446 | int ret; |
447 | |
448 | dmabuf = file->private_data; |
449 | |
450 | switch (cmd) { |
451 | case DMA_BUF_IOCTL_SYNC: |
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
453 | return -EFAULT; |
454 | |
455 | if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) |
456 | return -EINVAL; |
457 | |
458 | switch (sync.flags & DMA_BUF_SYNC_RW) { |
459 | case DMA_BUF_SYNC_READ: |
460 | direction = DMA_FROM_DEVICE; |
461 | break; |
462 | case DMA_BUF_SYNC_WRITE: |
463 | direction = DMA_TO_DEVICE; |
464 | break; |
465 | case DMA_BUF_SYNC_RW: |
466 | direction = DMA_BIDIRECTIONAL; |
467 | break; |
468 | default: |
469 | return -EINVAL; |
470 | } |
471 | |
472 | if (sync.flags & DMA_BUF_SYNC_END) |
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);
476 | |
477 | return ret; |
478 | |
479 | case DMA_BUF_SET_NAME_A: |
480 | case DMA_BUF_SET_NAME_B: |
		return dma_buf_set_name(dmabuf, (const char __user *)arg);
482 | |
483 | #if IS_ENABLED(CONFIG_SYNC_FILE) |
484 | case DMA_BUF_IOCTL_EXPORT_SYNC_FILE: |
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
488 | #endif |
489 | |
490 | default: |
491 | return -ENOTTY; |
492 | } |
493 | } |
494 | |
495 | static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) |
496 | { |
497 | struct dma_buf *dmabuf = file->private_data; |
498 | |
499 | seq_printf(m, fmt: "size:\t%zu\n" , dmabuf->size); |
500 | /* Don't count the temporary reference taken inside procfs seq_show */ |
501 | seq_printf(m, fmt: "count:\t%ld\n" , file_count(dmabuf->file) - 1); |
502 | seq_printf(m, fmt: "exp_name:\t%s\n" , dmabuf->exp_name); |
503 | spin_lock(lock: &dmabuf->name_lock); |
504 | if (dmabuf->name) |
505 | seq_printf(m, fmt: "name:\t%s\n" , dmabuf->name); |
506 | spin_unlock(lock: &dmabuf->name_lock); |
507 | } |
508 | |
509 | static const struct file_operations dma_buf_fops = { |
510 | .release = dma_buf_file_release, |
511 | .mmap = dma_buf_mmap_internal, |
512 | .llseek = dma_buf_llseek, |
513 | .poll = dma_buf_poll, |
514 | .unlocked_ioctl = dma_buf_ioctl, |
515 | .compat_ioctl = compat_ptr_ioctl, |
516 | .show_fdinfo = dma_buf_show_fdinfo, |
517 | }; |
518 | |
519 | /* |
520 | * is_dma_buf_file - Check if struct file* is associated with dma_buf |
521 | */ |
522 | static inline int is_dma_buf_file(struct file *file) |
523 | { |
524 | return file->f_op == &dma_buf_fops; |
525 | } |
526 | |
527 | static struct file *dma_buf_getfile(size_t size, int flags) |
528 | { |
529 | static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); |
530 | struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); |
531 | struct file *file; |
532 | |
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);
538 | |
539 | /* |
540 | * The ->i_ino acquired from get_next_ino() is not unique thus |
541 | * not suitable for using it as dentry name by dmabuf stats. |
542 | * Override ->i_ino with the unique and dmabuffs specific |
543 | * value. |
544 | */ |
545 | inode->i_ino = atomic64_add_return(i: 1, v: &dmabuf_inode); |
546 | flags &= O_ACCMODE | O_NONBLOCK; |
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
550 | goto err_alloc_file; |
551 | |
552 | return file; |
553 | |
554 | err_alloc_file: |
555 | iput(inode); |
556 | return file; |
557 | } |
558 | |
559 | /** |
560 | * DOC: dma buf device access |
561 | * |
562 | * For device DMA access to a shared DMA buffer the usual sequence of operations |
563 | * is fairly simple: |
564 | * |
 * 1. The exporter defines its exporter instance using
566 | * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private |
567 | * buffer object into a &dma_buf. It then exports that &dma_buf to userspace |
568 | * as a file descriptor by calling dma_buf_fd(). |
569 | * |
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
571 | * to share with: First the file descriptor is converted to a &dma_buf using |
572 | * dma_buf_get(). Then the buffer is attached to the device using |
573 | * dma_buf_attach(). |
574 | * |
575 | * Up to this stage the exporter is still free to migrate or reallocate the |
576 | * backing storage. |
577 | * |
578 | * 3. Once the buffer is attached to all devices userspace can initiate DMA |
579 | * access to the shared buffer. In the kernel this is done by calling |
580 | * dma_buf_map_attachment() and dma_buf_unmap_attachment(). |
581 | * |
582 | * 4. Once a driver is done with a shared buffer it needs to call |
583 | * dma_buf_detach() (after cleaning up any mappings) and then release the |
584 | * reference acquired with dma_buf_get() by calling dma_buf_put(). |
585 | * |
586 | * For the detailed semantics exporters are expected to implement see |
587 | * &dma_buf_ops. |
588 | */ |
589 | |
590 | /** |
591 | * dma_buf_export - Creates a new dma_buf, and associates an anon file |
592 | * with this buffer, so it can be exported. |
593 | * Also connect the allocator specific data and ops to the buffer. |
594 | * Additionally, provide a name string for exporter; useful in debugging. |
595 | * |
596 | * @exp_info: [in] holds all the export related information provided |
597 | * by the exporter. see &struct dma_buf_export_info |
598 | * for further details. |
599 | * |
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. If the ops are
 * missing, or if allocating the struct dma_buf fails, a negative error is
 * returned, wrapped into a pointer.
604 | * |
605 | * For most cases the easiest way to create @exp_info is through the |
606 | * %DEFINE_DMA_BUF_EXPORT_INFO macro. |
607 | */ |
608 | struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) |
609 | { |
610 | struct dma_buf *dmabuf; |
611 | struct dma_resv *resv = exp_info->resv; |
612 | struct file *file; |
613 | size_t alloc_size = sizeof(struct dma_buf); |
614 | int ret; |
615 | |
616 | if (WARN_ON(!exp_info->priv || !exp_info->ops |
617 | || !exp_info->ops->map_dma_buf |
618 | || !exp_info->ops->unmap_dma_buf |
619 | || !exp_info->ops->release)) |
		return ERR_PTR(-EINVAL);

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
635 | goto err_module; |
636 | } |
637 | |
638 | if (!exp_info->resv) |
639 | alloc_size += sizeof(struct dma_resv); |
640 | else |
641 | /* prevent &dma_buf[1] == dma_buf->resv */ |
642 | alloc_size += 1; |
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
644 | if (!dmabuf) { |
645 | ret = -ENOMEM; |
646 | goto err_file; |
647 | } |
648 | |
649 | dmabuf->priv = exp_info->priv; |
650 | dmabuf->ops = exp_info->ops; |
651 | dmabuf->size = exp_info->size; |
652 | dmabuf->exp_name = exp_info->exp_name; |
653 | dmabuf->owner = exp_info->owner; |
654 | spin_lock_init(&dmabuf->name_lock); |
655 | init_waitqueue_head(&dmabuf->poll); |
656 | dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; |
657 | dmabuf->cb_in.active = dmabuf->cb_out.active = 0; |
	INIT_LIST_HEAD(&dmabuf->attachments);
659 | |
660 | if (!resv) { |
661 | dmabuf->resv = (struct dma_resv *)&dmabuf[1]; |
		dma_resv_init(dmabuf->resv);
663 | } else { |
664 | dmabuf->resv = resv; |
665 | } |
666 | |
667 | ret = dma_buf_stats_setup(dmabuf, file); |
668 | if (ret) |
669 | goto err_dmabuf; |
670 | |
671 | file->private_data = dmabuf; |
672 | file->f_path.dentry->d_fsdata = dmabuf; |
673 | dmabuf->file = file; |
674 | |
675 | mutex_lock(&db_list.lock); |
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);
678 | |
679 | return dmabuf; |
680 | |
681 | err_dmabuf: |
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
690 | } |
691 | EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF); |
692 | |
693 | /** |
694 | * dma_buf_fd - returns a file descriptor for the given struct dma_buf |
695 | * @dmabuf: [in] pointer to dma_buf for which fd is required. |
696 | * @flags: [in] flags to give to fd |
697 | * |
698 | * On success, returns an associated 'fd'. Else, returns error. |
699 | */ |
700 | int dma_buf_fd(struct dma_buf *dmabuf, int flags) |
701 | { |
702 | int fd; |
703 | |
704 | if (!dmabuf || !dmabuf->file) |
705 | return -EINVAL; |
706 | |
707 | fd = get_unused_fd_flags(flags); |
708 | if (fd < 0) |
709 | return fd; |
710 | |
	fd_install(fd, dmabuf->file);
712 | |
713 | return fd; |
714 | } |
715 | EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF); |
716 | |
717 | /** |
718 | * dma_buf_get - returns the struct dma_buf related to an fd |
719 | * @fd: [in] fd associated with the struct dma_buf to be returned |
720 | * |
 * On success, returns the struct dma_buf associated with an fd; uses
 * the file's refcounting done by fget() to increase the refcount.
 * Returns ERR_PTR otherwise.
724 | */ |
725 | struct dma_buf *dma_buf_get(int fd) |
726 | { |
727 | struct file *file; |
728 | |
729 | file = fget(fd); |
730 | |
731 | if (!file) |
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
737 | } |
738 | |
739 | return file->private_data; |
740 | } |
741 | EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF); |
742 | |
743 | /** |
744 | * dma_buf_put - decreases refcount of the buffer |
745 | * @dmabuf: [in] buffer to reduce refcount of |
746 | * |
747 | * Uses file's refcounting done implicitly by fput(). |
748 | * |
749 | * If, as a result of this call, the refcount becomes 0, the 'release' file |
750 | * operation related to this fd is called. It calls &dma_buf_ops.release vfunc |
751 | * in turn, and frees the memory allocated for dmabuf when exported. |
752 | */ |
753 | void dma_buf_put(struct dma_buf *dmabuf) |
754 | { |
755 | if (WARN_ON(!dmabuf || !dmabuf->file)) |
756 | return; |
757 | |
758 | fput(dmabuf->file); |
759 | } |
760 | EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF); |
761 | |
762 | static void mangle_sg_table(struct sg_table *sg_table) |
763 | { |
764 | #ifdef CONFIG_DMABUF_DEBUG |
765 | int i; |
766 | struct scatterlist *sg; |
767 | |
768 | /* To catch abuse of the underlying struct page by importers mix |
769 | * up the bits, but take care to preserve the low SG_ bits to |
770 | * not corrupt the sgt. The mixing is undone in __unmap_dma_buf |
771 | * before passing the sgt back to the exporter. */ |
772 | for_each_sgtable_sg(sg_table, sg, i) |
773 | sg->page_link ^= ~0xffUL; |
774 | #endif |
775 | |
776 | } |
777 | static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, |
778 | enum dma_data_direction direction) |
779 | { |
780 | struct sg_table *sg_table; |
781 | signed long ret; |
782 | |
783 | sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); |
	if (IS_ERR_OR_NULL(sg_table))
785 | return sg_table; |
786 | |
787 | if (!dma_buf_attachment_is_dynamic(attach)) { |
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
795 | } |
796 | } |
797 | |
798 | mangle_sg_table(sg_table); |
799 | return sg_table; |
800 | } |
801 | |
802 | /** |
803 | * DOC: locking convention |
804 | * |
805 | * In order to avoid deadlock situations between dma-buf exports and importers, |
806 | * all dma-buf API users must follow the common dma-buf locking convention. |
807 | * |
808 | * Convention for importers |
809 | * |
810 | * 1. Importers must hold the dma-buf reservation lock when calling these |
811 | * functions: |
812 | * |
813 | * - dma_buf_pin() |
814 | * - dma_buf_unpin() |
815 | * - dma_buf_map_attachment() |
816 | * - dma_buf_unmap_attachment() |
817 | * - dma_buf_vmap() |
818 | * - dma_buf_vunmap() |
819 | * |
820 | * 2. Importers must not hold the dma-buf reservation lock when calling these |
821 | * functions: |
822 | * |
823 | * - dma_buf_attach() |
824 | * - dma_buf_dynamic_attach() |
825 | * - dma_buf_detach() |
826 | * - dma_buf_export() |
827 | * - dma_buf_fd() |
828 | * - dma_buf_get() |
829 | * - dma_buf_put() |
830 | * - dma_buf_mmap() |
831 | * - dma_buf_begin_cpu_access() |
832 | * - dma_buf_end_cpu_access() |
833 | * - dma_buf_map_attachment_unlocked() |
834 | * - dma_buf_unmap_attachment_unlocked() |
835 | * - dma_buf_vmap_unlocked() |
836 | * - dma_buf_vunmap_unlocked() |
837 | * |
838 | * Convention for exporters |
839 | * |
840 | * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf |
841 | * reservation and exporter can take the lock: |
842 | * |
843 | * - &dma_buf_ops.attach() |
844 | * - &dma_buf_ops.detach() |
845 | * - &dma_buf_ops.release() |
846 | * - &dma_buf_ops.begin_cpu_access() |
847 | * - &dma_buf_ops.end_cpu_access() |
848 | * - &dma_buf_ops.mmap() |
849 | * |
850 | * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf |
851 | * reservation and exporter can't take the lock: |
852 | * |
853 | * - &dma_buf_ops.pin() |
854 | * - &dma_buf_ops.unpin() |
855 | * - &dma_buf_ops.map_dma_buf() |
856 | * - &dma_buf_ops.unmap_dma_buf() |
857 | * - &dma_buf_ops.vmap() |
858 | * - &dma_buf_ops.vunmap() |
859 | * |
860 | * 3. Exporters must hold the dma-buf reservation lock when calling these |
861 | * functions: |
862 | * |
863 | * - dma_buf_move_notify() |
864 | */ |
865 | |
866 | /** |
867 | * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list |
868 | * @dmabuf: [in] buffer to attach device to. |
869 | * @dev: [in] device to be attached. |
870 | * @importer_ops: [in] importer operations for the attachment |
871 | * @importer_priv: [in] importer private pointer for the attachment |
872 | * |
873 | * Returns struct dma_buf_attachment pointer for this attachment. Attachments |
874 | * must be cleaned up by calling dma_buf_detach(). |
875 | * |
876 | * Optionally this calls &dma_buf_ops.attach to allow device-specific attach |
877 | * functionality. |
878 | * |
879 | * Returns: |
880 | * |
881 | * A pointer to newly created &dma_buf_attachment on success, or a negative |
882 | * error code wrapped into a pointer on failure. |
883 | * |
884 | * Note that this can fail if the backing storage of @dmabuf is in a place not |
885 | * accessible to @dev, and cannot be moved to a more suitable place. This is |
886 | * indicated with the error code -EBUSY. |
887 | */ |
888 | struct dma_buf_attachment * |
889 | dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, |
890 | const struct dma_buf_attach_ops *importer_ops, |
891 | void *importer_priv) |
892 | { |
893 | struct dma_buf_attachment *attach; |
894 | int ret; |
895 | |
	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);
905 | |
906 | attach->dev = dev; |
907 | attach->dmabuf = dmabuf; |
908 | if (importer_ops) |
909 | attach->peer2peer = importer_ops->allow_peer2peer; |
910 | attach->importer_ops = importer_ops; |
911 | attach->importer_priv = importer_priv; |
912 | |
913 | if (dmabuf->ops->attach) { |
914 | ret = dmabuf->ops->attach(dmabuf, attach); |
915 | if (ret) |
916 | goto err_attach; |
917 | } |
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);
921 | |
922 | /* When either the importer or the exporter can't handle dynamic |
923 | * mappings we cache the mapping here to avoid issues with the |
924 | * reservation object lock. |
925 | */ |
926 | if (dma_buf_attachment_is_dynamic(attach) != |
927 | dma_buf_is_dynamic(dmabuf)) { |
928 | struct sg_table *sgt; |
929 | |
		dma_resv_lock(attach->dmabuf->resv, NULL);
		if (dma_buf_is_dynamic(attach->dmabuf)) {
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		dma_resv_unlock(attach->dmabuf->resv);
945 | attach->sgt = sgt; |
946 | attach->dir = DMA_BIDIRECTIONAL; |
947 | } |
948 | |
949 | return attach; |
950 | |
951 | err_attach: |
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
964 | } |
965 | EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF); |
966 | |
967 | /** |
968 | * dma_buf_attach - Wrapper for dma_buf_dynamic_attach |
969 | * @dmabuf: [in] buffer to attach device to. |
970 | * @dev: [in] device to be attached. |
971 | * |
972 | * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static |
973 | * mapping. |
974 | */ |
975 | struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, |
976 | struct device *dev) |
977 | { |
978 | return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); |
979 | } |
980 | EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF); |
981 | |
982 | static void __unmap_dma_buf(struct dma_buf_attachment *attach, |
983 | struct sg_table *sg_table, |
984 | enum dma_data_direction direction) |
985 | { |
986 | /* uses XOR, hence this unmangles */ |
987 | mangle_sg_table(sg_table); |
988 | |
989 | attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); |
990 | } |
991 | |
992 | /** |
993 | * dma_buf_detach - Remove the given attachment from dmabuf's attachments list |
994 | * @dmabuf: [in] buffer to detach from. |
995 | * @attach: [in] attachment to be detached; is free'd after this call. |
996 | * |
997 | * Clean up a device attachment obtained by calling dma_buf_attach(). |
998 | * |
999 | * Optionally this calls &dma_buf_ops.detach for device-specific detach. |
1000 | */ |
1001 | void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) |
1002 | { |
1003 | if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf)) |
1004 | return; |
1005 | |
	dma_resv_lock(dmabuf->resv, NULL);

	if (attach->sgt) {
		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf))
			dmabuf->ops->unpin(attach);
	}
	list_del(&attach->node);

	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
1023 | } |
1024 | EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF); |
1025 | |
1026 | /** |
1027 | * dma_buf_pin - Lock down the DMA-buf |
1028 | * @attach: [in] attachment which should be pinned |
1029 | * |
1030 | * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may |
1031 | * call this, and only for limited use cases like scanout and not for temporary |
1032 | * pin operations. It is not permitted to allow userspace to pin arbitrary |
1033 | * amounts of buffers through this interface. |
1034 | * |
1035 | * Buffers must be unpinned by calling dma_buf_unpin(). |
1036 | * |
1037 | * Returns: |
1038 | * 0 on success, negative error code on failure. |
1039 | */ |
1040 | int dma_buf_pin(struct dma_buf_attachment *attach) |
1041 | { |
1042 | struct dma_buf *dmabuf = attach->dmabuf; |
1043 | int ret = 0; |
1044 | |
1045 | WARN_ON(!dma_buf_attachment_is_dynamic(attach)); |
1046 | |
1047 | dma_resv_assert_held(dmabuf->resv); |
1048 | |
1049 | if (dmabuf->ops->pin) |
1050 | ret = dmabuf->ops->pin(attach); |
1051 | |
1052 | return ret; |
1053 | } |
1054 | EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF); |
1055 | |
1056 | /** |
1057 | * dma_buf_unpin - Unpin a DMA-buf |
1058 | * @attach: [in] attachment which should be unpinned |
1059 | * |
1060 | * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move |
1061 | * any mapping of @attach again and inform the importer through |
1062 | * &dma_buf_attach_ops.move_notify. |
1063 | */ |
1064 | void dma_buf_unpin(struct dma_buf_attachment *attach) |
1065 | { |
1066 | struct dma_buf *dmabuf = attach->dmabuf; |
1067 | |
1068 | WARN_ON(!dma_buf_attachment_is_dynamic(attach)); |
1069 | |
1070 | dma_resv_assert_held(dmabuf->resv); |
1071 | |
1072 | if (dmabuf->ops->unpin) |
1073 | dmabuf->ops->unpin(attach); |
1074 | } |
1075 | EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF); |
1076 | |
1077 | /** |
1078 | * dma_buf_map_attachment - Returns the scatterlist table of the attachment; |
1079 | * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the |
1080 | * dma_buf_ops. |
1081 | * @attach: [in] attachment whose scatterlist is to be returned |
1082 | * @direction: [in] direction of DMA transfer |
1083 | * |
1084 | * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR |
1085 | * on error. May return -EINTR if it is interrupted by a signal. |
1086 | * |
1087 | * On success, the DMA addresses and lengths in the returned scatterlist are |
1088 | * PAGE_SIZE aligned. |
1089 | * |
1090 | * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that |
1091 | * the underlying backing storage is pinned for as long as a mapping exists, |
1092 | * therefore users/importers should not hold onto a mapping for undue amounts of |
1093 | * time. |
1094 | * |
1095 | * Important: Dynamic importers must wait for the exclusive fence of the struct |
1096 | * dma_resv attached to the DMA-BUF first. |
1097 | */ |
1098 | struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, |
1099 | enum dma_data_direction direction) |
1100 | { |
1101 | struct sg_table *sg_table; |
1102 | int r; |
1103 | |
1104 | might_sleep(); |
1105 | |
1106 | if (WARN_ON(!attach || !attach->dmabuf)) |
		return ERR_PTR(-EINVAL);
1108 | |
1109 | dma_resv_assert_held(attach->dmabuf->resv); |
1110 | |
1111 | if (attach->sgt) { |
1112 | /* |
1113 | * Two mappings with different directions for the same |
1114 | * attachment are not allowed. |
1115 | */ |
1116 | if (attach->dir != direction && |
1117 | attach->dir != DMA_BIDIRECTIONAL) |
			return ERR_PTR(-EBUSY);
1119 | |
1120 | return attach->sgt; |
1121 | } |
1122 | |
	if (dma_buf_is_dynamic(attach->dmabuf)) {
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
1128 | } |
1129 | } |
1130 | |
1131 | sg_table = __map_dma_buf(attach, direction); |
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1140 | attach->sgt = sg_table; |
1141 | attach->dir = direction; |
1142 | } |
1143 | |
1144 | #ifdef CONFIG_DMA_API_DEBUG |
	if (!IS_ERR(sg_table)) {
1146 | struct scatterlist *sg; |
1147 | u64 addr; |
1148 | int len; |
1149 | int i; |
1150 | |
1151 | for_each_sgtable_dma_sg(sg_table, sg, i) { |
1152 | addr = sg_dma_address(sg); |
1153 | len = sg_dma_len(sg); |
1154 | if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) { |
1155 | pr_debug("%s: addr %llx or len %x is not page aligned!\n" , |
1156 | __func__, addr, len); |
1157 | } |
1158 | } |
1159 | } |
1160 | #endif /* CONFIG_DMA_API_DEBUG */ |
1161 | return sg_table; |
1162 | } |
1163 | EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF); |
1164 | |
1165 | /** |
1166 | * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment; |
1167 | * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the |
1168 | * dma_buf_ops. |
1169 | * @attach: [in] attachment whose scatterlist is to be returned |
1170 | * @direction: [in] direction of DMA transfer |
1171 | * |
1172 | * Unlocked variant of dma_buf_map_attachment(). |
1173 | */ |
1174 | struct sg_table * |
1175 | dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, |
1176 | enum dma_data_direction direction) |
1177 | { |
1178 | struct sg_table *sg_table; |
1179 | |
1180 | might_sleep(); |
1181 | |
	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);
1188 | |
1189 | return sg_table; |
1190 | } |
1191 | EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF); |
1192 | |
1193 | /** |
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1195 | * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of |
1196 | * dma_buf_ops. |
1197 | * @attach: [in] attachment to unmap buffer from |
1198 | * @sg_table: [in] scatterlist info of the buffer to unmap |
1199 | * @direction: [in] direction of DMA transfer |
1200 | * |
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1202 | */ |
1203 | void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, |
1204 | struct sg_table *sg_table, |
1205 | enum dma_data_direction direction) |
1206 | { |
1207 | might_sleep(); |
1208 | |
1209 | if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) |
1210 | return; |
1211 | |
1212 | dma_resv_assert_held(attach->dmabuf->resv); |
1213 | |
1214 | if (attach->sgt == sg_table) |
1215 | return; |
1216 | |
1217 | __unmap_dma_buf(attach, sg_table, direction); |
1218 | |
	if (dma_buf_is_dynamic(attach->dmabuf) &&
1220 | !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) |
1221 | dma_buf_unpin(attach); |
1222 | } |
1223 | EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF); |
1224 | |
1225 | /** |
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1227 | * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of |
1228 | * dma_buf_ops. |
1229 | * @attach: [in] attachment to unmap buffer from |
1230 | * @sg_table: [in] scatterlist info of the buffer to unmap |
1231 | * @direction: [in] direction of DMA transfer |
1232 | * |
1233 | * Unlocked variant of dma_buf_unmap_attachment(). |
1234 | */ |
1235 | void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, |
1236 | struct sg_table *sg_table, |
1237 | enum dma_data_direction direction) |
1238 | { |
1239 | might_sleep(); |
1240 | |
1241 | if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) |
1242 | return; |
1243 | |
	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
1247 | } |
1248 | EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF); |
1249 | |
1250 | /** |
1251 | * dma_buf_move_notify - notify attachments that DMA-buf is moving |
1252 | * |
1253 | * @dmabuf: [in] buffer which is moving |
1254 | * |
1255 | * Informs all attachments that they need to destroy and recreate all their |
1256 | * mappings. |
1257 | */ |
1258 | void dma_buf_move_notify(struct dma_buf *dmabuf) |
1259 | { |
1260 | struct dma_buf_attachment *attach; |
1261 | |
1262 | dma_resv_assert_held(dmabuf->resv); |
1263 | |
1264 | list_for_each_entry(attach, &dmabuf->attachments, node) |
1265 | if (attach->importer_ops) |
1266 | attach->importer_ops->move_notify(attach); |
1267 | } |
1268 | EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); |
1269 | |
1270 | /** |
1271 | * DOC: cpu access |
1272 | * |
1273 | * There are multiple reasons for supporting CPU access to a dma buffer object: |
1274 | * |
1275 | * - Fallback operations in the kernel, for example when a device is connected |
1276 | * over USB and the kernel needs to shuffle the data around first before |
1277 | * sending it away. Cache coherency is handled by bracketing any transactions |
1278 | * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() |
1279 | * access. |
1280 | * |
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1282 | * vmap interface is introduced. Note that on very old 32-bit architectures |
1283 | * vmalloc space might be limited and result in vmap calls failing. |
1284 | * |
1285 | * Interfaces:: |
1286 | * |
1287 | * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) |
1288 | * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) |
1289 | * |
1290 | * The vmap call can fail if there is no vmap support in the exporter, or if |
1291 | * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference |
1292 | * count for all vmap access and calls down into the exporter's vmap function |
1293 | * only when no vmapping exists, and only unmaps it once. Protection against |
 *   concurrent vmap/vunmap calls is provided by the dma-buf reservation lock.
1295 | * |
1296 | * - For full compatibility on the importer side with existing userspace |
1297 | * interfaces, which might already support mmap'ing buffers. This is needed in |
1298 | * many processing pipelines (e.g. feeding a software rendered image into a |
1299 | * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION |
1300 | * framework already supported this and for DMA buffer file descriptors to |
1301 | * replace ION buffers mmap support was needed. |
1302 | * |
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1304 | * fd. But like for CPU access there's a need to bracket the actual access, |
1305 | * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that |
1306 | * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must |
1307 | * be restarted. |
1308 | * |
1309 | * Some systems might need some sort of cache coherency management e.g. when |
1310 | * CPU and GPU domains are being accessed through dma-buf at the same time. |
1311 | * To circumvent this problem there are begin/end coherency markers, that |
1312 | * forward directly to existing dma-buf device drivers vfunc hooks. Userspace |
1313 | * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The |
1314 | * sequence would be used like following: |
1315 | * |
1316 | * - mmap dma-buf fd |
1317 | * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write |
1318 | * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you |
1319 | * want (with the new data being consumed by say the GPU or the scanout |
1320 | * device) |
1321 | * - munmap once you don't need the buffer any more |
1322 | * |
1323 | * For correctness and optimal performance, it is always required to use |
1324 | * SYNC_START and SYNC_END before and after, respectively, when accessing the |
1325 | * mapped address. Userspace cannot rely on coherent access, even when there |
1326 | * are systems where it just works without calling these ioctls. |
1327 | * |
1328 | * - And as a CPU fallback in userspace processing pipelines. |
1329 | * |
1330 | * Similar to the motivation for kernel cpu access it is again important that |
1331 | * the userspace code of a given importing subsystem can use the same |
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
1336 | * |
1337 | * The assumption in the current dma-buf interfaces is that redirecting the |
1338 | * initial mmap is all that's needed. A survey of some of the existing |
1339 | * subsystems shows that no driver seems to do any nefarious thing like |
1340 | * syncing up with outstanding asynchronous processing on the device or |
1341 | * allocating special resources at fault time. So hopefully this is good |
1342 | * enough, since adding interfaces to intercept pagefaults and allow pte |
1343 | * shootdowns would increase the complexity quite a bit. |
1344 | * |
1345 | * Interface:: |
1346 | * |
1347 | * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*, |
1348 | * unsigned long); |
1349 | * |
1350 | * If the importing subsystem simply provides a special-purpose mmap call to |
1351 | * set up a mapping in userspace, calling do_mmap with &dma_buf.file will |
1352 | * equally achieve that for a dma-buf object. |
1353 | */ |
1354 | |
1355 | static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
1356 | enum dma_data_direction direction) |
1357 | { |
1358 | bool write = (direction == DMA_BIDIRECTIONAL || |
1359 | direction == DMA_TO_DEVICE); |
1360 | struct dma_resv *resv = dmabuf->resv; |
1361 | long ret; |
1362 | |
1363 | /* Wait on any implicit rendering fences */ |
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
1366 | if (ret < 0) |
1367 | return ret; |
1368 | |
1369 | return 0; |
1370 | } |
1371 | |
1372 | /** |
1373 | * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the |
1374 | * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific |
1375 | * preparations. Coherency is only guaranteed in the specified range for the |
1376 | * specified access direction. |
1377 | * @dmabuf: [in] buffer to prepare cpu access for. |
1378 | * @direction: [in] direction of access. |
1379 | * |
1380 | * After the cpu access is complete the caller should call |
1381 | * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is |
1382 | * it guaranteed to be coherent with other DMA access. |
1383 | * |
1384 | * This function will also wait for any DMA transactions tracked through |
1385 | * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit |
1386 | * synchronization this function will only ensure cache coherency, callers must |
1387 | * ensure synchronization with such DMA transactions on their own. |
1388 | * |
1389 | * Can return negative error values, returns 0 on success. |
1390 | */ |
1391 | int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
1392 | enum dma_data_direction direction) |
1393 | { |
1394 | int ret = 0; |
1395 | |
1396 | if (WARN_ON(!dmabuf)) |
1397 | return -EINVAL; |
1398 | |
1399 | might_lock(&dmabuf->resv->lock.base); |
1400 | |
1401 | if (dmabuf->ops->begin_cpu_access) |
1402 | ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); |
1403 | |
	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
1408 | if (ret == 0) |
1409 | ret = __dma_buf_begin_cpu_access(dmabuf, direction); |
1410 | |
1411 | return ret; |
1412 | } |
1413 | EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); |
1414 | |
1415 | /** |
1416 | * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the |
1417 | * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific |
1418 | * actions. Coherency is only guaranteed in the specified range for the |
1419 | * specified access direction. |
1420 | * @dmabuf: [in] buffer to complete cpu access for. |
1421 | * @direction: [in] direction of access. |
1422 | * |
1423 | * This terminates CPU access started with dma_buf_begin_cpu_access(). |
1424 | * |
1425 | * Can return negative error values, returns 0 on success. |
1426 | */ |
1427 | int dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
1428 | enum dma_data_direction direction) |
1429 | { |
1430 | int ret = 0; |
1431 | |
1432 | WARN_ON(!dmabuf); |
1433 | |
1434 | might_lock(&dmabuf->resv->lock.base); |
1435 | |
1436 | if (dmabuf->ops->end_cpu_access) |
1437 | ret = dmabuf->ops->end_cpu_access(dmabuf, direction); |
1438 | |
1439 | return ret; |
1440 | } |
1441 | EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); |
1442 | |
1443 | |
1444 | /** |
 * dma_buf_mmap - Set up a userspace mmap with the given vma
1446 | * @dmabuf: [in] buffer that should back the vma |
1447 | * @vma: [in] vma for the mmap |
1448 | * @pgoff: [in] offset in pages where this mmap should start within the |
1449 | * dma-buf buffer. |
1450 | * |
1451 | * This function adjusts the passed in vma so that it points at the file of the |
1452 | * dma_buf operation. It also adjusts the starting pgoff and does bounds |
 * checking on the size of the vma. Then it calls the exporter's mmap function to
1454 | * set up the mapping. |
1455 | * |
1456 | * Can return negative error values, returns 0 on success. |
1457 | */ |
1458 | int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, |
1459 | unsigned long pgoff) |
1460 | { |
1461 | if (WARN_ON(!dmabuf || !vma)) |
1462 | return -EINVAL; |
1463 | |
1464 | /* check if buffer supports mmap */ |
1465 | if (!dmabuf->ops->mmap) |
1466 | return -EINVAL; |
1467 | |
1468 | /* check for offset overflow */ |
1469 | if (pgoff + vma_pages(vma) < pgoff) |
1470 | return -EOVERFLOW; |
1471 | |
1472 | /* check for overflowing the buffer's size */ |
1473 | if (pgoff + vma_pages(vma) > |
1474 | dmabuf->size >> PAGE_SHIFT) |
1475 | return -EINVAL; |
1476 | |
1477 | /* readjust the vma */ |
	vma_set_file(vma, dmabuf->file);
1479 | vma->vm_pgoff = pgoff; |
1480 | |
1481 | return dmabuf->ops->mmap(dmabuf, vma); |
1482 | } |
1483 | EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); |
1484 | |
1485 | /** |
1486 | * dma_buf_vmap - Create virtual mapping for the buffer object into kernel |
1487 | * address space. Same restrictions as for vmap and friends apply. |
1488 | * @dmabuf: [in] buffer to vmap |
1489 | * @map: [out] returns the vmap pointer |
1490 | * |
1491 | * This call may fail due to lack of virtual mapping address space. |
1492 | * These calls are optional in drivers. The intended use for them |
1493 | * is for mapping objects linear in kernel space for high use objects. |
1494 | * |
1495 | * To ensure coherency users must call dma_buf_begin_cpu_access() and |
1496 | * dma_buf_end_cpu_access() around any cpu access performed through this |
1497 | * mapping. |
1498 | * |
1499 | * Returns 0 on success, or a negative errno code otherwise. |
1500 | */ |
1501 | int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) |
1502 | { |
1503 | struct iosys_map ptr; |
1504 | int ret; |
1505 | |
1506 | iosys_map_clear(map); |
1507 | |
1508 | if (WARN_ON(!dmabuf)) |
1509 | return -EINVAL; |
1510 | |
1511 | dma_resv_assert_held(dmabuf->resv); |
1512 | |
1513 | if (!dmabuf->ops->vmap) |
1514 | return -EINVAL; |
1515 | |
1516 | if (dmabuf->vmapping_counter) { |
1517 | dmabuf->vmapping_counter++; |
1518 | BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); |
1519 | *map = dmabuf->vmap_ptr; |
1520 | return 0; |
1521 | } |
1522 | |
1523 | BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); |
1524 | |
1525 | ret = dmabuf->ops->vmap(dmabuf, &ptr); |
1526 | if (WARN_ON_ONCE(ret)) |
1527 | return ret; |
1528 | |
1529 | dmabuf->vmap_ptr = ptr; |
1530 | dmabuf->vmapping_counter = 1; |
1531 | |
1532 | *map = dmabuf->vmap_ptr; |
1533 | |
1534 | return 0; |
1535 | } |
1536 | EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); |
1537 | |
1538 | /** |
1539 | * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel |
1540 | * address space. Same restrictions as for vmap and friends apply. |
1541 | * @dmabuf: [in] buffer to vmap |
1542 | * @map: [out] returns the vmap pointer |
1543 | * |
1544 | * Unlocked version of dma_buf_vmap() |
1545 | * |
1546 | * Returns 0 on success, or a negative errno code otherwise. |
1547 | */ |
1548 | int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) |
1549 | { |
1550 | int ret; |
1551 | |
1552 | iosys_map_clear(map); |
1553 | |
1554 | if (WARN_ON(!dmabuf)) |
1555 | return -EINVAL; |
1556 | |
	dma_resv_lock(dmabuf->resv, NULL);
	ret = dma_buf_vmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
1560 | |
1561 | return ret; |
1562 | } |
1563 | EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF); |
1564 | |
1565 | /** |
1566 | * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. |
1567 | * @dmabuf: [in] buffer to vunmap |
1568 | * @map: [in] vmap pointer to vunmap |
1569 | */ |
1570 | void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) |
1571 | { |
1572 | if (WARN_ON(!dmabuf)) |
1573 | return; |
1574 | |
1575 | dma_resv_assert_held(dmabuf->resv); |
1576 | |
1577 | BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); |
1578 | BUG_ON(dmabuf->vmapping_counter == 0); |
1579 | BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); |
1580 | |
1581 | if (--dmabuf->vmapping_counter == 0) { |
1582 | if (dmabuf->ops->vunmap) |
1583 | dmabuf->ops->vunmap(dmabuf, map); |
		iosys_map_clear(&dmabuf->vmap_ptr);
1585 | } |
1586 | } |
1587 | EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF); |
1588 | |
1589 | /** |
1590 | * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap. |
1591 | * @dmabuf: [in] buffer to vunmap |
1592 | * @map: [in] vmap pointer to vunmap |
1593 | */ |
1594 | void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) |
1595 | { |
1596 | if (WARN_ON(!dmabuf)) |
1597 | return; |
1598 | |
	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_vunmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
1602 | } |
1603 | EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF); |
1604 | |
1605 | #ifdef CONFIG_DEBUG_FS |
1606 | static int dma_buf_debug_show(struct seq_file *s, void *unused) |
1607 | { |
1608 | struct dma_buf *buf_obj; |
1609 | struct dma_buf_attachment *attach_obj; |
1610 | int count = 0, attach_count; |
1611 | size_t size = 0; |
1612 | int ret; |
1613 | |
1614 | ret = mutex_lock_interruptible(&db_list.lock); |
1615 | |
1616 | if (ret) |
1617 | return ret; |
1618 | |
	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");
1622 | |
1623 | list_for_each_entry(buf_obj, &db_list.head, list_node) { |
1624 | |
		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
1631 | seq_printf(m: s, fmt: "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n" , |
1632 | buf_obj->size, |
1633 | buf_obj->file->f_flags, buf_obj->file->f_mode, |
1634 | file_count(buf_obj->file), |
1635 | buf_obj->exp_name, |
1636 | file_inode(f: buf_obj->file)->i_ino, |
1637 | buf_obj->name ?: "<none>" ); |
1638 | spin_unlock(lock: &buf_obj->name_lock); |
1639 | |
		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
1643 | attach_count = 0; |
1644 | |
1645 | list_for_each_entry(attach_obj, &buf_obj->attachments, node) { |
1646 | seq_printf(m: s, fmt: "\t%s\n" , dev_name(dev: attach_obj->dev)); |
1647 | attach_count++; |
1648 | } |
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);
1653 | |
1654 | count++; |
1655 | size += buf_obj->size; |
1656 | } |
1657 | |
1658 | seq_printf(m: s, fmt: "\nTotal %d objects, %zu bytes\n" , count, size); |
1659 | |
1660 | mutex_unlock(lock: &db_list.lock); |
1661 | return 0; |
1662 | |
1663 | error_unlock: |
1664 | mutex_unlock(lock: &db_list.lock); |
1665 | return ret; |
1666 | } |
1667 | |
1668 | DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); |
1669 | |
1670 | static struct dentry *dma_buf_debugfs_dir; |
1671 | |
1672 | static int dma_buf_init_debugfs(void) |
1673 | { |
1674 | struct dentry *d; |
1675 | int err = 0; |
1676 | |
	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);
1680 | |
1681 | dma_buf_debugfs_dir = d; |
1682 | |
	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
1690 | } |
1691 | |
1692 | return err; |
1693 | } |
1694 | |
1695 | static void dma_buf_uninit_debugfs(void) |
1696 | { |
	debugfs_remove_recursive(dma_buf_debugfs_dir);
1698 | } |
1699 | #else |
1700 | static inline int dma_buf_init_debugfs(void) |
1701 | { |
1702 | return 0; |
1703 | } |
1704 | static inline void dma_buf_uninit_debugfs(void) |
1705 | { |
1706 | } |
1707 | #endif |
1708 | |
1709 | static int __init dma_buf_init(void) |
1710 | { |
1711 | int ret; |
1712 | |
1713 | ret = dma_buf_init_sysfs_statistics(); |
1714 | if (ret) |
1715 | return ret; |
1716 | |
1717 | dma_buf_mnt = kern_mount(&dma_buf_fs_type); |
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);
1720 | |
1721 | mutex_init(&db_list.lock); |
	INIT_LIST_HEAD(&db_list.head);
1723 | dma_buf_init_debugfs(); |
1724 | return 0; |
1725 | } |
1726 | subsys_initcall(dma_buf_init); |
1727 | |
1728 | static void __exit dma_buf_deinit(void) |
1729 | { |
1730 | dma_buf_uninit_debugfs(); |
	kern_unmount(dma_buf_mnt);
1732 | dma_buf_uninit_sysfs_statistics(); |
1733 | } |
1734 | __exitcall(dma_buf_deinit); |
1735 | |