// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem DMA helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: dma helpers
 *
 * The DRM GEM/DMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator
 * and may be scattered through physical memory. However, they are
 * contiguous in the IOVA space and therefore appear contiguous to the
 * devices using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For the GEM callback helpers to use in struct &drm_gem_object functions,
 * see the likewise-named functions with an _object_ infix (e.g.,
 * drm_gem_dma_object_vmap() wraps drm_gem_dma_vmap()). These helpers
 * perform the necessary type conversion.
 */
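
/*
 * A minimal sketch (an assumption, not code from this file) of how a driver
 * might wire up these helpers; "my_fops" and "my_driver" are hypothetical
 * names:
 *
 *	DEFINE_DRM_GEM_DMA_FOPS(my_fops);
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops = &my_fops,
 *		DRM_GEM_DMA_DRIVER_OPS,
 *	};
 *
 * DRM_GEM_DMA_DRIVER_OPS fills in the dumb-buffer and PRIME-import
 * callbacks with the helpers defined below.
 */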

static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
	.free = drm_gem_dma_object_free,
	.print_info = drm_gem_dma_object_print_info,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = drm_gem_dma_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};

/**
 * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM DMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object) {
		gem_obj = drm->driver->gem_create_object(drm, size);
		if (IS_ERR(gem_obj))
			return ERR_CAST(gem_obj);
		dma_obj = to_drm_gem_dma_obj(gem_obj);
	} else {
		dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
		if (!dma_obj)
			return ERR_PTR(-ENOMEM);
		gem_obj = &dma_obj->base;
	}

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_dma_default_funcs;

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		dma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return dma_obj;

error:
	kfree(dma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_dma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a DMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access memory
 * through an IOMMU, the allocated memory is not expected to be physically
 * contiguous, because a contiguous IOVA range is sufficient to meet a
 * device's DMA requirements.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_dma_object *dma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	dma_obj = __drm_gem_dma_create(drm, size, false);
	if (IS_ERR(dma_obj))
		return dma_obj;

	if (dma_obj->map_noncoherent) {
		dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &dma_obj->dma_addr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
					      &dma_obj->dma_addr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!dma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return dma_obj;

error:
	drm_gem_object_put(&dma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_create);
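
/*
 * Hedged usage sketch (not from this file): an in-kernel caller allocating
 * and later releasing a buffer; the 4 MiB size is an arbitrary example:
 *
 *	struct drm_gem_dma_object *buf;
 *
 *	buf = drm_gem_dma_create(drm, 4 * 1024 * 1024);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *
 *	// ... use buf->vaddr and buf->dma_addr ...
 *
 *	drm_gem_object_put(&buf->base);
 */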

/**
 * drm_gem_dma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a DMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects
 * associated with the given file, and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_dma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
drm_gem_dma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	dma_obj = drm_gem_dma_create(drm, size);
	if (IS_ERR(dma_obj))
		return dma_obj;

	gem_obj = &dma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle holds the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return dma_obj;
}

/**
 * drm_gem_dma_free - free resources associated with a DMA GEM object
 * @dma_obj: DMA GEM object to free
 *
 * This function frees the backing memory of the DMA GEM object, cleans up the
 * GEM object state, and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
	struct drm_gem_object *gem_obj = &dma_obj->base;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (dma_obj->vaddr)
			dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
	} else if (dma_obj->vaddr) {
		if (dma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
					     dma_obj->vaddr, dma_obj->dma_addr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
				    dma_obj->vaddr, dma_obj->dma_addr);
	}

	drm_gem_object_release(gem_obj);

	kfree(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_free);

/**
 * drm_gem_dma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_dma_object *dma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
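
/*
 * Hedged sketch of a driver wrapper around the internal helper; the 64-byte
 * pitch alignment and the name "my_dumb_create" are illustrative assumptions,
 * not requirements taken from this file:
 *
 *	static int my_dumb_create(struct drm_file *file_priv,
 *				  struct drm_device *drm,
 *				  struct drm_mode_create_dumb *args)
 *	{
 *		// Hypothetical hardware constraint: pitch multiple of 64 bytes.
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *
 *		return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
 *	}
 */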

/**
 * drm_gem_dma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer, rounding it up to an
 * integer number of bytes, and sizes the buffer accordingly. Drivers for
 * hardware that doesn't have any additional restrictions on the pitch can
 * directly use this function as their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_dma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_dma_object *dma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);

const struct vm_operations_struct drm_gem_dma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer. It is intended to be used as a direct handler for the
 * struct &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * The mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	dma_obj = to_drm_gem_dma_obj(obj);

	drm_gem_object_put(obj);

	return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
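
/*
 * Hedged sketch (noMMU builds only, not from this file): wiring the helper
 * into a driver's file_operations; "my_fops" is a hypothetical name, and the
 * DEFINE_DRM_GEM_DMA_FOPS() macro already sets this up automatically:
 *
 *	static const struct file_operations my_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *		.get_unmapped_area = drm_gem_dma_get_unmapped_area,
 *	};
 */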
#endif

/**
 * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
 * @dma_obj: DMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints dma_addr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
			    struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
	drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_dma_print_info);

/**
 * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a DMA GEM object
 * @dma_obj: DMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
	struct drm_gem_object *obj = &dma_obj->base;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
			      dma_obj->dma_addr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);

/**
 * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the DMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_dma_object *dma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a DMA GEM buffer. */
	dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(dma_obj))
		return ERR_CAST(dma_obj);

	dma_obj->dma_addr = sg_dma_address(sgt->sgl);
	dma_obj->sgt = sgt;

	drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
		      attach->dmabuf->size);

	return &dma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);

/**
 * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
 *     address space
 * @dma_obj: DMA GEM object
 * @map: Returns the kernel virtual address of the DMA GEM object's backing
 *     store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the DMA buffers are already mapped into the kernel virtual address
 * space, this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
		     struct iosys_map *map)
{
	iosys_map_set_vaddr(map, dma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);
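
/*
 * Hedged sketch (not from this file): accessing the mapping through the
 * iosys_map interface; clearing the whole buffer is an arbitrary example:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_dma_vmap(dma_obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, dma_obj->base.size);
 */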

/**
 * drm_gem_dma_mmap - memory-map an exported DMA GEM object
 * @dma_obj: DMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &dma_obj->base;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);

	if (dma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(dma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(dma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
				  dma_obj->dma_addr,
				  vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);

/**
 * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
 *     scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a DMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *obj;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
	if (ret) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap_unlocked(attach->dmabuf, &map);
		return obj;
	}

	dma_obj = to_drm_gem_dma_obj(obj);
	dma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
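
/*
 * Hedged sketch (an assumption, not code from this file): a driver that needs
 * imported buffers to be kernel-mapped selects the _vmap variant through the
 * convenience macro; "my_driver" and "my_fops" are hypothetical names:
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.fops = &my_fops,
 *		DRM_GEM_DMA_DRIVER_OPS_VMAP,
 *	};
 */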

MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");