// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
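
/*
 * Example (illustrative sketch, not taken from any particular driver): most
 * drivers don't need to wire up the helpers one by one; the
 * DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h> fills in
 * the shmem-based &drm_driver callbacks:
 *
 *	static const struct drm_driver example_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */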

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;
	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
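
/*
 * Example (minimal sketch): a driver allocating a buffer object. The return
 * value is ERR_PTR()-encoded, so it must be checked with IS_ERR():
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */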

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	return drm_gem_shmem_get_pages(shmem);
}

static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
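
/*
 * Example (sketch): a driver that needs the pages resident while its device
 * works on the buffer can bracket the access with a pin/unpin pair:
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	(device accesses the backing pages here)
 *	drm_gem_shmem_unpin(shmem);
 */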

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
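
/*
 * Example (sketch for a natively allocated object; the reservation lock must
 * be held around both calls, as asserted in the helpers above):
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (!ret) {
 *		memset(map.vaddr, 0, shmem->base.size);
 *		(any other CPU access through map.vaddr goes here)
 *		drm_gem_shmem_vunmap(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */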

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the returned handle is the ID that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update madvise status, returns true if not purged, else false.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
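
/*
 * Example (sketch of a driver's reclaim path, after userspace has marked the
 * buffer unneeded via drm_gem_shmem_madvise()):
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */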

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
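
/*
 * Worked example: for a 1920x1080 XRGB8888 dumb buffer (bpp = 32), the
 * minimum pitch is DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes, and the size
 * becomes PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which happens to be
 * exactly 2025 pages of 4 KiB.
 */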

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
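
/*
 * Example (sketch): a driver can program its DMA engine by walking the
 * returned table with the standard scatterlist iterators;
 * example_hw_map_segment() is a hypothetical driver hook:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		example_hw_map_segment(sg_dma_address(sg), sg_dma_len(sg));
 */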

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");