/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (gemfs)
		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
						 VM_NORESERVE);
	else
		filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization
 * failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (drm_gem_is_imported(obj)) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
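
/*
 * A minimal sketch of the intended calling pattern, using hypothetical
 * driver types and helpers (my_bo, my_bo_create()): the driver fully
 * sets up the object, publishes it with drm_gem_handle_create() as the
 * very last step, and then drops its own reference, leaving the handle
 * as the only userspace-visible reference:
 *
 *	static int my_dumb_create(struct drm_file *file_priv,
 *				  struct drm_device *dev,
 *				  struct drm_mode_create_dumb *args)
 *	{
 *		struct my_bo *bo;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = args->pitch * args->height;
 *
 *		bo = my_bo_create(dev, args->size);	// driver-specific
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *
 *		// drop the local reference; the handle keeps the object alive
 *		drm_gem_object_put(&bo->base);
 *
 *		return ret;
 *	}
 */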

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
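
/*
 * For reference, userspace consumes the fake offset roughly like this
 * (an illustrative sketch using the dumb-buffer UAPI; error handling
 * omitted):
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 *
 * The offset has no meaning beyond identifying the object to
 * drm_gem_mmap(); it is not a physical or GTT address.
 */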

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;

		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);

		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
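
/*
 * A minimal sketch of the expected pairing with drm_gem_get_pages():
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	// ... build an sg table, map into the device, CPU access, etc. ...
 *
 *	drm_gem_put_pages(obj, pages, true, true);
 *
 * The same array returned by drm_gem_get_pages() must be handed back;
 * drm_gem_put_pages() releases the page references and frees the array
 * itself with kvfree().
 */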

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
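
/*
 * Typical usage from an ioctl taking an array of handles, sketched with
 * hypothetical ioctl fields (args->bo_handles, args->bo_count):
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		goto out;
 *
 *	// ... use objs[0] .. objs[args->bo_count - 1] ...
 *
 * out:
 *	for (i = 0; objs && i < args->bo_count; i++)
 *		if (objs[i])
 *			drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 *
 * Because the array is zero-initialized and assigned to *objs_out before
 * the lookup runs, the cleanup loop above is safe even after a partial
 * lookup failure.
 */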

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(drm_gem_is_imported(obj)));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_pin_locked(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_unpin_locked(obj);
	dma_resv_unlock(obj->resv);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap_locked(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
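
/*
 * A short sketch of CPU access through the vmap helpers. The iosys_map
 * abstraction lets the same code work whether the mapping ends up in
 * system or I/O memory:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, obj->size);	// e.g. clear the buffer
 *
 *	drm_gem_vunmap(obj, &map);
 *
 * Avoid dereferencing map.vaddr directly when iosys_map_is_iomem() is
 * true; use the iosys_map accessors instead.
 */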

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
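
/*
 * The intended submit-path usage, sketched (fence slot reservation shown
 * with dma_resv_reserve_fences() as one example of per-object setup):
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *		if (ret)
 *			break;
 *	}
 *
 *	// ... push the job and add its fences to the reservations ...
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 *
 * The -EDEADLK handling above means individual locks may be dropped and
 * re-acquired internally, but on success all @count reservations are
 * held.
 */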

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU may contain active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock.
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held).
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU.
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU.
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
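
/*
 * A sketch of how a driver shrinker might drive this helper, assuming
 * the shrinker_alloc() style of shrinker registration and a hypothetical
 * my_purge() callback that drops an object's backing pages and moves it
 * off the LRU:
 *
 *	static unsigned long
 *	my_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct my_device *mydev = shrinker->private_data;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&mydev->purgeable_lru, sc->nr_to_scan,
 *					 &remaining, my_purge);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */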

/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);