1 | #ifndef __DRM_GEM_H__ |
2 | #define __DRM_GEM_H__ |
3 | |
4 | /* |
5 | * GEM Graphics Execution Manager Driver Interfaces |
6 | * |
7 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
8 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
9 | * Copyright (c) 2009-2010, Code Aurora Forum. |
10 | * All rights reserved. |
11 | * Copyright © 2014 Intel Corporation |
12 | * Daniel Vetter <daniel.vetter@ffwll.ch> |
13 | * |
14 | * Author: Rickard E. (Rik) Faith <faith@valinux.com> |
15 | * Author: Gareth Hughes <gareth@valinux.com> |
16 | * |
17 | * Permission is hereby granted, free of charge, to any person obtaining a |
18 | * copy of this software and associated documentation files (the "Software"), |
19 | * to deal in the Software without restriction, including without limitation |
20 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
21 | * and/or sell copies of the Software, and to permit persons to whom the |
22 | * Software is furnished to do so, subject to the following conditions: |
23 | * |
24 | * The above copyright notice and this permission notice (including the next |
25 | * paragraph) shall be included in all copies or substantial portions of the |
26 | * Software. |
27 | * |
28 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
29 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
30 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
31 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
32 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
33 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
34 | * OTHER DEALINGS IN THE SOFTWARE. |
35 | */ |
36 | |
37 | #include <linux/kref.h> |
38 | #include <linux/dma-buf.h> |
39 | #include <linux/dma-resv.h> |
40 | #include <linux/list.h> |
41 | #include <linux/mutex.h> |
42 | |
43 | #include <drm/drm_vma_manager.h> |
44 | |
45 | struct iosys_map; |
46 | struct drm_gem_object; |
47 | |
48 | /** |
49 | * enum drm_gem_object_status - bitmask of object state for fdinfo reporting |
50 | * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned) |
51 | * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace |
52 | * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission |
53 | * |
54 | * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status |
55 | * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE |
56 | * and be active or not resident, in which case drm_show_fdinfo() will not |
57 | * account for it as purgeable. So drivers do not need to check if the buffer |
58 | * is idle and resident to return this bit, i.e. userspace can mark a buffer as |
59 | * purgeable even while it is still busy on the GPU. It will not get reported in |
 * the purgeable stats until it becomes idle. The &drm_gem_object_funcs.status
 * callback does not need to consider this.
62 | */ |
63 | enum drm_gem_object_status { |
64 | DRM_GEM_OBJECT_RESIDENT = BIT(0), |
65 | DRM_GEM_OBJECT_PURGEABLE = BIT(1), |
66 | DRM_GEM_OBJECT_ACTIVE = BIT(2), |
67 | }; |
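
/*
 * A minimal sketch of a driver's &drm_gem_object_funcs.status implementation
 * (the foo_* names and the madv tracking are hypothetical driver code, not
 * part of the DRM core):
 *
 *	static enum drm_gem_object_status
 *	foo_gem_status(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo = to_foo_gem(obj);
 *		enum drm_gem_object_status status = 0;
 *
 *		if (foo->pages)
 *			status |= DRM_GEM_OBJECT_RESIDENT;
 *		if (foo->madv == FOO_MADV_DONTNEED)
 *			status |= DRM_GEM_OBJECT_PURGEABLE;
 *
 *		return status;
 *	}
 */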
68 | |
69 | /** |
70 | * struct drm_gem_object_funcs - GEM object functions |
71 | */ |
72 | struct drm_gem_object_funcs { |
73 | /** |
74 | * @free: |
75 | * |
	 * Destructor for drm_gem_objects.
77 | * |
78 | * This callback is mandatory. |
79 | */ |
80 | void (*free)(struct drm_gem_object *obj); |
81 | |
82 | /** |
83 | * @open: |
84 | * |
85 | * Called upon GEM handle creation. |
86 | * |
87 | * This callback is optional. |
88 | */ |
89 | int (*open)(struct drm_gem_object *obj, struct drm_file *file); |
90 | |
91 | /** |
92 | * @close: |
93 | * |
94 | * Called upon GEM handle release. |
95 | * |
96 | * This callback is optional. |
97 | */ |
98 | void (*close)(struct drm_gem_object *obj, struct drm_file *file); |
99 | |
100 | /** |
101 | * @print_info: |
102 | * |
	 * If a driver subclasses struct &drm_gem_object, it can implement this
	 * optional hook for printing additional driver-specific info.
105 | * |
106 | * drm_printf_indent() should be used in the callback passing it the |
107 | * indent argument. |
108 | * |
109 | * This callback is called from drm_gem_print_info(). |
110 | * |
111 | * This callback is optional. |
112 | */ |
113 | void (*print_info)(struct drm_printer *p, unsigned int indent, |
114 | const struct drm_gem_object *obj); |
115 | |
116 | /** |
117 | * @export: |
118 | * |
119 | * Export backing buffer as a &dma_buf. |
120 | * If this is not set drm_gem_prime_export() is used. |
121 | * |
122 | * This callback is optional. |
123 | */ |
124 | struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); |
125 | |
126 | /** |
127 | * @pin: |
128 | * |
129 | * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. |
130 | * |
131 | * This callback is optional. |
132 | */ |
133 | int (*pin)(struct drm_gem_object *obj); |
134 | |
135 | /** |
136 | * @unpin: |
137 | * |
138 | * Unpin backing buffer. Used by the drm_gem_map_detach() helper. |
139 | * |
140 | * This callback is optional. |
141 | */ |
142 | void (*unpin)(struct drm_gem_object *obj); |
143 | |
144 | /** |
145 | * @get_sg_table: |
146 | * |
147 | * Returns a Scatter-Gather table representation of the buffer. |
148 | * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. |
149 | * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() |
150 | * in drm_gem_unmap_buf(), therefore these helpers and this callback |
151 | * here cannot be used for sg tables pointing at driver private memory |
152 | * ranges. |
153 | * |
154 | * See also drm_prime_pages_to_sg(). |
155 | */ |
156 | struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); |
157 | |
158 | /** |
159 | * @vmap: |
160 | * |
161 | * Returns a virtual address for the buffer. Used by the |
162 | * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation |
163 | * lock. |
164 | * |
165 | * This callback is optional. |
166 | */ |
167 | int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map); |
168 | |
169 | /** |
170 | * @vunmap: |
171 | * |
172 | * Releases the address previously returned by @vmap. Used by the |
173 | * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation |
174 | * lock. |
175 | * |
176 | * This callback is optional. |
177 | */ |
178 | void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map); |
179 | |
180 | /** |
181 | * @mmap: |
182 | * |
	 * Handle mmap() of the GEM object and set up the vma accordingly.
184 | * |
185 | * This callback is optional. |
186 | * |
187 | * The callback is used by both drm_gem_mmap_obj() and |
188 | * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not |
189 | * used, the @mmap callback must set vma->vm_ops instead. |
190 | */ |
191 | int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); |
192 | |
193 | /** |
194 | * @evict: |
195 | * |
	 * Evicts the GEM object from memory. Used by the drm_gem_evict_locked()
	 * helper. Returns 0 on success, -errno otherwise. Called with a held
	 * GEM reservation lock.
199 | * |
200 | * This callback is optional. |
201 | */ |
202 | int (*evict)(struct drm_gem_object *obj); |
203 | |
204 | /** |
205 | * @status: |
206 | * |
207 | * The optional status callback can return additional object state |
208 | * which determines which stats the object is counted against. The |
209 | * callback is called under table_lock. Racing against object status |
210 | * change is "harmless", and the callback can expect to not race |
211 | * against object destruction. |
212 | * |
213 | * Called by drm_show_memory_stats(). |
214 | */ |
215 | enum drm_gem_object_status (*status)(struct drm_gem_object *obj); |
216 | |
217 | /** |
218 | * @rss: |
219 | * |
220 | * Return resident size of the object in physical memory. |
221 | * |
222 | * Called by drm_show_memory_stats(). |
223 | */ |
	size_t (*rss)(struct drm_gem_object *obj);
225 | |
226 | /** |
227 | * @vm_ops: |
228 | * |
229 | * Virtual memory operations used with mmap. |
230 | * |
231 | * This is optional but necessary for mmap support. |
232 | */ |
233 | const struct vm_operations_struct *vm_ops; |
234 | }; |
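
/*
 * Drivers typically describe their buffer objects with a statically allocated
 * table of these functions and point &drm_gem_object.funcs at it. A hedged
 * sketch, where all foo_gem_* helpers and foo_gem_vm_ops are hypothetical
 * driver code:
 *
 *	static const struct drm_gem_object_funcs foo_gem_object_funcs = {
 *		.free = foo_gem_free,
 *		.pin = foo_gem_pin,
 *		.unpin = foo_gem_unpin,
 *		.get_sg_table = foo_gem_get_sg_table,
 *		.vmap = foo_gem_vmap,
 *		.vunmap = foo_gem_vunmap,
 *		.mmap = foo_gem_mmap,
 *		.vm_ops = &foo_gem_vm_ops,
 *	};
 */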
235 | |
236 | /** |
237 | * struct drm_gem_lru - A simple LRU helper |
238 | * |
 * A helper for tracking GEM objects in a given state, to aid in a
 * driver's shrinker implementation. It tracks the count of pages
 * for lockless &shrinker.count_objects, and provides
 * drm_gem_lru_scan() for a driver's &shrinker.scan_objects
 * implementation.
244 | */ |
245 | struct drm_gem_lru { |
246 | /** |
247 | * @lock: |
248 | * |
249 | * Lock protecting movement of GEM objects between LRUs. All |
250 | * LRUs that the object can move between should be protected |
251 | * by the same lock. |
252 | */ |
253 | struct mutex *lock; |
254 | |
255 | /** |
256 | * @count: |
257 | * |
258 | * The total number of backing pages of the GEM objects in |
259 | * this LRU. |
260 | */ |
261 | long count; |
262 | |
263 | /** |
264 | * @list: |
265 | * |
266 | * The LRU list. |
267 | */ |
268 | struct list_head list; |
269 | }; |
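
/*
 * A driver shrinker typically keeps one &drm_gem_lru per object state and
 * reports the page count from its &shrinker.count_objects hook. A sketch,
 * assuming a shrinker allocated with shrinker_alloc() so that ->private_data
 * is available; foo_device and its lru_purgeable member are hypothetical:
 *
 *	static unsigned long
 *	foo_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = shrinker->private_data;
 *		unsigned long count = foo->lru_purgeable.count;
 *
 *		return count ?: SHRINK_EMPTY;
 *	}
 */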
270 | |
271 | /** |
272 | * struct drm_gem_object - GEM buffer object |
273 | * |
274 | * This structure defines the generic parts for GEM buffer objects, which are |
275 | * mostly around handling mmap and userspace handles. |
276 | * |
277 | * Buffer objects are often abbreviated to BO. |
278 | */ |
279 | struct drm_gem_object { |
280 | /** |
281 | * @refcount: |
282 | * |
283 | * Reference count of this object |
284 | * |
	 * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
	 * to release a reference to a GEM buffer object.
288 | */ |
289 | struct kref refcount; |
290 | |
291 | /** |
292 | * @handle_count: |
293 | * |
294 | * This is the GEM file_priv handle count of this object. |
295 | * |
296 | * Each handle also holds a reference. Note that when the handle_count |
297 | * drops to 0 any global names (e.g. the id in the flink namespace) will |
298 | * be cleared. |
299 | * |
300 | * Protected by &drm_device.object_name_lock. |
301 | */ |
302 | unsigned handle_count; |
303 | |
304 | /** |
305 | * @dev: DRM dev this object belongs to. |
306 | */ |
307 | struct drm_device *dev; |
308 | |
309 | /** |
310 | * @filp: |
311 | * |
312 | * SHMEM file node used as backing storage for swappable buffer objects. |
313 | * GEM also supports driver private objects with driver-specific backing |
314 | * storage (contiguous DMA memory, special reserved blocks). In this |
315 | * case @filp is NULL. |
316 | */ |
317 | struct file *filp; |
318 | |
319 | /** |
320 | * @vma_node: |
321 | * |
322 | * Mapping info for this object to support mmap. Drivers are supposed to |
323 | * allocate the mmap offset using drm_gem_create_mmap_offset(). The |
324 | * offset itself can be retrieved using drm_vma_node_offset_addr(). |
325 | * |
326 | * Memory mapping itself is handled by drm_gem_mmap(), which also checks |
327 | * that userspace is allowed to access the object. |
328 | */ |
329 | struct drm_vma_offset_node vma_node; |
330 | |
331 | /** |
332 | * @size: |
333 | * |
334 | * Size of the object, in bytes. Immutable over the object's |
335 | * lifetime. |
336 | */ |
337 | size_t size; |
338 | |
339 | /** |
340 | * @name: |
341 | * |
342 | * Global name for this object, starts at 1. 0 means unnamed. |
343 | * Access is covered by &drm_device.object_name_lock. This is used by |
344 | * the GEM_FLINK and GEM_OPEN ioctls. |
345 | */ |
346 | int name; |
347 | |
348 | /** |
349 | * @dma_buf: |
350 | * |
351 | * dma-buf associated with this GEM object. |
352 | * |
353 | * Pointer to the dma-buf associated with this gem object (either |
354 | * through importing or exporting). We break the resulting reference |
355 | * loop when the last gem handle for this object is released. |
356 | * |
357 | * Protected by &drm_device.object_name_lock. |
358 | */ |
359 | struct dma_buf *dma_buf; |
360 | |
361 | /** |
362 | * @import_attach: |
363 | * |
364 | * dma-buf attachment backing this object. |
365 | * |
366 | * Any foreign dma_buf imported as a gem object has this set to the |
367 | * attachment point for the device. This is invariant over the lifetime |
368 | * of a gem object. |
369 | * |
370 | * The &drm_gem_object_funcs.free callback is responsible for |
371 | * cleaning up the dma_buf attachment and references acquired at import |
372 | * time. |
373 | * |
374 | * Note that the drm gem/prime core does not depend upon drivers setting |
375 | * this field any more. So for drivers where this doesn't make sense |
	 * (e.g. virtual devices or a DisplayLink device behind a USB bus),
	 * they can simply leave it as NULL.
378 | */ |
379 | struct dma_buf_attachment *import_attach; |
380 | |
381 | /** |
382 | * @resv: |
383 | * |
	 * Pointer to the reservation object associated with this GEM object.
385 | * |
386 | * Normally (@resv == &@_resv) except for imported GEM objects. |
387 | */ |
388 | struct dma_resv *resv; |
389 | |
390 | /** |
391 | * @_resv: |
392 | * |
393 | * A reservation object for this GEM object. |
394 | * |
395 | * This is unused for imported GEM objects. |
396 | */ |
397 | struct dma_resv _resv; |
398 | |
399 | /** |
400 | * @gpuva: |
401 | * |
402 | * Provides the list of GPU VAs attached to this GEM object. |
403 | * |
404 | * Drivers should lock list accesses with the GEMs &dma_resv lock |
405 | * (&drm_gem_object.resv) or a custom lock if one is provided. |
406 | */ |
407 | struct { |
408 | struct list_head list; |
409 | |
410 | #ifdef CONFIG_LOCKDEP |
411 | struct lockdep_map *lock_dep_map; |
412 | #endif |
413 | } gpuva; |
414 | |
415 | /** |
416 | * @funcs: |
417 | * |
	 * GEM object functions used by the DRM core and its helpers to operate
	 * on this object, replacing the former per-driver &drm_driver GEM
	 * callbacks. Drivers must set this before the object is used.
	 */
424 | const struct drm_gem_object_funcs *funcs; |
425 | |
426 | /** |
427 | * @lru_node: |
428 | * |
429 | * List node in a &drm_gem_lru. |
430 | */ |
431 | struct list_head lru_node; |
432 | |
433 | /** |
434 | * @lru: |
435 | * |
436 | * The current LRU list that the GEM object is on. |
437 | */ |
438 | struct drm_gem_lru *lru; |
439 | }; |
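
/*
 * Drivers usually embed struct drm_gem_object in a larger, driver-specific
 * buffer object and initialize it with drm_gem_object_init() (or
 * drm_gem_private_object_init() for objects without shmem backing). A hedged
 * sketch; struct foo_gem_object and foo_gem_object_funcs are hypothetical:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *		struct page **pages;
 *	};
 *
 *	static struct foo_gem_object *
 *	foo_gem_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_gem_object *foo;
 *		int ret;
 *
 *		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		foo->base.funcs = &foo_gem_object_funcs;
 *
 *		ret = drm_gem_object_init(dev, &foo->base,
 *					  round_up(size, PAGE_SIZE));
 *		if (ret) {
 *			kfree(foo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return foo;
 *	}
 */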
440 | |
441 | /** |
442 | * DRM_GEM_FOPS - Default drm GEM file operations |
443 | * |
444 | * This macro provides a shorthand for setting the GEM file ops in the |
445 | * &file_operations structure. If all you need are the default ops, use |
446 | * DEFINE_DRM_GEM_FOPS instead. |
447 | */ |
448 | #define DRM_GEM_FOPS \ |
449 | .open = drm_open,\ |
450 | .release = drm_release,\ |
451 | .unlocked_ioctl = drm_ioctl,\ |
452 | .compat_ioctl = drm_compat_ioctl,\ |
453 | .poll = drm_poll,\ |
454 | .read = drm_read,\ |
455 | .llseek = noop_llseek,\ |
456 | .mmap = drm_gem_mmap, \ |
457 | .fop_flags = FOP_UNSIGNED_OFFSET |
458 | |
459 | /** |
460 | * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers |
461 | * @name: name for the generated structure |
462 | * |
463 | * This macro autogenerates a suitable &struct file_operations for GEM based |
464 | * drivers, which can be assigned to &drm_driver.fops. Note that this structure |
465 | * cannot be shared between drivers, because it contains a reference to the |
466 | * current module using THIS_MODULE. |
467 | * |
468 | * Note that the declaration is already marked as static - if you need a |
469 | * non-static version of this you're probably doing it wrong and will break the |
470 | * THIS_MODULE reference by accident. |
471 | */ |
472 | #define DEFINE_DRM_GEM_FOPS(name) \ |
473 | static const struct file_operations name = {\ |
474 | .owner = THIS_MODULE,\ |
475 | DRM_GEM_FOPS,\ |
476 | } |
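
/*
 * Example use of DEFINE_DRM_GEM_FOPS() (a sketch; foo_driver stands in for a
 * driver's &drm_driver instance and is not part of the DRM core):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_fops,
 *	};
 */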
477 | |
478 | void drm_gem_object_release(struct drm_gem_object *obj); |
479 | void drm_gem_object_free(struct kref *kref); |
480 | int drm_gem_object_init(struct drm_device *dev, |
481 | struct drm_gem_object *obj, size_t size); |
482 | int drm_gem_object_init_with_mnt(struct drm_device *dev, |
483 | struct drm_gem_object *obj, size_t size, |
484 | struct vfsmount *gemfs); |
485 | void drm_gem_private_object_init(struct drm_device *dev, |
486 | struct drm_gem_object *obj, size_t size); |
487 | void drm_gem_private_object_fini(struct drm_gem_object *obj); |
488 | void drm_gem_vm_open(struct vm_area_struct *vma); |
489 | void drm_gem_vm_close(struct vm_area_struct *vma); |
490 | int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, |
491 | struct vm_area_struct *vma); |
492 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
493 | |
494 | /** |
495 | * drm_gem_object_get - acquire a GEM buffer object reference |
496 | * @obj: GEM buffer object |
497 | * |
498 | * This function acquires an additional reference to @obj. It is illegal to |
499 | * call this without already holding a reference. No locks required. |
500 | */ |
501 | static inline void drm_gem_object_get(struct drm_gem_object *obj) |
502 | { |
	kref_get(&obj->refcount);
504 | } |
505 | |
506 | __attribute__((nonnull)) |
507 | static inline void |
508 | __drm_gem_object_put(struct drm_gem_object *obj) |
509 | { |
	kref_put(&obj->refcount, drm_gem_object_free);
511 | } |
512 | |
513 | /** |
514 | * drm_gem_object_put - drop a GEM buffer object reference |
515 | * @obj: GEM buffer object |
516 | * |
517 | * This releases a reference to @obj. |
518 | */ |
519 | static inline void |
520 | drm_gem_object_put(struct drm_gem_object *obj) |
521 | { |
522 | if (obj) |
523 | __drm_gem_object_put(obj); |
524 | } |
525 | |
526 | int drm_gem_handle_create(struct drm_file *file_priv, |
527 | struct drm_gem_object *obj, |
528 | u32 *handlep); |
529 | int drm_gem_handle_delete(struct drm_file *filp, u32 handle); |
530 | |
532 | void drm_gem_free_mmap_offset(struct drm_gem_object *obj); |
533 | int drm_gem_create_mmap_offset(struct drm_gem_object *obj); |
534 | int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); |
535 | |
536 | struct page **drm_gem_get_pages(struct drm_gem_object *obj); |
537 | void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, |
538 | bool dirty, bool accessed); |
539 | |
540 | void drm_gem_lock(struct drm_gem_object *obj); |
541 | void drm_gem_unlock(struct drm_gem_object *obj); |
542 | |
543 | int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); |
544 | void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); |
545 | |
546 | int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, |
547 | int count, struct drm_gem_object ***objs_out); |
548 | struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); |
549 | long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, |
550 | bool wait_all, unsigned long timeout); |
551 | int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, |
552 | struct ww_acquire_ctx *acquire_ctx); |
553 | void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, |
554 | struct ww_acquire_ctx *acquire_ctx); |
555 | int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
556 | u32 handle, u64 *offset); |
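
/*
 * Typical ioctl-side use of the lookup helpers above (a sketch; args and
 * foo_do_something() are hypothetical):
 *
 *	struct drm_gem_object *obj;
 *	int ret;
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	ret = foo_do_something(obj);
 *
 *	drm_gem_object_put(obj);
 *	return ret;
 */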
557 | |
558 | void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock); |
559 | void drm_gem_lru_remove(struct drm_gem_object *obj); |
560 | void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj); |
561 | void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj); |
562 | unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, |
563 | unsigned int nr_to_scan, |
564 | unsigned long *remaining, |
565 | bool (*shrink)(struct drm_gem_object *obj)); |
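
/*
 * drm_gem_lru_scan() is meant to back a driver's &shrinker.scan_objects hook;
 * the shrink callback decides per object whether it can actually be reclaimed.
 * A hedged sketch, with the same hypothetical foo_device as above and a
 * hypothetical foo_gem_purge() callback:
 *
 *	static unsigned long
 *	foo_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = shrinker->private_data;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&foo->lru_purgeable, sc->nr_to_scan,
 *					 &remaining, foo_gem_purge);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */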
566 | |
567 | int drm_gem_evict_locked(struct drm_gem_object *obj); |
568 | |
/**
 * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
 * @obj: the GEM object in question
 *
 * This helper should only be used for fdinfo shared memory stats to determine
 * if a GEM object is shared.
 */
577 | static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj) |
578 | { |
579 | return (obj->handle_count > 1) || obj->dma_buf; |
580 | } |
581 | |
582 | /** |
583 | * drm_gem_is_imported() - Tests if GEM object's buffer has been imported |
584 | * @obj: the GEM object |
585 | * |
586 | * Returns: |
587 | * True if the GEM object's buffer has been imported, false otherwise |
588 | */ |
589 | static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) |
590 | { |
591 | return !!obj->import_attach; |
592 | } |
593 | |
594 | #ifdef CONFIG_LOCKDEP |
595 | /** |
596 | * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. |
597 | * @obj: the &drm_gem_object |
598 | * @lock: the lock used to protect the gpuva list. The locking primitive |
599 | * must contain a dep_map field. |
600 | * |
 * Call this if you're not protecting access to the gpuva list with the
602 | * dma-resv lock, but with a custom lock. |
603 | */ |
604 | #define drm_gem_gpuva_set_lock(obj, lock) \ |
605 | if (!WARN((obj)->gpuva.lock_dep_map, \ |
606 | "GEM GPUVA lock should be set only once.")) \ |
607 | (obj)->gpuva.lock_dep_map = &(lock)->dep_map |
608 | #define drm_gem_gpuva_assert_lock_held(obj) \ |
609 | lockdep_assert((obj)->gpuva.lock_dep_map ? \ |
610 | lock_is_held((obj)->gpuva.lock_dep_map) : \ |
611 | dma_resv_held((obj)->resv)) |
612 | #else |
613 | #define drm_gem_gpuva_set_lock(obj, lock) do {} while (0) |
614 | #define drm_gem_gpuva_assert_lock_held(obj) do {} while (0) |
615 | #endif |
616 | |
617 | /** |
618 | * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object |
619 | * @obj: the &drm_gem_object |
620 | * |
621 | * This initializes the &drm_gem_object's &drm_gpuvm_bo list. |
622 | * |
623 | * Calling this function is only necessary for drivers intending to support the |
624 | * &drm_driver_feature DRIVER_GEM_GPUVA. |
625 | * |
626 | * See also drm_gem_gpuva_set_lock(). |
627 | */ |
628 | static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) |
629 | { |
	INIT_LIST_HEAD(&obj->gpuva.list);
631 | } |
632 | |
633 | /** |
634 | * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo |
635 | * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step |
636 | * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with |
637 | * |
638 | * This iterator walks over all &drm_gpuvm_bo structures associated with the |
639 | * &drm_gem_object. |
640 | */ |
641 | #define drm_gem_for_each_gpuvm_bo(entry__, obj__) \ |
642 | list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem) |
643 | |
644 | /** |
645 | * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of |
646 | * &drm_gpuvm_bo |
 * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
 * @next__: another &drm_gpuvm_bo to store the next iteration step
649 | * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with |
650 | * |
651 | * This iterator walks over all &drm_gpuvm_bo structures associated with the |
652 | * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence |
 * it is safe against removal of elements.
654 | */ |
655 | #define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \ |
656 | list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem) |
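
/*
 * Example walk over the &drm_gpuvm_bo list of an object (a sketch;
 * foo_handle_vm_bo() is hypothetical, and the list must be protected as
 * checked by drm_gem_gpuva_assert_lock_held()):
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	drm_gem_gpuva_assert_lock_held(obj);
 *	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
 *		foo_handle_vm_bo(vm_bo);
 */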
657 | |
658 | #endif /* __DRM_GEM_H__ */ |
659 | |