1 | /************************************************************************** |
2 | * |
3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
26 | **************************************************************************/ |
27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ |
30 | |
31 | #ifndef _TTM_BO_API_H_ |
32 | #define _TTM_BO_API_H_ |
33 | |
34 | #include <drm/drm_gem.h> |
35 | |
36 | #include <linux/kref.h> |
37 | #include <linux/list.h> |
38 | |
39 | #include "ttm_device.h" |
40 | |
41 | /* Default number of pre-faulted pages in the TTM fault handler */ |
42 | #define TTM_BO_VM_NUM_PREFAULT 16 |
43 | |
44 | struct iosys_map; |
45 | |
46 | struct ttm_global; |
47 | struct ttm_device; |
48 | struct ttm_placement; |
49 | struct ttm_place; |
50 | struct ttm_resource; |
51 | struct ttm_resource_manager; |
52 | struct ttm_tt; |
53 | |
54 | /** |
55 | * enum ttm_bo_type |
56 | * |
57 | * @ttm_bo_type_device: These are 'normal' buffers that can |
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
60 | * |
61 | * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, |
62 | * but they cannot be accessed from user-space. For kernel-only use. |
63 | * |
64 | * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another |
65 | * driver. |
66 | */ |
67 | enum ttm_bo_type { |
68 | ttm_bo_type_device, |
69 | ttm_bo_type_kernel, |
70 | ttm_bo_type_sg |
71 | }; |
72 | |
73 | /** |
74 | * struct ttm_buffer_object |
75 | * |
76 | * @base: drm_gem_object superclass data. |
77 | * @bdev: Pointer to the buffer object device structure. |
78 | * @type: The bo type. |
79 | * @page_alignment: Page alignment. |
80 | * @destroy: Destruction function. If NULL, kfree is used. |
81 | * @kref: Reference count of this buffer object. When this refcount reaches |
82 | * zero, the object is destroyed or put on the delayed delete list. |
83 | * @resource: structure describing current placement. |
84 | * @ttm: TTM structure holding system pages. |
85 | * @deleted: True if the object is only a zombie and already deleted. |
86 | * @bulk_move: The bulk move object. |
87 | * @priority: Priority for LRU, BOs with lower priority are evicted first. |
88 | * @pin_count: Pin count. |
89 | * |
 * Base class for TTM buffer objects, dealing with data placement and CPU
 * mappings. GPU mappings are left to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * of these contexts separately and use these objects to get the correct
 * placement and caching for the GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver-specific types.
100 | */ |
101 | struct ttm_buffer_object { |
102 | struct drm_gem_object base; |
103 | |
104 | /* |
105 | * Members constant at init. |
106 | */ |
107 | struct ttm_device *bdev; |
108 | enum ttm_bo_type type; |
109 | uint32_t page_alignment; |
110 | void (*destroy) (struct ttm_buffer_object *); |
111 | |
112 | /* |
113 | * Members not needing protection. |
114 | */ |
115 | struct kref kref; |
116 | |
117 | /* |
	 * Members protected by the bo reservation lock (bo->base.resv).
119 | */ |
120 | struct ttm_resource *resource; |
121 | struct ttm_tt *ttm; |
122 | bool deleted; |
123 | struct ttm_lru_bulk_move *bulk_move; |
124 | unsigned priority; |
125 | unsigned pin_count; |
126 | |
127 | /** |
128 | * @delayed_delete: Work item used when we can't delete the BO |
129 | * immediately |
130 | */ |
131 | struct work_struct delayed_delete; |
132 | |
133 | /** |
134 | * @sg: external source of pages and DMA addresses, protected by the |
135 | * reservation lock. |
136 | */ |
137 | struct sg_table *sg; |
138 | }; |
139 | |
140 | #define TTM_BO_MAP_IOMEM_MASK 0x80 |
141 | |
142 | /** |
143 | * struct ttm_bo_kmap_obj |
144 | * |
145 | * @virtual: The current kernel virtual address. |
146 | * @page: The page when kmap'ing a single page. |
147 | * @bo_kmap_type: Type of bo_kmap. |
148 | * @bo: The TTM BO. |
149 | * |
150 | * Object describing a kernel mapping. Since a TTM bo may be located |
151 | * in various memory types with various caching policies, the |
152 | * mapping can either be an ioremap, a vmap, a kmap or part of a |
153 | * premapped region. |
154 | */ |
155 | struct ttm_bo_kmap_obj { |
156 | void *virtual; |
157 | struct page *page; |
158 | enum { |
159 | ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK, |
160 | ttm_bo_map_vmap = 2, |
161 | ttm_bo_map_kmap = 3, |
162 | ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, |
163 | } bo_kmap_type; |
164 | struct ttm_buffer_object *bo; |
165 | }; |
166 | |
167 | /** |
168 | * struct ttm_operation_ctx |
169 | * |
170 | * @interruptible: Sleep interruptible if sleeping. |
171 | * @no_wait_gpu: Return immediately if the GPU is busy. |
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
173 | * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple |
174 | * BOs share the same reservation object. |
176 | * @resv: Reservation object to allow reserved evictions with. |
177 | * @bytes_moved: Statistics on how many bytes have been moved. |
178 | * |
179 | * Context for TTM operations like changing buffer placement or general memory |
180 | * allocation. |
181 | */ |
182 | struct ttm_operation_ctx { |
183 | bool interruptible; |
184 | bool no_wait_gpu; |
185 | bool gfp_retry_mayfail; |
186 | bool allow_res_evict; |
187 | struct dma_resv *resv; |
188 | uint64_t bytes_moved; |
189 | }; |
190 | |
191 | struct ttm_lru_walk; |
192 | |
193 | /** struct ttm_lru_walk_ops - Operations for a LRU walk. */ |
194 | struct ttm_lru_walk_ops { |
195 | /** |
196 | * process_bo - Process this bo. |
197 | * @walk: struct ttm_lru_walk describing the walk. |
198 | * @bo: A locked and referenced buffer object. |
199 | * |
	 * Return: Negative error code on error, -EBUSY if this bo was skipped,
	 * otherwise 0 or a user-defined positive value (typically, but not
	 * always, the size of the processed bo) on success. On success, the
	 * returned values are summed by the walk, and the walk exits when its
	 * target is met.
205 | */ |
206 | s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo); |
207 | }; |
208 | |
209 | /** |
210 | * struct ttm_lru_walk - Structure describing a LRU walk. |
211 | */ |
212 | struct ttm_lru_walk { |
213 | /** @ops: Pointer to the ops structure. */ |
214 | const struct ttm_lru_walk_ops *ops; |
215 | /** @ctx: Pointer to the struct ttm_operation_ctx. */ |
216 | struct ttm_operation_ctx *ctx; |
217 | /** @ticket: The struct ww_acquire_ctx if any. */ |
218 | struct ww_acquire_ctx *ticket; |
219 | /** @trylock_only: Only use trylock for locking. */ |
220 | bool trylock_only; |
221 | }; |
222 | |
223 | s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, |
224 | struct ttm_resource_manager *man, s64 target); |
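
/*
 * Example (illustrative sketch): a driver-defined walk that processes
 * buffer objects until roughly @target bytes have been handled. All
 * "foo" names are hypothetical; process_bo() returns the processed size,
 * 0, -EBUSY to skip the bo, or a negative error code.
 *
 *	static s64 foo_process_bo(struct ttm_lru_walk *walk,
 *				  struct ttm_buffer_object *bo)
 *	{
 *		return bo->base.size;
 *	}
 *
 *	static const struct ttm_lru_walk_ops foo_walk_ops = {
 *		.process_bo = foo_process_bo,
 *	};
 *
 *	struct ttm_lru_walk walk = {
 *		.ops = &foo_walk_ops,
 *		.ctx = &ctx,
 *	};
 *	s64 progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);
 */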
225 | |
226 | /** |
227 | * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour |
228 | * @purge: Purge the content rather than backing it up. |
229 | * @writeback: Attempt to immediately write content to swap space. |
230 | * @allow_move: Allow moving to system before shrinking. This is typically |
 * not desired for zombie or ghost objects (a zombie object being an
 * object with a zero gem object refcount).
233 | */ |
234 | struct ttm_bo_shrink_flags { |
235 | u32 purge : 1; |
236 | u32 writeback : 1; |
237 | u32 allow_move : 1; |
238 | }; |
239 | |
240 | long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, |
241 | const struct ttm_bo_shrink_flags flags); |
242 | |
243 | bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx); |
244 | |
245 | bool ttm_bo_shrink_avoid_wait(void); |
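
/*
 * Example (illustrative): one plausible shrinker-side use, with the bo
 * already reserved. Whether to attempt writeback is driver policy; here it
 * is skipped when waits should be avoided:
 *
 *	if (ttm_bo_shrink_suitable(bo, &ctx)) {
 *		struct ttm_bo_shrink_flags flags = {
 *			.purge = false,
 *			.writeback = !ttm_bo_shrink_avoid_wait(),
 *			.allow_move = true,
 *		};
 *
 *		shrunk = ttm_bo_shrink(&ctx, bo, flags);
 *	}
 */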
246 | |
247 | /** |
248 | * ttm_bo_get - reference a struct ttm_buffer_object |
249 | * |
250 | * @bo: The buffer object. |
251 | */ |
252 | static inline void ttm_bo_get(struct ttm_buffer_object *bo) |
253 | { |
	kref_get(&bo->kref);
255 | } |
256 | |
257 | /** |
258 | * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless |
259 | * its refcount has already reached zero. |
260 | * @bo: The buffer object. |
261 | * |
262 | * Used to reference a TTM buffer object in lookups where the object is removed |
263 | * from the lookup structure during the destructor and for RCU lookups. |
264 | * |
265 | * Returns: @bo if the referencing was successful, NULL otherwise. |
266 | */ |
267 | static inline __must_check struct ttm_buffer_object * |
268 | ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) |
269 | { |
	if (!kref_get_unless_zero(&bo->kref))
271 | return NULL; |
272 | return bo; |
273 | } |
274 | |
275 | /** |
276 | * ttm_bo_reserve: |
277 | * |
278 | * @bo: A pointer to a struct ttm_buffer_object. |
279 | * @interruptible: Sleep interruptible if waiting. |
280 | * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. |
281 | * @ticket: ticket used to acquire the ww_mutex. |
282 | * |
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
286 | * |
287 | * Returns: |
288 | * -EDEADLK: The reservation may cause a deadlock. |
289 | * Release all buffer reservations, wait for @bo to become unreserved and |
290 | * try again. |
291 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
292 | * a signal. Release all buffer reservations and return to user-space. |
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
296 | */ |
297 | static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, |
298 | bool interruptible, bool no_wait, |
299 | struct ww_acquire_ctx *ticket) |
300 | { |
301 | int ret = 0; |
302 | |
303 | if (no_wait) { |
304 | bool success; |
305 | |
306 | if (WARN_ON(ticket)) |
307 | return -EBUSY; |
308 | |
		success = dma_resv_trylock(bo->base.resv);
310 | return success ? 0 : -EBUSY; |
311 | } |
312 | |
	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
317 | if (ret == -EINTR) |
318 | return -ERESTARTSYS; |
319 | return ret; |
320 | } |
321 | |
322 | /** |
323 | * ttm_bo_reserve_slowpath: |
324 | * @bo: A pointer to a struct ttm_buffer_object. |
325 | * @interruptible: Sleep interruptible if waiting. |
326 | * @ticket: Ticket used to acquire the ww_mutex. |
327 | * |
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
329 | * from all our other reservations. Because there are no other reservations |
330 | * held by us, this function cannot deadlock any more. |
331 | */ |
332 | static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, |
333 | bool interruptible, |
334 | struct ww_acquire_ctx *ticket) |
335 | { |
336 | if (interruptible) { |
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
339 | if (ret == -EINTR) |
340 | ret = -ERESTARTSYS; |
341 | return ret; |
342 | } |
	dma_resv_lock_slow(bo->base.resv, ticket);
344 | return 0; |
345 | } |
346 | |
347 | void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); |
348 | |
349 | static inline void |
350 | ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) |
351 | { |
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
355 | } |
356 | |
357 | static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, |
358 | struct ttm_resource *new_mem) |
359 | { |
360 | WARN_ON(bo->resource); |
361 | bo->resource = new_mem; |
362 | } |
363 | |
364 | /** |
365 | * ttm_bo_move_null - assign memory for a buffer object. |
 * @bo: The bo to assign the memory to.
 * @new_mem: The memory to be assigned.
 *
 * Free the current resource of @bo, if any, and assign @new_mem as its new
 * resource.
370 | */ |
371 | static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, |
372 | struct ttm_resource *new_mem) |
373 | { |
	ttm_resource_free(bo, &bo->resource);
375 | ttm_bo_assign_mem(bo, new_mem); |
376 | } |
377 | |
378 | /** |
379 | * ttm_bo_unreserve |
380 | * |
381 | * @bo: A pointer to a struct ttm_buffer_object. |
382 | * |
383 | * Unreserve a previous reservation of @bo. |
384 | */ |
385 | static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) |
386 | { |
387 | ttm_bo_move_to_lru_tail_unlocked(bo); |
	dma_resv_unlock(bo->base.resv);
389 | } |
390 | |
391 | /** |
392 | * ttm_kmap_obj_virtual |
393 | * |
394 | * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. |
 * @is_iomem: Pointer to a boolean that on return is true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should only be accessed with the ioreadXX(), iowriteXX() and
 * similar functions.
401 | */ |
402 | static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, |
403 | bool *is_iomem) |
404 | { |
405 | *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); |
406 | return map->virtual; |
407 | } |
408 | |
409 | int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, |
410 | struct ttm_operation_ctx *ctx); |
411 | int ttm_bo_validate(struct ttm_buffer_object *bo, |
412 | struct ttm_placement *placement, |
413 | struct ttm_operation_ctx *ctx); |
414 | void ttm_bo_put(struct ttm_buffer_object *bo); |
415 | void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, |
416 | struct ttm_lru_bulk_move *bulk); |
417 | bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, |
418 | const struct ttm_place *place); |
419 | int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, |
420 | enum ttm_bo_type type, struct ttm_placement *placement, |
421 | uint32_t alignment, struct ttm_operation_ctx *ctx, |
422 | struct sg_table *sg, struct dma_resv *resv, |
423 | void (*destroy)(struct ttm_buffer_object *)); |
424 | int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo, |
425 | enum ttm_bo_type type, struct ttm_placement *placement, |
426 | uint32_t alignment, bool interruptible, |
427 | struct sg_table *sg, struct dma_resv *resv, |
428 | void (*destroy)(struct ttm_buffer_object *)); |
429 | int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, |
430 | unsigned long num_pages, struct ttm_bo_kmap_obj *map); |
431 | void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); |
432 | int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map); |
433 | void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map); |
434 | int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); |
435 | s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, |
436 | struct ttm_resource_manager *man, gfp_t gfp_flags, |
437 | s64 target); |
438 | void ttm_bo_pin(struct ttm_buffer_object *bo); |
439 | void ttm_bo_unpin(struct ttm_buffer_object *bo); |
440 | int ttm_bo_evict_first(struct ttm_device *bdev, |
441 | struct ttm_resource_manager *man, |
442 | struct ttm_operation_ctx *ctx); |
443 | int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, |
444 | void *buf, int len, int write); |
445 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, |
446 | struct vm_fault *vmf); |
447 | vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, |
448 | pgprot_t prot, |
449 | pgoff_t num_prefault); |
450 | vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf); |
451 | void ttm_bo_vm_open(struct vm_area_struct *vma); |
452 | void ttm_bo_vm_close(struct vm_area_struct *vma); |
453 | int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, |
454 | void *buf, int len, int write); |
455 | vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot); |
456 | |
457 | int ttm_bo_mem_space(struct ttm_buffer_object *bo, |
458 | struct ttm_placement *placement, |
459 | struct ttm_resource **mem, |
460 | struct ttm_operation_ctx *ctx); |
461 | |
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/*
464 | * ttm_bo_util.c |
465 | */ |
466 | int ttm_mem_io_reserve(struct ttm_device *bdev, |
467 | struct ttm_resource *mem); |
468 | void ttm_mem_io_free(struct ttm_device *bdev, |
469 | struct ttm_resource *mem); |
470 | void ttm_move_memcpy(bool clear, u32 num_pages, |
471 | struct ttm_kmap_iter *dst_iter, |
472 | struct ttm_kmap_iter *src_iter); |
473 | int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, |
474 | struct ttm_operation_ctx *ctx, |
475 | struct ttm_resource *new_mem); |
476 | int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
477 | struct dma_fence *fence, bool evict, |
478 | bool pipeline, |
479 | struct ttm_resource *new_mem); |
480 | void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, |
481 | struct ttm_resource *new_mem); |
482 | int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); |
483 | pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, |
484 | pgprot_t tmp); |
485 | void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); |
486 | int ttm_bo_populate(struct ttm_buffer_object *bo, |
487 | struct ttm_operation_ctx *ctx); |
488 | |
489 | /* Driver LRU walk helpers initially targeted for shrinking. */ |
490 | |
491 | /** |
492 | * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping |
493 | */ |
494 | struct ttm_bo_lru_cursor { |
495 | /** @res_curs: Embedded struct ttm_resource_cursor. */ |
496 | struct ttm_resource_cursor res_curs; |
497 | /** |
	 * @ctx: The struct ttm_operation_ctx used while looping;
	 * governs the locking mode.
500 | */ |
501 | struct ttm_operation_ctx *ctx; |
502 | /** |
503 | * @bo: Buffer object pointer if a buffer object is refcounted, |
504 | * NULL otherwise. |
505 | */ |
506 | struct ttm_buffer_object *bo; |
507 | /** |
	 * @needs_unlock: Valid iff @bo != NULL. The bo resv needs to be
	 * unlocked before the next iteration or after loop exit.
510 | */ |
511 | bool needs_unlock; |
512 | }; |
513 | |
514 | void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs); |
515 | |
516 | struct ttm_bo_lru_cursor * |
517 | ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, |
518 | struct ttm_resource_manager *man, |
519 | struct ttm_operation_ctx *ctx); |
520 | |
521 | struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs); |
522 | |
523 | struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs); |
524 | |
525 | /* |
526 | * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor. |
527 | */ |
528 | DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *, |
	     if (_T) { ttm_bo_lru_cursor_fini(_T); },
530 | ttm_bo_lru_cursor_init(curs, man, ctx), |
531 | struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man, |
532 | struct ttm_operation_ctx *ctx); |
533 | static inline void * |
534 | class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T) |
535 | { return *_T; } |
536 | #define class_ttm_bo_lru_cursor_is_conditional false |
537 | |
538 | /** |
539 | * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning |
540 | * resources on LRU lists. |
541 | * @_cursor: struct ttm_bo_lru_cursor to use for the iteration. |
542 | * @_man: The resource manager whose LRU lists to iterate over. |
 * @_ctx: The struct ttm_operation_ctx to govern the @_bo locking.
544 | * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object |
545 | * for the current iteration. |
546 | * |
547 | * Iterate over all resources of @_man and for each resource, attempt to |
548 | * reference and lock (using the locking mode detailed in @_ctx) the buffer |
549 | * object it points to. If successful, assign @_bo to the address of the |
550 | * buffer object and update @_cursor. The iteration is guarded in the |
 * sense that @_cursor will be initialized before the loop starts and
 * cleaned up when the loop terminates, even if it is terminated
 * prematurely by, for example, a return or break statement. Exiting the
 * loop will also unlock (if needed) and unreference @_bo.
555 | */ |
556 | #define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \ |
557 | scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \ |
558 | for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \ |
559 | (_bo) = ttm_bo_lru_cursor_next(_cursor)) |
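
/*
 * Example (illustrative): processing all buffer objects with resources on
 * @man's LRU lists, with locking governed by @ctx. Cleanup, unlocking and
 * unreferencing happen automatically even on early loop exit. The
 * foo_process() helper is hypothetical:
 *
 *	struct ttm_bo_lru_cursor cursor;
 *	struct ttm_buffer_object *bo;
 *
 *	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &ctx, bo) {
 *		if (foo_process(bo))
 *			break;
 *	}
 */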
560 | |
561 | #endif |
562 | |