/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;
/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space, which can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dma-buf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the resource placement offset directly as the
 * GPU virtual address. Drivers implementing multiple GPU memory manager
 * contexts should manage the address space in these contexts separately
 * and use these objects to get the correct placement and caching for these
 * GPU maps. This makes it possible to use these objects for even quite
 * elaborate memory management schemes.
 * The @destroy member and the API visibility of this object make it possible
 * to derive driver-specific types; see the sketch after the struct definition.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	/** @bulk_move: The bulk move this bo is contained in, if any. */
	struct ttm_lru_bulk_move *bulk_move;
	/** @priority: The bo's priority on the LRU lists. */
	unsigned priority;
	/** @pin_count: While > 0 the bo is pinned and must not be moved. */
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately
	 */
	struct work_struct delayed_delete;

	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */
	struct sg_table *sg;
};
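
/*
 * Illustrative sketch, not part of this header: because @destroy is
 * driver-settable and the object layout is visible, drivers typically derive
 * their own buffer object type by embedding struct ttm_buffer_object. The
 * names my_driver_bo and my_driver_bo_destroy below are hypothetical.
 *
 *	struct my_driver_bo {
 *		struct ttm_buffer_object tbo;
 *		u64 gpu_addr;
 *	};
 *
 *	static void my_driver_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		struct my_driver_bo *mbo =
 *			container_of(tbo, struct my_driver_bo, tbo);
 *
 *		kfree(mbo);
 *	}
 */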

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM bo backing the mapping.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap = 2,
		ttm_bo_map_kmap = 3,
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * struct ttm_operation_ctx
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
 * @force_alloc: Don't check the memory account during suspend or CPU page
 * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistics on how many bytes have been moved.
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	bool gfp_retry_mayfail;
	bool allow_res_evict;
	bool force_alloc;
	struct dma_resv *resv;
	uint64_t bytes_moved;
};
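
/*
 * Illustrative sketch, not part of this header: a typical context for an
 * interruptible validation that is allowed to wait for the GPU.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_validate(bo, placement, &ctx);
 *	if (ret)
 *		return ret;
 */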

/**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 */
static inline void ttm_bo_get(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
}

/**
 * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
 * its refcount has already reached zero.
 * @bo: The buffer object.
 *
 * Used to reference a TTM buffer object in lookups where the object is removed
 * from the lookup structure during the destructor and for RCU lookups.
 *
 * Returns: @bo if the referencing was successful, NULL otherwise.
 */
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
	if (!kref_get_unless_zero(&bo->kref))
		return NULL;
	return bo;
}
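
/*
 * Illustrative sketch, not part of this header: taking a reference on a bo
 * found via an RCU-protected, driver-private lookup structure, where the
 * destructor may remove the object concurrently. The my_driver_lookup()
 * helper is hypothetical.
 *
 *	rcu_read_lock();
 *	bo = my_driver_lookup(handle);
 *	if (bo)
 *		bo = ttm_bo_get_unless_zero(bo);
 *	rcu_read_unlock();
 *	if (!bo)
 *		return -ENOENT;
 */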

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again; see the sketch after ttm_bo_reserve_slowpath() below.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
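
/*
 * Illustrative sketch, not part of this header: the classic wait/wound
 * backoff loop. On -EDEADLK, all reservations held under @ticket must be
 * dropped before acquiring the contended bo via the slowpath. The
 * unreserve_all() helper is hypothetical.
 *
 *	ret = ttm_bo_reserve(bo, true, false, ticket);
 *	if (ret == -EDEADLK) {
 *		unreserve_all(ticket);
 *		ret = ttm_bo_reserve_slowpath(bo, true, ticket);
 *	}
 *	if (ret)
 *		return ret;
 *
 *	... validate, fence, etc. ...
 *
 *	ttm_bo_unreserve(bo);
 */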

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from @new_mem to the memory of the buffer object @bo,
 * freeing the previously assigned resource.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by the iowriteXX() and similar
 * functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
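
/*
 * Illustrative sketch, not part of this header: mapping the first page of a
 * bo into kernel space and honoring the iomem distinction when writing to it.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		iowrite32(0, (void __iomem *)virtual);
 *	else
 *		*(u32 *)virtual = 0;
 *	ttm_bo_kunmap(&map);
 */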

int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

#endif