/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
	struct amdgpu_bo_vm *vmbo;

	bo = shadow_bo->parent;
	vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram got NULL for bo->parent */
	if (!list_empty(&vmbo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&vmbo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy ||
	    bo->destroy == &amdgpu_bo_vm_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

		if (adev->gmc.mem_partitions && mem_id >= 0) {
			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
			/*
			 * memory partition range lpfn is inclusive start + size - 1
			 * TTM place lpfn is exclusive start + size
			 */
			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
		} else {
			places[c].fpfn = 0;
			places[c].lpfn = 0;
		}
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_DOORBELL;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;
}
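
/*
 * Usage sketch (illustrative only, not part of the driver): callers
 * typically refresh the placement list and then let TTM validate the BO
 * against it, mirroring what amdgpu_bo_pin_restricted() does further down
 * in this file. "bo" is assumed to be a reserved &amdgpu_bo.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	int r;
 *
 *	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	if (r)
 *		return r;
 */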

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
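
/*
 * Lifecycle sketch (illustrative only; "scratch_bo" and friends are
 * hypothetical names): kernel clients pair amdgpu_bo_create_kernel() with
 * amdgpu_bo_free_kernel() around the lifetime of the allocation.
 *
 *	struct amdgpu_bo *scratch_bo = NULL;
 *	u64 scratch_gpu_addr;
 *	void *scratch_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &scratch_bo,
 *				    &scratch_gpu_addr, &scratch_cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&scratch_bo, &scratch_gpu_addr,
 *			      &scratch_cpu_ptr);
 */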

/* Validate that the BO size fits within the requested domain's total size */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	else
		return true;

	if (!man) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		return false;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
	if (size < man->size)
		return true;

	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}
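
/*
 * Usage sketch (illustrative only): callers mask out the USWC flag when
 * write-combining is unsupported, exactly as amdgpu_bo_create() does
 * further down in this file.
 *
 *	if (!amdgpu_bo_support_uswc(flags))
 *		flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 */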

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (adev->gmc.mem_partitions)
		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
		bo->xcp_id = bp->xcp_id_plus1 - 1;
	else
		/* For GPUs without spatial partitioning */
		bo->xcp_id = 0;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (adev->ras_enabled)
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}
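
/*
 * Parameter sketch (illustrative only; error handling omitted):
 * amdgpu_bo_create() consumes a zero-initialized &amdgpu_bo_param, as
 * amdgpu_bo_create_reserved() demonstrates above. A minimal GTT
 * allocation could look like:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */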

/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * num of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
 * @vmbo: BO that will be inserted into the shadow list
 *
 * Insert a BO to the shadow list.
 */
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
	vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
	vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
	mutex_unlock(&adev->shadow_list_lock);
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
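
/*
 * Mapping sketch (illustrative only): a reserved, CPU-accessible BO can
 * be mapped, written through the returned kernel pointer, and unmapped
 * again with amdgpu_bo_kunmap() below.
 *
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (r)
 *		return r;
 *	memset(cpu_ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */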

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into the gart table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned int fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
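
/*
 * Pinning sketch (illustrative only): the BO must be reserved around
 * pin/unpin, matching the pattern in amdgpu_bo_create_reserved() and
 * amdgpu_bo_free_kernel() above.
 *
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	...
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */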

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static const char * const amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
						   adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
						       adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags from a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   u32 metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
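
/*
 * Query sketch (illustrative only; "md_size" and "md" are hypothetical
 * names): a caller that does not know the metadata size up front can
 * query it first with a NULL buffer, then fetch the data into an
 * appropriately sized buffer.
 *
 *	uint32_t md_size;
 *	uint64_t md_flags;
 *	int r;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &md_size, NULL);
 *	if (!r && md_size) {
 *		void *md = kmalloc(md_size, GFP_KERNEL);
 *
 *		if (md)
 *			r = amdgpu_bo_get_metadata(bo, md, md_size,
 *						   &md_size, &md_flags);
 *	}
 */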

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->resource->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	uint64_t size = amdgpu_bo_size(bo);
	struct drm_gem_object *obj;
	unsigned int domain;
	bool shared;

	/* Abort if the BO doesn't currently have a backing store */
	if (!res)
		return;

	obj = &bo->tbo.base;
	shared = drm_gem_object_is_shared_for_memory_stats(obj);

	domain = amdgpu_mem_type_to_domain(res->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		stats->vram += size;
		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
			stats->visible_vram += size;
		if (shared)
			stats->vram_shared += size;
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		stats->gtt += size;
		if (shared)
			stats->gtt_shared += size;
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		stats->cpu += size;
		if (shared)
			stats->cpu_shared += size;
		break;
	}

	if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
		stats->requested_vram += size;
		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			stats->requested_visible_vram += size;

		if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
			stats->evicted_vram += size;
			if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
				stats->evicted_visible_vram += size;
		}
	} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
		stats->requested_gtt += size;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	WARN_ON(abo->vm_bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
		     && bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a VM_FAULT_ error code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (amdgpu_res_cpu_visible(adev, bo->resource))
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    !amdgpu_res_cpu_visible(adev, bo->resource))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}
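
/*
 * Offset sketch (illustrative only): per the note above, hold the
 * reservation (or keep the BO pinned) while querying the GPU offset.
 *
 *	u64 addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */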

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;

	if (bo->tbo.resource->mem_type == TTM_PL_TT)
		offset = amdgpu_gmc_agp_addr(&bo->tbo);

	if (offset == AMDGPU_BO_INVALID_OFFSET)
		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
			amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	if (dma_resv_trylock(bo->tbo.base.resv)) {
		unsigned int domain;

		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
				placement = "VRAM VISIBLE";
			else
				placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = "GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = "CPU";
			break;
		}
		dma_resv_unlock(bo->tbo.base.resv);
	} else {
		placement = "UNKNOWN";
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
	else if (dma_buf)
		seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif
1640 | |