/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

struct amdgpu_vram_reservation {
	u64 start;
	u64 size;
	struct list_head allocated;
	struct list_head blocks;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
	return list_first_entry_or_null(list, struct drm_buddy_block, link);
}

static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 start, size;

	block = amdgpu_vram_mgr_first_block(head);
	if (!block)
		return false;

	while (head != block->link.next) {
		start = amdgpu_vram_mgr_block_start(block);
		size = amdgpu_vram_mgr_block_size(block);

		block = list_entry(block->link.next, struct drm_buddy_block, link);
		if (start + size != amdgpu_vram_mgr_block_start(block))
			return false;
	}

	return true;
}

static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 size = 0;

	list_for_each_entry(block, head, link)
		size += amdgpu_vram_mgr_block_size(block);

	return size;
}

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting current total VRAM
 * available on the device
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting current total
 * visible VRAM available on the device
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use on the device
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;

	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * visible VRAM currently in use on the device
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n",
			  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}

/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM
 * The file mem_info_vram_vendor is used for this and returns the name of the
 * vendor.
 */
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return sysfs_emit(buf, "samsung\n");
	case INFINEON:
		return sysfs_emit(buf, "infineon\n");
	case ELPIDA:
		return sysfs_emit(buf, "elpida\n");
	case ETRON:
		return sysfs_emit(buf, "etron\n");
	case NANYA:
		return sysfs_emit(buf, "nanya\n");
	case HYNIX:
		return sysfs_emit(buf, "hynix\n");
	case MOSEL:
		return sysfs_emit(buf, "mosel\n");
	case WINBOND:
		return sysfs_emit(buf, "winbond\n");
	case ESMT:
		return sysfs_emit(buf, "esmt\n");
	case MICRON:
		return sysfs_emit(buf, "micron\n");
	default:
		return sysfs_emit(buf, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};

static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
					    struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_mem_info_vram_vendor.attr &&
	    !adev->gmc.vram_vendor)
		return 0;

	return attr->mode;
}

const struct attribute_group amdgpu_vram_mgr_attr_group = {
	.attrs = amdgpu_vram_mgr_attributes,
	.is_visible = amdgpu_vram_attrs_is_visible
};
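
/*
 * The attributes above are registered through amdgpu_vram_mgr_attr_group and
 * appear as read-only files next to the other device attributes in sysfs.
 * Illustrative example only (the "card0" name is an assumption and depends on
 * DRM probe order):
 *
 *	$ cat /sys/class/drm/card0/device/mem_info_vram_total
 *	$ cat /sys/class/drm/card0/device/mem_info_vram_used
 */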

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible block size
 *
 * @adev: amdgpu_device pointer
 * @block: DRM BUDDY block structure
 *
 * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_buddy_block *block)
{
	u64 start = amdgpu_vram_mgr_block_start(block);
	u64 end = start + amdgpu_vram_mgr_block_size(block);

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}

/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;
	u64 usage = 0;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	list_for_each_entry(block, &vres->blocks, link)
		usage += amdgpu_vram_mgr_vis_size(adev, block);

	return usage;
}

/* Commit the reservation of VRAM pages */
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	struct drm_buddy_block *block;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
					   rsv->size, mm->chunk_size, &rsv->allocated,
					   DRM_BUDDY_RANGE_ALLOCATION))
			continue;

		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
		if (!block)
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->start, rsv->size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
		atomic64_add(vis_usage, &mgr->vis_usage);
		spin_lock(&man->bdev->lru_lock);
		man->usage += rsv->size;
		spin_unlock(&man->bdev->lru_lock);
		list_move(&rsv->blocks, &mgr->reserved_pages);
	}
}

/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->allocated);
	INIT_LIST_HEAD(&rsv->blocks);

	rsv->start = start;
	rsv->size = size;

	mutex_lock(&mgr->lock);
	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(&mgr->manager);
	mutex_unlock(&mgr->lock);

	return 0;
}
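
/*
 * Illustrative sketch (not taken verbatim from a caller): a bad-page handler
 * could carve a retired page out of the allocator like this, assuming the
 * page address in @start is already known:
 *
 *	r = amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
 *					  start, PAGE_SIZE);
 *	if (r)
 *		return r;
 *
 * If the range is currently in use, the reservation stays on the pending list
 * and is retried from amdgpu_vram_mgr_do_reserve() when blocks are freed.
 */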

/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of a page in VRAM
 *
 * Returns:
 * -EBUSY: the page is still held on the pending list
 * 0: the page has been reserved
 * -ENOENT: the input page is not a reservation
 */
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
				      uint64_t start)
{
	struct amdgpu_vram_reservation *rsv;
	int ret;

	mutex_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	mutex_unlock(&mgr->lock);
	return ret;
}
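
/*
 * Illustrative sketch of how a caller might poll a previously reserved page
 * (placeholder names, not driver API):
 *
 *	switch (amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, addr)) {
 *	case 0:		// page is carved out of the allocator
 *	case -EBUSY:	// reservation still pending, retry later
 *	case -ENOENT:	// addr was never passed to reserve_range()
 *	}
 */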

static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
					struct drm_printer *printer)
{
	DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
}

static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
					     struct ttm_resource *res,
					     const struct ttm_place *place,
					     size_t size)
{
	DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
	return false;
}

static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
					     struct ttm_resource *res,
					     const struct ttm_place *place,
					     size_t size)
{
	DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
	return true;
}

static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
				      struct ttm_resource *res)
{
	DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
}

static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
				     struct ttm_buffer_object *tbo,
				     const struct ttm_place *place,
				     struct ttm_resource **res)
{
	DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
	return -ENOSPC;
}

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	u64 vis_usage = 0, max_bytes, min_block_size;
	struct amdgpu_vram_mgr_resource *vres;
	u64 size, remaining_size, lpfn, fpfn;
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	unsigned long pages_per_block;
	int r;

	lpfn = (u64)place->lpfn << PAGE_SHIFT;
	if (!lpfn)
		lpfn = man->size;

	fpfn = (u64)place->fpfn << PAGE_SHIFT;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_block = ~0ul;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_block = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
		pages_per_block = max_t(uint32_t, pages_per_block,
					tbo->page_alignment);
	}

	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
	if (!vres)
		return -ENOMEM;

	ttm_resource_init(tbo, place, &vres->base);

	/* bail out quickly if there's likely not enough VRAM for this BO */
	if (ttm_resource_manager_usage(man) > max_bytes) {
		r = -ENOSPC;
		goto error_fini;
	}

	INIT_LIST_HEAD(&vres->blocks);

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;

	if (fpfn || lpfn != mgr->mm.size)
		/* Allocate blocks in desired range */
		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	remaining_size = (u64)vres->base.size;

	mutex_lock(&mgr->lock);
	while (remaining_size) {
		if (tbo->page_alignment)
			min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
		else
			min_block_size = mgr->default_page_size;

		BUG_ON(min_block_size < mm->chunk_size);

		/* Limit maximum size to 2GiB due to SG table limitations */
		size = min(remaining_size, 2ULL << 30);

		if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
		    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
			min_block_size = (u64)pages_per_block << PAGE_SHIFT;

		r = drm_buddy_alloc_blocks(mm, fpfn,
					   lpfn,
					   size,
					   min_block_size,
					   &vres->blocks,
					   vres->flags);
		if (unlikely(r))
			goto error_free_blocks;

		if (size > remaining_size)
			remaining_size = 0;
		else
			remaining_size -= size;
	}
	mutex_unlock(&mgr->lock);

	vres->base.start = 0;
	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
		     vres->base.size);
	list_for_each_entry(block, &vres->blocks, link) {
		unsigned long start;

		start = amdgpu_vram_mgr_block_start(block) +
			amdgpu_vram_mgr_block_size(block);
		start >>= PAGE_SHIFT;

		if (start > PFN_UP(size))
			start -= PFN_UP(size);
		else
			start = 0;
		vres->base.start = max(vres->base.start, start);

		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
	}

	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	if (adev->gmc.xgmi.connected_to_cpu)
		vres->base.bus.caching = ttm_cached;
	else
		vres->base.bus.caching = ttm_write_combined;

	atomic64_add(vis_usage, &mgr->vis_usage);
	*res = &vres->base;
	return 0;

error_free_blocks:
	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);
error_fini:
	ttm_resource_fini(man, &vres->base);
	kfree(vres);

	return r;
}

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	uint64_t vis_usage = 0;

	mutex_lock(&mgr->lock);
	list_for_each_entry(block, &vres->blocks, link)
		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

	amdgpu_vram_mgr_do_reserve(man);

	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);

	atomic64_sub(vis_usage, &mgr->vis_usage);

	ttm_resource_fini(man, res);
	kfree(vres);
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @res: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct amdgpu_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_BUDDY blocks to export */
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		amdgpu_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Initialize scatterlist nodes of sg_table */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
	 * @note: Use the iterator API to get the first DRM_BUDDY block
	 * and the number of bytes from it. Access the following
	 * DRM_BUDDY block(s) if more of the buffer needs to be exported
	 */
	amdgpu_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}

/**
 * amdgpu_vram_mgr_free_sgt - unmap and free an sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Unmap and free a previously allocated sg table.
 */
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
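
/*
 * Illustrative usage sketch for the sg_table helpers above (the typical
 * caller is the dma-buf export path; error handling is abbreviated and the
 * importer device "attach->dev" is an assumption):
 *
 *	struct sg_table *sgt;
 *
 *	r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
 *				      amdgpu_bo_size(bo), attach->dev,
 *				      DMA_BIDIRECTIONAL, &sgt);
 *	if (r)
 *		return ERR_PTR(r);
 *	...
 *	amdgpu_vram_mgr_free_sgt(attach->dev, DMA_BIDIRECTIONAL, sgt);
 */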

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
 *
 * @man: TTM memory type manager
 * @res: The resource to test
 * @place: The place to test against
 * @size: Size of the new allocation
 *
 * Test each drm buddy block for intersection for eviction decision.
 */
static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &mgr->blocks, link) {
		unsigned long fpfn =
			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);

		if (place->fpfn < lpfn &&
		    (!place->lpfn || place->lpfn > fpfn))
			return true;
	}

	return false;
}

/**
 * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
 *
 * @man: TTM memory type manager
 * @res: The resource to test
 * @place: The place to test against
 * @size: Size of the new allocation
 *
 * Test each drm buddy block for placement compatibility.
 */
static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;

	/* Check each drm buddy block individually */
	list_for_each_entry(block, &mgr->blocks, link) {
		unsigned long fpfn =
			amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);

		if (fpfn < place->fpfn ||
		    (place->lpfn && lpfn > place->lpfn))
			return false;
	}

	return true;
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv;

	drm_printf(printer, " vis usage:%llu\n",
		   amdgpu_vram_mgr_vis_usage(mgr));

	mutex_lock(&mgr->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   mgr->default_page_size >> 10);

	drm_buddy_print(mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
		drm_printf(printer, "%#018llx-%#018llx: %llu\n",
			   rsv->start, rsv->start + rsv->size, rsv->size);
	mutex_unlock(&mgr->lock);
}

static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
	.alloc = amdgpu_dummy_vram_mgr_new,
	.free = amdgpu_dummy_vram_mgr_del,
	.intersects = amdgpu_dummy_vram_mgr_intersects,
	.compatible = amdgpu_dummy_vram_mgr_compatible,
	.debug = amdgpu_dummy_vram_mgr_debug
};

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc = amdgpu_vram_mgr_new,
	.free = amdgpu_vram_mgr_del,
	.intersects = amdgpu_vram_mgr_intersects,
	.compatible = amdgpu_vram_mgr_compatible,
	.debug = amdgpu_vram_mgr_debug
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	ttm_resource_manager_init(man, &adev->mman.bdev,
				  adev->gmc.real_vram_size);

	mutex_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);
	mgr->default_page_size = PAGE_SIZE;

	if (!adev->gmc.is_app_apu) {
		man->func = &amdgpu_vram_mgr_func;

		err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
		if (err)
			return err;
	} else {
		man->func = &amdgpu_dummy_vram_mgr_func;
		DRM_INFO("Setup dummy vram mgr\n");
	}

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager. Buffers still allocated inside it are
 * evicted first; if that eviction fails the teardown is aborted.
 */
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
		drm_buddy_free_list(&mgr->mm, &rsv->allocated);
		kfree(rsv);
	}
	if (!adev->gmc.is_app_apu)
		drm_buddy_fini(&mgr->mm);
	mutex_unlock(&mgr->lock);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}