// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <drm/ttm/ttm_tt.h>
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

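/**
 * amdgpu_amdkfd_init - One-time initialization of the amdgpu/amdkfd interface
 *
 * Estimates the total usable system memory (later combined with per-device
 * VRAM sizes to bound page-table reservations) and initializes the KFD
 * module.
 *
 * Return: 0 on success, the error code returned by kgd2kfd_init() otherwise.
 */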
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe(adev, vf);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_kernel_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->enable_mes) {
		/*
		 * With MES enabled, we only need to initialize
		 * the base address. The size and offset are
		 * not initialized as AMDGPU manages the whole
		 * doorbell space.
		 */
		*aperture_base = adev->doorbell.base;
		*aperture_size = 0;
		*start_offset = 0;
	} else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
						sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.src = adev->enable_mes ?
			    AMDGPU_RESET_SRC_MES :
			    AMDGPU_RESET_SRC_HWS;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

static const struct drm_client_funcs kfd_client_funcs = {
	.unregister = drm_client_release,
};

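/**
 * amdgpu_amdkfd_drm_client_create - Create the internal DRM client used by KFD
 * @adev: amdgpu_device pointer
 *
 * Registers a DRM client named "kfd" once KFD initialization has completed.
 * Calling this again after the client exists is a no-op.
 *
 * Return: 0 on success or when nothing needs to be done, the error code
 * returned by drm_client_init() otherwise.
 */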
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
	int ret;

	if (!adev->kfd.init_complete || adev->kfd.client.dev)
		return 0;

	ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
			      &kfd_client_funcs);
	if (ret) {
		dev_err(adev->dev, "Failed to init DRM client: %d\n",
			ret);
		return ret;
	}

	drm_client_register(&adev->kfd.client);

	return 0;
}

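/**
 * amdgpu_amdkfd_device_init - Initialize the KFD side of a probed device
 * @adev: amdgpu_device pointer
 *
 * Builds the shared resource description (compute VMID bitmap, MEC queue
 * bitmap, GPUVM aperture size, doorbell layout) and hands it to
 * kgd2kfd_device_init(). Also adds the device's VRAM to the global memory
 * size estimate and sets up the KFD reset worker.
 */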
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	amdgpu_amdkfd_gpuvm_init_mem_limits();

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
			.enable_mes = adev->enable_mes,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec_bitmap[0].queue_bitmap,
				  AMDGPU_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF statically uses the lower 12 bits of
		 * doorbell addresses for routing, based on settings in
		 * registers like SDMA0_DOORBELL_RANGE.
		 * In order to route a doorbell to the CP engine, the lower
		 * 12 bits of its address have to be outside the ranges
		 * set for the SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
							      &gpu_resources);

		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;

		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
	}
}

void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev,
			    struct amdgpu_reset_context *reset_context)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev, reset_context);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->kfd.reset_work);
}

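/**
 * amdgpu_amdkfd_alloc_gtt_mem - Allocate a pinned, kernel-mapped GTT BO
 * @adev: amdgpu_device pointer
 * @size: allocation size in bytes
 * @mem_obj: output, opaque handle (a struct amdgpu_bo *) for the allocation
 * @gpu_addr: output, GPU address of the pinned BO
 * @cpu_ptr: output, kernel CPU mapping of the BO
 * @cp_mqd_gfx9: request the GFX9 CP MQD placement flag
 *
 * Return: 0 on success, negative error code otherwise.
 */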
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
	struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;

	if (!bo || !*bo)
		return;

	(void)amdgpu_bo_reserve(*bo, true);
	amdgpu_bo_kunmap(*bo);
	amdgpu_bo_unpin(*bo);
	amdgpu_bo_unreserve(*bo);
	amdgpu_bo_unref(bo);
}

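/**
 * amdgpu_amdkfd_alloc_gws - Allocate a GWS (global wave sync) BO for KFD
 * @adev: amdgpu_device pointer
 * @size: allocation size
 * @mem_obj: output, opaque handle (a struct amdgpu_bo *) for the allocation
 *
 * Return: 0 on success, negative error code otherwise.
 */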
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
			    void **mem_obj)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

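/**
 * amdgpu_amdkfd_get_fw_version - Report the firmware version of an engine
 * @adev: amdgpu_device pointer
 * @type: engine type (PFP, ME, CE, MEC, RLC or SDMA)
 *
 * Return: the loaded firmware version for @type, or 0 for unknown engines.
 */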
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type)
{
	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

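/**
 * amdgpu_amdkfd_get_local_mem_info - Report local memory (VRAM) properties
 * @adev: amdgpu_device pointer
 * @mem_info: output, filled with public/private sizes, VRAM width and the
 *            maximum memory clock
 * @xcp: optional partition; when given, sizes are reported per partition
 */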
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp)
{
	memset(mem_info, 0, sizeof(*mem_info));

	if (xcp) {
		if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
			mem_info->local_mem_size_public =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
		else
			mem_info->local_mem_size_private =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
	} else if (adev->apu_prefer_gtt) {
		mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
		mem_info->local_mem_size_private = 0;
	} else {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
	/* the sclk is reported in quanta of 10 kHz */
	if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

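/**
 * amdgpu_amdkfd_get_dmabuf_info - Query properties of an imported dma-buf
 * @adev: amdgpu_device of the importer
 * @dma_buf_fd: file descriptor of the dma-buf to query
 * @dmabuf_adev: output, amdgpu_device that exported the buffer
 * @bo_size: output, size of the underlying BO
 * @metadata_buffer: optional buffer to receive BO metadata
 * @buffer_size: size of @metadata_buffer
 * @metadata_size: output, size of the metadata actually returned
 * @flags: output, KFD allocation flags (VRAM or GTT, optionally PUBLIC)
 * @xcp_id: output, partition the BO belongs to
 *
 * Only amdgpu-exported VRAM or GTT buffers are supported.
 *
 * Return: 0 on success, -EINVAL for unsupported buffers, or another negative
 * error code.
 */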
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dmabuf_adev)
		*dmabuf_adev = adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}
	if (xcp_id)
		*xcp_id = bo->xcp_id;

out_put:
	dma_buf_put(dma_buf);
	return r;
}

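/**
 * amdgpu_amdkfd_get_pcie_bandwidth_mbytes - Estimate PCIe bandwidth in MB/s
 * @adev: amdgpu_device pointer
 * @is_min: report the minimum supported link configuration instead of the
 *          maximum
 *
 * Combines the supported lane-width and link-speed masks into a raw
 * bandwidth estimate: lane count times per-lane rate in Mbit/s, divided by
 * BITS_PER_BYTE.
 */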
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
					fls(adev->pm.pcie_mlw_mask)) - 1;
	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
					fls(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
	uint32_t num_lanes_mask = 1 << num_lanes_shift;
	uint32_t gen_speed_mask = 1 << gen_speed_shift;
	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;

	switch (num_lanes_mask) {
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
		num_lanes_factor = 1;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
		num_lanes_factor = 2;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
		num_lanes_factor = 4;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
		num_lanes_factor = 8;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
		num_lanes_factor = 12;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
		num_lanes_factor = 16;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
		num_lanes_factor = 32;
		break;
	}

	switch (gen_speed_mask) {
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
		gen_speed_mbits_factor = 2500;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
		gen_speed_mbits_factor = 5000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
		gen_speed_mbits_factor = 8000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
		gen_speed_mbits_factor = 16000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
		gen_speed_mbits_factor = 32000;
		break;
	}

	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}

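/**
 * amdgpu_amdkfd_submit_ib - Submit an IB on behalf of KFD and wait for it
 * @adev: amdgpu_device pointer
 * @engine: engine to submit to (MEC1, SDMA1 or SDMA2)
 * @vmid: VMID the IB executes with
 * @gpu_addr: GPU address of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Return: 0 on success, negative error code otherwise.
 */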
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			    enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;
	job->num_ibs = 1;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	/* Drop the initial kref_init count (see drm_sched_main as example) */
	dma_fence_put(f);
	ret = dma_fence_wait(f, false);

err_ib_sched:
	amdgpu_job_free(job);
err:
	return ret;
}

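/**
 * amdgpu_amdkfd_set_compute_idle - Adjust power features around compute work
 * @adev: amdgpu_device pointer
 * @idle: true when compute goes idle, false when compute work starts
 *
 * Toggles GFXOFF or GFX powergating where the hardware generation requires a
 * workaround, and switches the COMPUTE power profile on or off accordingly.
 */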
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
	enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;

	if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
	    ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
	    (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
		amdgpu_gfx_off_ctrl(adev, idle);
	} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
		   (adev->flags & AMD_IS_APU)) {
		/* Disable GFXOFF and PG. Temporary workaround
		 * to fix some compute applications issue on GFX9.
		 */
		struct amdgpu_ip_block *gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

		if (gfx_block != NULL)
			gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state);
	}
	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
	return adev->have_atomics_support;
}

void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
{
	amdgpu_device_flush_hdp(adev, NULL);
}

bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
{
	return amdgpu_ras_get_fed_status(adev);
}

void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset)
{
	amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
}

void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset)
{
	amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset);
}

int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					uint32_t *payload)
{
	int ret;

	/* Device or IH ring is not ready so bail. */
	ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
	if (ret)
		return ret;

	/* Send payload to fence KFD interrupts */
	amdgpu_amdkfd_interrupt(adev, payload);

	return 0;
}

int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
	return kgd2kfd_check_and_lock_kfd();
}

void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
	kgd2kfd_unlock_kfd();
}

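/**
 * amdgpu_amdkfd_xcp_memory_size - Memory size available to one partition
 * @adev: amdgpu_device pointer
 * @xcp_id: partition ID, or a negative value when partitioning is not used
 *
 * Return: the VRAM size (or, for GTT-preferring APUs, the TTM-limited system
 * memory size) in bytes that KFD may report for the given partition.
 */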
u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
{
	s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
	u64 tmp;

	if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
		if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
			/* In NPS1 mode, restrict the VRAM reporting to the
			 * ttm_pages_limit, which is 1/2 of system memory. For
			 * other partition modes, the HBM is already uniformly
			 * divided per reported NUMA node. If users want to go
			 * beyond the default TTM limit and maximize ROCm
			 * allocations, they can raise the TTM and sysmem
			 * limits.
			 */
			tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
		} else {
			tmp = adev->gmc.mem_partitions[mem_id].size;
		}
		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
		return ALIGN_DOWN(tmp, PAGE_SIZE);
	} else if (adev->apu_prefer_gtt) {
		return (ttm_tt_pages_limit() << PAGE_SHIFT);
	} else {
		return adev->gmc.real_vram_size;
	}
}

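/**
 * amdgpu_amdkfd_unmap_hiq - Unmap a KFD HIQ through the KIQ
 * @adev: amdgpu_device pointer
 * @doorbell_off: doorbell offset of the queue to unmap
 * @inst: KIQ instance
 *
 * Builds a temporary ring descriptor for the queue, submits an unmap-queues
 * packet on the KIQ ring and uses a ring test to make sure the packet has
 * been processed.
 *
 * Return: 0 on success or when the KIQ is not ready, negative error code
 * otherwise.
 */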
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_ring_funcs *ring_funcs;
	struct amdgpu_ring *ring;
	int r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
		return 0;

	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
	if (!ring_funcs)
		return -ENOMEM;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		r = -ENOMEM;
		goto free_ring_funcs;
	}

	ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
	ring->doorbell_index = doorbell_off;
	ring->funcs = ring_funcs;

	spin_lock(&kiq->ring_lock);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock(&kiq->ring_lock);
		r = -ENOMEM;
		goto free_ring;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);

	/* Submit unmap queue packet */
	amdgpu_ring_commit(kiq_ring);
	/*
	 * The ring test does a basic scratch register change check. Run it
	 * here to ensure the unmap-queues packet submitted above has been
	 * processed before returning.
	 */
	r = amdgpu_ring_test_helper(kiq_ring);

	spin_unlock(&kiq->ring_lock);

free_ring:
	kfree(ring);

free_ring_funcs:
	kfree(ring_funcs);

	return r;
}

/* Stop scheduling on KFD */
int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
{
	if (!adev->kfd.init_complete)
		return 0;

	return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
}

/* Start scheduling on KFD */
int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
{
	if (!adev->kfd.init_complete)
		return 0;

	return kgd2kfd_start_sched(adev->kfd.dev, node_id);
}

/* check if there are KFD queues active */
bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id)
{
	if (!adev->kfd.init_complete)
		return false;

	return kgd2kfd_compute_active(adev->kfd.dev, node_id);
}

/* Config CGTT_SQ_CLK_CTRL */
int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
	bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
{
	int r;

	if (!adev->kfd.init_complete)
		return 0;

	r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
				  reg_override_enable, perfmon_override_enable);

	return r;
}