// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <drm/ttm/ttm_tt.h>
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

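/* Estimate the memory available to KFD from system memory and initialize
 * the kgd2kfd interface. Each device's VRAM is added to
 * amdgpu_amdkfd_total_mem_size later, in amdgpu_amdkfd_device_init().
 */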
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe(adev, vf);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_kernel_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->enable_mes) {
		/*
		 * With MES enabled, we only need to initialize
		 * the base address. The size and offset are
		 * not initialized as AMDGPU manages the whole
		 * doorbell space.
		 */
		*aperture_base = adev->doorbell.base;
		*aperture_size = 0;
		*start_offset = 0;
	} else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
						sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

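/* Worker that performs the actual GPU recovery requested by KFD; queued
 * onto the reset domain from amdgpu_amdkfd_gpu_reset().
 */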
static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

static const struct drm_client_funcs kfd_client_funcs = {
	.unregister = drm_client_release,
};

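/* Create and register the in-kernel DRM client used on behalf of KFD.
 * Skipped if KFD init did not complete or the client already exists.
 */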
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
	int ret;

	if (!adev->kfd.init_complete || adev->kfd.client.dev)
		return 0;

	ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
			      &kfd_client_funcs);
	if (ret) {
		dev_err(adev->dev, "Failed to init DRM client: %d\n",
			ret);
		return ret;
	}

	drm_client_register(&adev->kfd.client);

	return 0;
}

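/* Fill in the shared resources KFD needs from this device (compute VMIDs,
 * CP queue bitmap, GPUVM aperture size, doorbell layout), hand them to
 * kgd2kfd_device_init() and set up the KFD reset worker.
 */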
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	amdgpu_amdkfd_gpuvm_init_mem_limits();

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
			.enable_mes = adev->enable_mes,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec_bitmap[0].queue_bitmap,
				  AMDGPU_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address have to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
							      &gpu_resources);

		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;

		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
	}
}

void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

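/* Called by KFD to request a GPU reset; the recovery itself runs from
 * amdgpu_amdkfd_reset_work() on the reset domain's work queue.
 */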
void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->kfd.reset_work);
}

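/* Allocate a pinned, GART-bound and kernel-mapped GTT BO for KFD kernel
 * buffers (e.g. MQDs, see @cp_mqd_gfx9) and return its GPU address and
 * CPU pointer.
 */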
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

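/* Counterpart of amdgpu_amdkfd_alloc_gtt_mem(): unmap, unpin and release
 * the BO allocated there.
 */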
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

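/* Allocate a GWS (global wave sync) BO for KFD. GWS has no CPU-visible
 * backing, so the BO is created in the GWS domain with CPU access
 * disabled.
 */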
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
			    void **mem_obj)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

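/* Report the loaded firmware version for the engine KFD asks about,
 * or 0 for an unknown engine type.
 */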
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type)
{
	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

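/* Report local memory (VRAM) size split into CPU-visible (public) and
 * invisible (private) parts, along with VRAM width and the maximum memory
 * clock. For partitioned devices only the given XCP's share is reported.
 */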
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp)
{
	memset(mem_info, 0, sizeof(*mem_info));

	if (xcp) {
		if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
			mem_info->local_mem_size_public =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
		else
			mem_info->local_mem_size_private =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
	} else {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
	/* the sclk is in quanta of 10kHz */
	if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dmabuf_adev)
		*dmabuf_adev = adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}
	if (xcp_id)
		*xcp_id = bo->xcp_id;

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src)
{
	struct amdgpu_device *peer_adev = src;
	struct amdgpu_device *adev = dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

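/* Estimate the XGMI bandwidth in MB/s between @dst and @src. For the
 * minimum a single link is assumed; otherwise the number of direct links
 * is multiplied by the per-link rate (x16 at 25 Gbps, see below).
 */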
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min)
{
	struct amdgpu_device *adev = dst, *peer_adev;
	int num_links;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
		return 0;

	if (src)
		peer_adev = src;

	/* num links returns 0 for indirect peers since indirect route is unknown. */
	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
	if (num_links < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, num_links);
		num_links = 0;
	}

	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
	return (num_links * 16 * 25000)/BITS_PER_BYTE;
}

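/* Estimate PCIe bandwidth in MB/s from the supported link width and speed
 * masks: the lowest supported width/speed when @is_min is true, otherwise
 * the highest.
 */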
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
					fls(adev->pm.pcie_mlw_mask)) - 1;
	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
					fls(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
	uint32_t num_lanes_mask = 1 << num_lanes_shift;
	uint32_t gen_speed_mask = 1 << gen_speed_shift;
	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;

	switch (num_lanes_mask) {
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
		num_lanes_factor = 1;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
		num_lanes_factor = 2;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
		num_lanes_factor = 4;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
		num_lanes_factor = 8;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
		num_lanes_factor = 12;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
		num_lanes_factor = 16;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
		num_lanes_factor = 32;
		break;
	}

	switch (gen_speed_mask) {
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
		gen_speed_mbits_factor = 2500;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
		gen_speed_mbits_factor = 5000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
		gen_speed_mbits_factor = 8000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
		gen_speed_mbits_factor = 16000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
		gen_speed_mbits_factor = 32000;
		break;
	}

	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}

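/* Submit a small indirect buffer on behalf of KFD on the first compute or
 * SDMA ring and wait for it to complete. Only usable without HWS, since
 * the caller supplies the VMID directly.
 */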
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			    enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;
	job->num_ibs = 1;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	/* Drop the initial kref_init count (see drm_sched_main as example) */
	dma_fence_put(f);
	ret = dma_fence_wait(f, false);

err_ib_sched:
	amdgpu_job_free(job);
err:
	return ret;
}

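/* Tell power management that compute work is starting or stopping: switch
 * the COMPUTE power profile and, on affected GFX9/GFX11 parts, work
 * around GFXOFF/powergating issues while compute is active.
 */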
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
	enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;

	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
	    ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
		amdgpu_gfx_off_ctrl(adev, idle);
	} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
		   (adev->flags & AMD_IS_APU)) {
		/* Disable GFXOFF and PG. Temporary workaround
		 * to fix some compute applications issue on GFX9.
		 */
		adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
	}
	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
	return adev->have_atomics_support;
}

void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
{
	amdgpu_device_flush_hdp(adev, NULL);
}

bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
{
	return amdgpu_ras_get_fed_status(adev);
}

void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
		enum amdgpu_ras_block block, bool reset)
{
	amdgpu_umc_poison_handler(adev, block, reset);
}

int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					     uint32_t *payload)
{
	int ret;

	/* Device or IH ring is not ready so bail. */
	ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
	if (ret)
		return ret;

	/* Send payload to fence KFD interrupts */
	amdgpu_amdkfd_interrupt(adev, payload);

	return 0;
}

bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
{
	if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
		return adev->gfx.ras->query_utcl2_poison_status(adev);
	else
		return false;
}

int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
	return kgd2kfd_check_and_lock_kfd();
}

void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
	kgd2kfd_unlock_kfd();
}

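/* Report the VRAM size available to a single partition (XCP). Without
 * memory partitioning the full VRAM size is returned.
 */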
u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
{
	s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
	u64 tmp;

	if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
		if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
			/* In NPS1 mode, we should restrict the vram reporting
			 * tied to the ttm_pages_limit which is 1/2 of the system
			 * memory. For other partition modes, the HBM is uniformly
			 * divided already per numa node reported. If user wants to
			 * go beyond the default ttm limit and maximize the ROCm
			 * allocations, they can go up to max ttm and sysmem limits.
			 */

			tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
		} else {
			tmp = adev->gmc.mem_partitions[mem_id].size;
		}
		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
		return ALIGN_DOWN(tmp, PAGE_SIZE);
	} else {
		return adev->gmc.real_vram_size;
	}
}

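/* Ask the KIQ of the given instance to unmap the HIQ identified by its
 * doorbell offset, using a temporary ring structure as the queue
 * descriptor for the unmap packet.
 */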
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_ring_funcs *ring_funcs;
	struct amdgpu_ring *ring;
	int r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
	if (!ring_funcs)
		return -ENOMEM;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		r = -ENOMEM;
		goto free_ring_funcs;
	}

	ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
	ring->doorbell_index = doorbell_off;
	ring->funcs = ring_funcs;

	spin_lock(&kiq->ring_lock);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock(&kiq->ring_lock);
		r = -ENOMEM;
		goto free_ring;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);

	spin_unlock(&kiq->ring_lock);

free_ring:
	kfree(ring);

free_ring_funcs:
	kfree(ring_funcs);

	return r;
}