/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

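/* Record one firmware id/version pair in the vf2pf message ucode table */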
#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except mailbox are blocked if blocking is
	 * enabled in hypervisor. Choose the SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS &&
	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	/* Reduce kcq number to 2 to reduce latency */
	if (amdgpu_num_kcq == -1)
		amdgpu_num_kcq = 2;
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When starting driver init/fini, full gpu access must be requested first.
 *
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When driver init/fini finishes, full gpu access must be released.
 *
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 *
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

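/* Ask the host for VF init data if the REQ_INIT_DATA handshake is supported */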
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset completion
 * @adev: amdgpu device.
 *
 * Wait for the GPU reset to complete.
 *
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 *
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

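/**
 * amd_sriov_msg_checksum() - checksum a pf2vf/vf2pf message buffer
 * @obj: message buffer to checksum.
 * @obj_size: size of the buffer in bytes.
 * @key: checksum key provided by the host.
 * @checksum: checksum value currently stored in the message; its bytes are
 *            subtracted so the field itself does not contribute.
 *
 * Return: byte-wise sum of the buffer seeded with @key, minus the bytes of
 * @checksum.
 */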
unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}

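/* Allocate the bookkeeping used to track and reserve RAS bad pages on a VF */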
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on host if bp count is more than 10,
	 * so alloc 512 is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

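/* Release the BOs that were pinned on top of the reserved bad pages */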
static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		if (bo) {
			amdgpu_bo_free_kernel(&bo, NULL, NULL);
			data->bps_bo[i] = bo;
		}
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

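/* Append newly reported bad pages to the error handler data */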
static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
				    struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

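/*
 * Reserve VRAM for every recorded bad page that has not been reserved yet,
 * either through the VRAM manager or by pinning a kernel BO at that offset.
 */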
static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases where a reserve error should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (ttm_resource_manager_used(man)) {
			amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
						      bp << AMDGPU_GPU_PAGE_SHIFT,
						      AMDGPU_GPU_PAGE_SIZE);
			data->bps_bo[i] = NULL;
		} else {
			if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
						       AMDGPU_GPU_PAGE_SIZE,
						       &bo, NULL))
				DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
			data->bps_bo[i] = bo;
		}
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

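/* Return true if @retired_page is already tracked (or if tracking data is absent) */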
static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
					   uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

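/* Walk the bad page block published by the host and reserve each new page */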
static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
				     uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;
	void *vram_usage_va = NULL;

	if (adev->mman.fw_vram_usage_va)
		vram_usage_va = adev->mman.fw_vram_usage_va;
	else
		vram_usage_va = adev->mman.drv_vram_usage_va;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

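/* Validate the pf2vf message in reserved VRAM and cache the settings it carries */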
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;

	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* clamp a too large or too small update interval to a sane default */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

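/* Fill the vf2pf message with the firmware versions currently in use */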
static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}

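/* Rebuild the vf2pf message: driver info, FB usage, ucode versions and checksum */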
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

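/* Periodic worker: refresh pf2vf data and publish updated vf2pf data */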
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

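/**
 * amdgpu_virt_fini_data_exchange() - stop the periodic vf2pf update work
 * @adev: amdgpu device.
 */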
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

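/**
 * amdgpu_virt_init_data_exchange() - set up pf2vf/vf2pf data exchange
 * @adev: amdgpu device.
 *
 * Locate the message region in reserved VRAM (or in the VBIOS image during
 * early init) and start the periodic vf2pf update work when applicable.
 */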
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
		DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
	} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work),
				      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

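/**
 * amdgpu_virt_exchange_data() - exchange pf2vf/vf2pf messages with the host
 * @adev: amdgpu device.
 *
 * Map the message headers in reserved VRAM, read the pf2vf data, write the
 * vf2pf data and handle the bad page block for version 2 messages.
 */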
void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		if (adev->mman.fw_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		} else if (adev->mman.drv_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		}

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

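/**
 * amdgpu_detect_virtualization() - detect SR-IOV or passthrough virtualization
 * @adev: amdgpu device.
 *
 * Read the IOV function identifier register to determine whether the device
 * is a VF or has SR-IOV enabled, flag passthrough mode otherwise, and install
 * the ASIC specific virtualization ops for VFs.
 */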
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_IP_DISCOVERY:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chip doesn't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode excludes sriov mode */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* don't send GPU_INIT_DATA with MS_HYPERV */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to host on vega10 */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_IP_DISCOVERY:
			nv_set_virt_ops(adev);
			/* try send GPU_INIT_DATA request to host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chip doesn't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

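/* Grant debugfs register access on a VF by clearing the runtime cap when direct MMIO is allowed */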
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}

void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
		/* force set to GFXOFF state after reset,
		 * to avoid some invalid operation before GC enable
		 */
		adev->gfx.is_poweron = false;
	}
}

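/* Decide whether loading of @ucode_id is skipped on a VF, based on per-ASIC white/black lists */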
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
		/* no vf autoload, white list */
		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
		    ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
		/* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
		if (ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	case IP_VERSION(13, 0, 10):
		/* white list */
		if (ucode_id == AMDGPU_UCODE_ID_CAP
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES1
		    || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
		    || ucode_id == AMDGPU_UCODE_ID_VCN1
		    || ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	default:
		/* legacy black list */
		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	}
}

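/* Clamp the advertised video codec capabilities to the limits set by the host */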
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
					  struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
					  struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

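/*
 * Decide whether a register access must be routed through the RLCG interface
 * and, if so, return the matching RLCG command flag via @rlcg_flag.
 */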
bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
					  u32 acc_flags, u32 hwip,
					  bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in new version, AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC are enabled simultaneously
		 */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
			   !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}

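/**
 * amdgpu_virt_rlcg_reg_rw() - access a register indirectly through RLCG
 * @adev: amdgpu device.
 * @offset: register offset in dwords.
 * @v: value to write (ignored for reads).
 * @flag: RLCG command flag selecting the operation.
 * @xcc_id: accelerated compute core instance.
 *
 * Program the RLCG scratch registers to perform the access and wait for the
 * RLC to acknowledge it, reporting any error status via dev_err.
 *
 * Return: the value read back from SCRATCH_REG0.
 */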
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect register access through rlcg is not available\n");
		return 0;
	}

	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
		dev_err(adev->dev, "invalid xcc\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0 = read/write value
		 * SCRATCH_REG1[30:28] = command
		 * SCRATCH_REG1[19:0] = address in dword
		 * SCRATCH_REG1[27:24] = Error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		tmp = readl(scratch_reg1);
		if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

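/* Write a register on a VF, routing through RLCG when direct access is restricted */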
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

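/* Read a register on a VF, routing through RLCG when direct access is restricted */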
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}

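/* XNACK cannot be used by SR-IOV VFs on GC 9.4.2 */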
bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
{
	bool xnack_mode = true;

	if (amdgpu_sriov_vf(adev) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
		xnack_mode = false;

	return xnack_mode;
}