1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26#include <linux/firmware.h>
27#include <drm/drm_drv.h>
28
29#include "amdgpu.h"
30#include "amdgpu_psp.h"
31#include "amdgpu_ucode.h"
32#include "amdgpu_xgmi.h"
33#include "soc15_common.h"
34#include "psp_v3_1.h"
35#include "psp_v10_0.h"
36#include "psp_v11_0.h"
37#include "psp_v11_0_8.h"
38#include "psp_v12_0.h"
39#include "psp_v13_0.h"
40#include "psp_v13_0_4.h"
41#include "psp_v14_0.h"
42
43#include "amdgpu_ras.h"
44#include "amdgpu_securedisplay.h"
45#include "amdgpu_atomfirmware.h"
46
47#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
48
49static int psp_load_smu_fw(struct psp_context *psp);
50static int psp_rap_terminate(struct psp_context *psp);
51static int psp_securedisplay_terminate(struct psp_context *psp);
52
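/* Create the PSP KM ring: allocate one page of VRAM/GTT for the ring buffer
 * and record its type, MC address and CPU address in psp->km_ring.
 */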
53static int psp_ring_init(struct psp_context *psp,
54 enum psp_ring_type ring_type)
55{
56 int ret = 0;
57 struct psp_ring *ring;
58 struct amdgpu_device *adev = psp->adev;
59
60 ring = &psp->km_ring;
61
62 ring->ring_type = ring_type;
63
64 /* allocate 4k Page of Local Frame Buffer memory for ring */
65 ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
72 if (ret) {
73 ring->ring_size = 0;
74 return ret;
75 }
76
77 return 0;
78}
79
/*
 * Because DF Cstate management is centralized in the PMFW, the firmware
 * loading sequence is updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
95static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96{
97 struct amdgpu_device *adev = psp->adev;
98
99 if (amdgpu_sriov_vf(adev)) {
100 psp->pmfw_centralized_cstate_management = false;
101 return;
102 }
103
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 case IP_VERSION(11, 0, 0):
106 case IP_VERSION(11, 0, 4):
107 case IP_VERSION(11, 0, 5):
108 case IP_VERSION(11, 0, 7):
109 case IP_VERSION(11, 0, 9):
110 case IP_VERSION(11, 0, 11):
111 case IP_VERSION(11, 0, 12):
112 case IP_VERSION(11, 0, 13):
113 case IP_VERSION(13, 0, 0):
114 case IP_VERSION(13, 0, 2):
115 case IP_VERSION(13, 0, 7):
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122}
123
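/* Under SRIOV, only a reduced set of PSP microcode (CAP and/or TA) is loaded
 * by the guest; pick the per-ASIC set and the autoload ucode ID based on the
 * MP0 IP version.
 */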
124static int psp_init_sriov_microcode(struct psp_context *psp)
125{
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 case IP_VERSION(13, 0, 14):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 break;
152 case IP_VERSION(13, 0, 10):
153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
155 break;
156 case IP_VERSION(13, 0, 12):
		ret = psp_init_ta_microcode(psp, ucode_prefix);
158 break;
159 default:
160 return -EINVAL;
161 }
162 return ret;
163}
164
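/* Bind the per-ASIC PSP callbacks, set the autoload and boot-time-TMR
 * capabilities based on the MP0 IP version, and request the required PSP
 * microcode (SRIOV or bare-metal variant).
 */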
165static int psp_early_init(struct amdgpu_ip_block *ip_block)
166{
167 struct amdgpu_device *adev = ip_block->adev;
168 struct psp_context *psp = &adev->psp;
169
170 psp->autoload_supported = true;
171 psp->boot_time_tmr = true;
172
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
174 case IP_VERSION(9, 0, 0):
175 psp_v3_1_set_psp_funcs(psp);
176 psp->autoload_supported = false;
177 psp->boot_time_tmr = false;
178 break;
179 case IP_VERSION(10, 0, 0):
180 case IP_VERSION(10, 0, 1):
181 psp_v10_0_set_psp_funcs(psp);
182 psp->autoload_supported = false;
183 psp->boot_time_tmr = false;
184 break;
185 case IP_VERSION(11, 0, 2):
186 case IP_VERSION(11, 0, 4):
187 psp_v11_0_set_psp_funcs(psp);
188 psp->autoload_supported = false;
189 psp->boot_time_tmr = false;
190 break;
191 case IP_VERSION(11, 0, 0):
192 case IP_VERSION(11, 0, 7):
193 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
194 fallthrough;
195 case IP_VERSION(11, 0, 5):
196 case IP_VERSION(11, 0, 9):
197 case IP_VERSION(11, 0, 11):
198 case IP_VERSION(11, 5, 0):
199 case IP_VERSION(11, 5, 2):
200 case IP_VERSION(11, 0, 12):
201 case IP_VERSION(11, 0, 13):
202 psp_v11_0_set_psp_funcs(psp);
203 psp->boot_time_tmr = false;
204 break;
205 case IP_VERSION(11, 0, 3):
206 case IP_VERSION(12, 0, 1):
207 psp_v12_0_set_psp_funcs(psp);
208 psp->autoload_supported = false;
209 psp->boot_time_tmr = false;
210 break;
211 case IP_VERSION(13, 0, 2):
212 psp->boot_time_tmr = false;
213 fallthrough;
214 case IP_VERSION(13, 0, 6):
215 case IP_VERSION(13, 0, 14):
216 psp_v13_0_set_psp_funcs(psp);
217 psp->autoload_supported = false;
218 break;
219 case IP_VERSION(13, 0, 12):
220 psp_v13_0_set_psp_funcs(psp);
221 psp->autoload_supported = false;
222 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
223 break;
224 case IP_VERSION(13, 0, 1):
225 case IP_VERSION(13, 0, 3):
226 case IP_VERSION(13, 0, 5):
227 case IP_VERSION(13, 0, 8):
228 case IP_VERSION(13, 0, 11):
229 case IP_VERSION(14, 0, 0):
230 case IP_VERSION(14, 0, 1):
231 case IP_VERSION(14, 0, 4):
232 psp_v13_0_set_psp_funcs(psp);
233 psp->boot_time_tmr = false;
234 break;
235 case IP_VERSION(11, 0, 8):
236 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
237 psp_v11_0_8_set_psp_funcs(psp);
238 }
239 psp->autoload_supported = false;
240 psp->boot_time_tmr = false;
241 break;
242 case IP_VERSION(13, 0, 0):
243 case IP_VERSION(13, 0, 7):
244 case IP_VERSION(13, 0, 10):
245 psp_v13_0_set_psp_funcs(psp);
246 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
247 psp->boot_time_tmr = false;
248 break;
249 case IP_VERSION(13, 0, 4):
250 psp_v13_0_4_set_psp_funcs(psp);
251 psp->boot_time_tmr = false;
252 break;
253 case IP_VERSION(14, 0, 2):
254 case IP_VERSION(14, 0, 3):
255 psp_v14_0_set_psp_funcs(psp);
256 break;
257 case IP_VERSION(14, 0, 5):
258 psp_v14_0_set_psp_funcs(psp);
259 psp->boot_time_tmr = false;
260 break;
261 default:
262 return -EINVAL;
263 }
264
265 psp->adev = adev;
266
267 adev->psp_timeout = 20000;
268
269 psp_check_pmfw_centralized_cstate_management(psp);
270
271 if (amdgpu_sriov_vf(adev))
272 return psp_init_sriov_microcode(psp);
273 else
274 return psp_init_microcode(psp);
275}
276
277void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
278{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
281 mem_ctx->shared_bo = NULL;
282}
283
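/* Release the TMR buffer and every TA shared memory buffer owned by the PSP. */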
284static void psp_free_shared_bufs(struct psp_context *psp)
285{
286 void *tmr_buf;
287 void **pptr;
288
289 /* free TMR memory buffer */
290 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
292 psp->tmr_bo = NULL;
293
	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}
314
315static void psp_memory_training_fini(struct psp_context *psp)
316{
317 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
318
319 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
321 ctx->sys_cache = NULL;
322}
323
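/* Allocate the system-memory cache used to hold the DRAM training data when
 * two-stage memory training is supported (i.e. reservation succeeded).
 */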
324static int psp_memory_training_init(struct psp_context *psp)
325{
326 int ret;
327 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
328
329 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
330 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
331 return 0;
332 }
333
334 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
335 if (ctx->sys_cache == NULL) {
336 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
337 ret = -ENOMEM;
338 goto Err_out;
339 }
340
341 dev_dbg(psp->adev->dev,
342 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
343 ctx->train_data_size,
344 ctx->p2c_train_data_offset,
345 ctx->c2p_train_data_offset);
346 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
347 return 0;
348
349Err_out:
350 psp_memory_training_fini(psp);
351 return ret;
352}
353
/*
 * Helper function to query a psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is
 * invalid, or true if the specific database entry is found, in which case
 * it is copied to @db_entry
 */
364static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
365 enum psp_runtime_entry_type entry_type,
366 void *db_entry)
367{
368 uint64_t db_header_pos, db_dir_pos;
369 struct psp_runtime_data_header db_header = {0};
370 struct psp_runtime_data_directory db_dir = {0};
371 bool ret = false;
372 int i;
373
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
377 return false;
378
379 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
380 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
381
382 /* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);
385
386 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
387 /* runtime db doesn't exist, exit */
388 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
389 return false;
390 }
391
392 /* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);
395
396 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
397 /* invalid db entry count, exit */
398 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
399 return false;
400 }
401
402 /* look up for requested entry type */
403 for (i = 0; i < db_dir.entry_count && !ret; i++) {
404 if (db_dir.entry_list[i].entry_type == entry_type) {
405 switch (entry_type) {
406 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
407 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
408 /* invalid db entry size */
409 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
410 return false;
411 }
412 /* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
415 ret = true;
416 break;
417 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
418 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
419 /* invalid db entry size */
420 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
421 return false;
422 }
423 /* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
426 ret = true;
427 break;
428 default:
429 ret = false;
430 break;
431 }
432 }
433 }
434
435 return ret;
436}
437
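/* Software init: read the SCPM and boot-config state from the PSP runtime
 * database, run memory training if enabled, and allocate the FW private,
 * fence and command buffers used to talk to the PSP.
 */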
438static int psp_sw_init(struct amdgpu_ip_block *ip_block)
439{
440 struct amdgpu_device *adev = ip_block->adev;
441 struct psp_context *psp = &adev->psp;
442 int ret;
443 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
444 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
445 struct psp_runtime_scpm_entry scpm_entry;
446
	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		return -ENOMEM;
	}
452
453 adev->psp.xgmi_context.supports_extended_data =
454 !adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
456
457 memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
461 (scpm_entry.scpm_status != SCPM_DISABLE)) {
462 adev->scpm_enabled = true;
463 adev->scpm_status = scpm_entry.scpm_status;
464 } else {
465 adev->scpm_enabled = false;
466 adev->scpm_status = SCPM_DISABLE;
467 }
468
469 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
470
471 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
475 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
476 if ((psp->boot_cfg_bitmask) &
477 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
478 /* If psp runtime database exists, then
479 * only enable two stage memory training
480 * when TWO_STAGE_DRAM_TRAINING bit is set
481 * in runtime database
482 */
483 mem_training_ctx->enable_mem_training = true;
484 }
485
486 } else {
487 /* If psp runtime database doesn't exist or is
488 * invalid, force enable two stage memory training
489 */
490 mem_training_ctx->enable_mem_training = true;
491 }
492
493 if (mem_training_ctx->enable_mem_training) {
494 ret = psp_memory_training_init(psp);
495 if (ret) {
496 dev_err(adev->dev, "Failed to initialize memory training!\n");
497 return ret;
498 }
499
500 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
501 if (ret) {
502 dev_err(adev->dev, "Failed to process memory training!\n");
503 return ret;
504 }
505 }
506
	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
513 if (ret)
514 return ret;
515
	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
522 if (ret)
523 goto failed1;
524
	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
530 if (ret)
531 goto failed2;
532
533 return 0;
534
failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
542}
543
544static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
545{
546 struct amdgpu_device *adev = ip_block->adev;
547 struct psp_context *psp = &adev->psp;
548
549 psp_memory_training_fini(psp);
550
	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(psp->cmd);
558 psp->cmd = NULL;
559
560 psp_free_shared_bufs(psp);
561
	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);
573
574 return 0;
575}
576
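/* Poll a PSP register until (value & mask) == reg_val, or, when check_changed
 * is set, until the value no longer equals reg_val; gives up after
 * adev->usec_timeout microseconds and returns -ETIME.
 */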
577int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
578 uint32_t reg_val, uint32_t mask, bool check_changed)
579{
580 uint32_t val;
581 int i;
582 struct amdgpu_device *adev = psp->adev;
583
584 if (psp->adev->no_hw_access)
585 return 0;
586
587 for (i = 0; i < adev->usec_timeout; i++) {
588 val = RREG32(reg_index);
589 if (check_changed) {
590 if (val != reg_val)
591 return 0;
592 } else {
593 if ((val & mask) == reg_val)
594 return 0;
595 }
		udelay(1);
597 }
598
599 return -ETIME;
600}
601
602int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
603 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
604{
605 uint32_t val;
606 int i;
607 struct amdgpu_device *adev = psp->adev;
608
609 if (psp->adev->no_hw_access)
610 return 0;
611
612 for (i = 0; i < msec_timeout; i++) {
613 val = RREG32(reg_index);
614 if ((val & mask) == reg_val)
615 return 0;
		msleep(1);
617 }
618
619 return -ETIME;
620}
621
622static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
623{
624 switch (cmd_id) {
625 case GFX_CMD_ID_LOAD_TA:
626 return "LOAD_TA";
627 case GFX_CMD_ID_UNLOAD_TA:
628 return "UNLOAD_TA";
629 case GFX_CMD_ID_INVOKE_CMD:
630 return "INVOKE_CMD";
631 case GFX_CMD_ID_LOAD_ASD:
632 return "LOAD_ASD";
633 case GFX_CMD_ID_SETUP_TMR:
634 return "SETUP_TMR";
635 case GFX_CMD_ID_LOAD_IP_FW:
636 return "LOAD_IP_FW";
637 case GFX_CMD_ID_DESTROY_TMR:
638 return "DESTROY_TMR";
639 case GFX_CMD_ID_SAVE_RESTORE:
640 return "SAVE_RESTORE_IP_FW";
641 case GFX_CMD_ID_SETUP_VMR:
642 return "SETUP_VMR";
643 case GFX_CMD_ID_DESTROY_VMR:
644 return "DESTROY_VMR";
645 case GFX_CMD_ID_PROG_REG:
646 return "PROG_REG";
647 case GFX_CMD_ID_GET_FW_ATTESTATION:
648 return "GET_FW_ATTESTATION";
649 case GFX_CMD_ID_LOAD_TOC:
650 return "ID_LOAD_TOC";
651 case GFX_CMD_ID_AUTOLOAD_RLC:
652 return "AUTOLOAD_RLC";
653 case GFX_CMD_ID_BOOT_CFG:
654 return "BOOT_CFG";
655 case GFX_CMD_ID_CONFIG_SQ_PERFMON:
656 return "CONFIG_SQ_PERFMON";
657 default:
658 return "UNKNOWN CMD";
659 }
660}
661
662static bool psp_err_warn(struct psp_context *psp)
663{
664 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
665
666 /* This response indicates reg list is already loaded */
	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
668 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
669 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
670 cmd->resp.status == TEE_ERROR_CANCEL)
671 return false;
672
673 return true;
674}
675
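/* Copy a GFX command into the ring-visible command buffer, submit it with a
 * new fence index, and busy-wait until the PSP writes that index back to the
 * fence buffer, bailing out early on a RAS interrupt or timeout.
 */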
676static int
677psp_cmd_submit_buf(struct psp_context *psp,
678 struct amdgpu_firmware_info *ucode,
679 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
680{
681 int ret;
682 int index;
683 int timeout = psp->adev->psp_timeout;
684 bool ras_intr = false;
685 bool skip_unsupport = false;
686
687 if (psp->adev->no_hw_access)
688 return 0;
689
690 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
691
692 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
693
	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
698 goto exit;
699 }
700
	amdgpu_device_invalidate_hdp(psp->adev, NULL);
702 while (*((unsigned int *)psp->fence_buf) != index) {
703 if (--timeout == 0)
704 break;
		/*
		 * Shouldn't wait for the timeout when err_event_athub occurs,
		 * because the gpu reset thread has been triggered and the lock
		 * resource should be released for the psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
715 }
716
717 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
718 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
719 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
720
721 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
722
	/* In some cases, the psp response status is not 0 even though there is
	 * no problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So we only print a warning instead of an error during psp
	 * initialization to avoid breaking hw_init, and we don't return
	 * -EINVAL here.
	 */
730 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
731 if (ucode)
732 dev_warn(psp->adev->dev,
733 "failed to load ucode %s(0x%X) ",
734 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
735 if (psp_err_warn(psp))
736 dev_warn(
737 psp->adev->dev,
738 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
739 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
740 psp->cmd_buf_mem->cmd_id,
741 psp->cmd_buf_mem->resp.status);
742 /* If any firmware (including CAP) load fails under SRIOV, it should
743 * return failure to stop the VF from initializing.
744 * Also return failure in case of timeout
745 */
746 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
747 ret = -EINVAL;
748 goto exit;
749 }
750 }
751
752 if (ucode) {
753 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
754 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
755 }
756
757exit:
758 return ret;
759}
760
761static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
762{
763 struct psp_gfx_cmd_resp *cmd = psp->cmd;
764
765 mutex_lock(&psp->mutex);
766
767 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
768
769 return cmd;
770}
771
772static void release_psp_cmd_buf(struct psp_context *psp)
773{
	mutex_unlock(&psp->mutex);
775}
776
777static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
778 struct psp_gfx_cmd_resp *cmd,
779 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
780{
781 struct amdgpu_device *adev = psp->adev;
782 uint32_t size = 0;
783 uint64_t tmr_pa = 0;
784
785 if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
788 }
789
790 if (amdgpu_sriov_vf(psp->adev))
791 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
792 else
793 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
794 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
795 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
796 cmd->cmd.cmd_setup_tmr.buf_size = size;
797 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
798 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
799 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
800}
801
802static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
803 uint64_t pri_buf_mc, uint32_t size)
804{
805 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
806 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
807 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
808 cmd->cmd.cmd_load_toc.toc_size = size;
809}
810
/* Issue a LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
812static int psp_load_toc(struct psp_context *psp,
813 uint32_t *tmr_size)
814{
815 int ret;
816 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
817
	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
825 if (!ret)
826 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
827
828 release_psp_cmd_buf(psp);
829
830 return ret;
831}
832
833/* Set up Trusted Memory Region */
834static int psp_tmr_init(struct psp_context *psp)
835{
836 int ret = 0;
837 int tmr_size;
838 void *tmr_buf;
839 void **pptr;
840
	/*
	 * According to the HW engineers, the TMR address should be "naturally
	 * aligned", e.g. the start address should be an integer multiple of
	 * the TMR size.
	 *
	 * Note: this memory needs to stay reserved until the driver
	 * is unloaded.
	 */
848 tmr_size = PSP_TMR_SIZE(psp->adev);
849
	/* For ASICs that support RLC autoload, the psp will parse the toc
	 * and calculate the total TMR size needed
	 */
853 if (!amdgpu_sriov_vf(psp->adev) &&
854 psp->toc.start_addr &&
855 psp->toc.size_bytes &&
856 psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
858 if (ret) {
859 dev_err(psp->adev->dev, "Failed to load toc\n");
860 return ret;
861 }
862 }
863
864 if (!psp->tmr_bo && !psp->boot_time_tmr) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
873 }
874
875 return ret;
876}
877
878static bool psp_skip_tmr(struct psp_context *psp)
879{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
881 case IP_VERSION(11, 0, 9):
882 case IP_VERSION(11, 0, 7):
883 case IP_VERSION(13, 0, 2):
884 case IP_VERSION(13, 0, 6):
885 case IP_VERSION(13, 0, 10):
886 case IP_VERSION(13, 0, 12):
887 case IP_VERSION(13, 0, 14):
888 return true;
889 default:
890 return false;
891 }
892}
893
894static int psp_tmr_load(struct psp_context *psp)
895{
896 int ret;
897 struct psp_gfx_cmd_resp *cmd;
898
899 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
900 * Already set up by host driver.
901 */
902 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
903 return 0;
904
905 cmd = acquire_psp_cmd_buf(psp);
906
	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
908 if (psp->tmr_bo)
909 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
910 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
911
	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
914
915 release_psp_cmd_buf(psp);
916
917 return ret;
918}
919
920static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
921 struct psp_gfx_cmd_resp *cmd)
922{
923 if (amdgpu_sriov_vf(psp->adev))
924 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
925 else
926 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
927}
928
929static int psp_tmr_unload(struct psp_context *psp)
930{
931 int ret;
932 struct psp_gfx_cmd_resp *cmd;
933
934 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
935 * as TMR is not loaded at all
936 */
937 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
938 return 0;
939
940 cmd = acquire_psp_cmd_buf(psp);
941
942 psp_prep_tmr_unload_cmd_buf(psp, cmd);
943 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
944
	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
947
948 release_psp_cmd_buf(psp);
949
950 return ret;
951}
952
953static int psp_tmr_terminate(struct psp_context *psp)
954{
955 return psp_tmr_unload(psp);
956}
957
958int psp_get_fw_attestation_records_addr(struct psp_context *psp,
959 uint64_t *output_ptr)
960{
961 int ret;
962 struct psp_gfx_cmd_resp *cmd;
963
964 if (!output_ptr)
965 return -EINVAL;
966
967 if (amdgpu_sriov_vf(psp->adev))
968 return 0;
969
970 cmd = acquire_psp_cmd_buf(psp);
971
972 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
973
	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
976
977 if (!ret) {
978 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
979 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
980 }
981
982 release_psp_cmd_buf(psp);
983
984 return ret;
985}
986
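/* Query the PSP boot config and report whether the GECC feature bit is set. */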
987static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
988{
989 struct psp_context *psp = &adev->psp;
990 struct psp_gfx_cmd_resp *cmd;
991 int ret;
992
993 if (amdgpu_sriov_vf(adev))
994 return 0;
995
996 cmd = acquire_psp_cmd_buf(psp);
997
998 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
999 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
1000
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1002 if (!ret) {
1003 *boot_cfg =
1004 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
1005 }
1006
1007 release_psp_cmd_buf(psp);
1008
1009 return ret;
1010}
1011
1012static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
1013{
1014 int ret;
1015 struct psp_context *psp = &adev->psp;
1016 struct psp_gfx_cmd_resp *cmd;
1017
1018 if (amdgpu_sriov_vf(adev))
1019 return 0;
1020
1021 cmd = acquire_psp_cmd_buf(psp);
1022
1023 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1024 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1025 cmd->cmd.boot_cfg.boot_config = boot_cfg;
1026 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1027
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1029
1030 release_psp_cmd_buf(psp);
1031
1032 return ret;
1033}
1034
1035static int psp_rl_load(struct amdgpu_device *adev)
1036{
1037 int ret;
1038 struct psp_context *psp = &adev->psp;
1039 struct psp_gfx_cmd_resp *cmd;
1040
	if (!is_psp_fw_valid(psp->rl))
1042 return 0;
1043
1044 cmd = acquire_psp_cmd_buf(psp);
1045
1046 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1047 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1048
1049 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1050 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1051 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1052 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1053 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1054
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1056
1057 release_psp_cmd_buf(psp);
1058
1059 return ret;
1060}
1061
1062int psp_memory_partition(struct psp_context *psp, int mode)
1063{
1064 struct psp_gfx_cmd_resp *cmd;
1065 int ret;
1066
1067 if (amdgpu_sriov_vf(psp->adev))
1068 return 0;
1069
1070 cmd = acquire_psp_cmd_buf(psp);
1071
1072 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1073 cmd->cmd.cmd_memory_part.mode = mode;
1074
1075 dev_info(psp->adev->dev,
1076 "Requesting %d memory partition change through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1078 if (ret)
1079 dev_err(psp->adev->dev,
1080 "PSP request failed to change to NPS%d mode\n", mode);
1081
1082 release_psp_cmd_buf(psp);
1083
1084 return ret;
1085}
1086
1087int psp_spatial_partition(struct psp_context *psp, int mode)
1088{
1089 struct psp_gfx_cmd_resp *cmd;
1090 int ret;
1091
1092 if (amdgpu_sriov_vf(psp->adev))
1093 return 0;
1094
1095 cmd = acquire_psp_cmd_buf(psp);
1096
1097 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1098 cmd->cmd.cmd_spatial_part.mode = mode;
1099
1100 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1102
1103 release_psp_cmd_buf(psp);
1104
1105 return ret;
1106}
1107
1108static int psp_asd_initialize(struct psp_context *psp)
1109{
1110 int ret;
1111
	/* If the PSP version doesn't match the ASD version, ASD loading will
	 * fail. Add a workaround to bypass it for sriov for now.
	 * TODO: add a version check to make it common
	 */
1116 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1117 return 0;
1118
1119 /* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1122 return 0;
1123
1124 psp->asd_context.mem_context.shared_mc_addr = 0;
1125 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1126 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1127
	ret = psp_ta_load(psp, &psp->asd_context);
1129 if (!ret)
1130 psp->asd_context.initialized = true;
1131
1132 return ret;
1133}
1134
1135static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1136 uint32_t session_id)
1137{
1138 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1139 cmd->cmd.cmd_unload_ta.session_id = session_id;
1140}
1141
1142int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1143{
1144 int ret;
1145 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1146
	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1148
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1150
1151 context->resp_status = cmd->resp.status;
1152
1153 release_psp_cmd_buf(psp);
1154
1155 return ret;
1156}
1157
1158static int psp_asd_terminate(struct psp_context *psp)
1159{
1160 int ret;
1161
1162 if (amdgpu_sriov_vf(psp->adev))
1163 return 0;
1164
1165 if (!psp->asd_context.initialized)
1166 return 0;
1167
	ret = psp_ta_unload(psp, &psp->asd_context);
1169 if (!ret)
1170 psp->asd_context.initialized = false;
1171
1172 return ret;
1173}
1174
1175static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1176 uint32_t id, uint32_t value)
1177{
1178 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1179 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1180 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1181}
1182
1183int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1184 uint32_t value)
1185{
1186 struct psp_gfx_cmd_resp *cmd;
1187 int ret = 0;
1188
1189 if (reg >= PSP_REG_LAST)
1190 return -EINVAL;
1191
1192 cmd = acquire_psp_cmd_buf(psp);
1193
	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1196 if (ret)
1197 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1198
1199 release_psp_cmd_buf(psp);
1200
1201 return ret;
1202}
1203
1204static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1205 uint64_t ta_bin_mc,
1206 struct ta_context *context)
1207{
1208 cmd->cmd_id = context->ta_load_type;
1209 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1210 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1211 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1212
1213 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1214 lower_32_bits(context->mem_context.shared_mc_addr);
1215 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1216 upper_32_bits(context->mem_context.shared_mc_addr);
1217 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1218}
1219
1220int psp_ta_init_shared_buf(struct psp_context *psp,
1221 struct ta_mem_context *mem_ctx)
1222{
1223 /*
1224 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1225 * physical) for ta to host memory
1226 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
1233}
1234
1235static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1236 uint32_t ta_cmd_id,
1237 uint32_t session_id)
1238{
1239 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1240 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1241 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1242}
1243
1244int psp_ta_invoke(struct psp_context *psp,
1245 uint32_t ta_cmd_id,
1246 struct ta_context *context)
1247{
1248 int ret;
1249 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1250
	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
1255
1256 context->resp_status = cmd->resp.status;
1257
1258 release_psp_cmd_buf(psp);
1259
1260 return ret;
1261}
1262
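/* Copy the TA binary into the PSP private buffer, issue the load command and
 * record the session id returned by the PSP on success.
 */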
1263int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1264{
1265 int ret;
1266 struct psp_gfx_cmd_resp *cmd;
1267
1268 cmd = acquire_psp_cmd_buf(psp);
1269
	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
1277
1278 context->resp_status = cmd->resp.status;
1279
1280 if (!ret)
1281 context->session_id = cmd->resp.session_id;
1282
1283 release_psp_cmd_buf(psp);
1284
1285 return ret;
1286}
1287
1288int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1289{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1291}
1292
1293int psp_xgmi_terminate(struct psp_context *psp)
1294{
1295 int ret;
1296 struct amdgpu_device *adev = psp->adev;
1297
1298 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
1302 return 0;
1303
1304 if (!psp->xgmi_context.context.initialized)
1305 return 0;
1306
	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1308
1309 psp->xgmi_context.context.initialized = false;
1310
1311 return ret;
1312}
1313
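/* Load the XGMI TA (optionally reusing an already loaded instance when
 * load_ta is false) and invoke its INITIALIZE command; the TA capability
 * flags are cached on return.
 */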
1314int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1315{
1316 struct ta_xgmi_shared_memory *xgmi_cmd;
1317 int ret;
1318
1319 if (!psp->ta_fw ||
1320 !psp->xgmi_context.context.bin_desc.size_bytes ||
1321 !psp->xgmi_context.context.bin_desc.start_addr)
1322 return -ENOENT;
1323
1324 if (!load_ta)
1325 goto invoke;
1326
1327 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1328 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1329
1330 if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1332 if (ret)
1333 return ret;
1334 }
1335
1336 /* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1338 if (!ret)
1339 psp->xgmi_context.context.initialized = true;
1340 else
1341 return ret;
1342
1343invoke:
1344 /* Initialize XGMI session */
1345 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1346 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1347 xgmi_cmd->flag_extend_link_record = set_extended_data;
1348 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1349
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1353
1354 return ret;
1355}
1356
1357int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1358{
1359 struct ta_xgmi_shared_memory *xgmi_cmd;
1360 int ret;
1361
1362 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1363 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1364
1365 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1366
1367 /* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1369 if (ret)
1370 return ret;
1371
1372 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1373
1374 return 0;
1375}
1376
1377int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1378{
1379 struct ta_xgmi_shared_memory *xgmi_cmd;
1380 int ret;
1381
1382 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1383 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1384
1385 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1386
1387 /* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1389 if (ret)
1390 return ret;
1391
1392 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1393
1394 return 0;
1395}
1396
1397static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1398{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
			IP_VERSION(13, 0, 6);
1404}
1405
1406/*
1407 * Chips that support extended topology information require the driver to
1408 * reflect topology information in the opposite direction. This is
1409 * because the TA has already exceeded its link record limit and if the
1410 * TA holds bi-directional information, the driver would have to do
1411 * multiple fetches instead of just two.
1412 */
1413static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1414 struct psp_xgmi_node_info node_info)
1415{
1416 struct amdgpu_device *mirror_adev;
1417 struct amdgpu_hive_info *hive;
1418 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1419 uint64_t dst_node_id = node_info.node_id;
1420 uint8_t dst_num_hops = node_info.num_hops;
1421 uint8_t dst_num_links = node_info.num_links;
1422
	hive = amdgpu_get_xgmi_hive(psp->adev);
1424 if (WARN_ON(!hive))
1425 return;
1426
1427 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1428 struct psp_xgmi_topology_info *mirror_top_info;
1429 int j;
1430
1431 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1432 continue;
1433
1434 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1435 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1436 if (mirror_top_info->nodes[j].node_id != src_node_id)
1437 continue;
1438
1439 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1440 /*
1441 * prevent 0 num_links value re-reflection since reflection
1442 * criteria is based on num_hops (direct or indirect).
1443 *
1444 */
1445 if (dst_num_links)
1446 mirror_top_info->nodes[j].num_links = dst_num_links;
1447
1448 break;
1449 }
1450
1451 break;
1452 }
1453
1454 amdgpu_put_xgmi_hive(hive);
1455}
1456
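/* Query the XGMI TA for hive topology, then (when supported) fetch per-link
 * info and reflect it onto peer nodes so both directions stay consistent.
 */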
1457int psp_xgmi_get_topology_info(struct psp_context *psp,
1458 int number_devices,
1459 struct psp_xgmi_topology_info *topology,
1460 bool get_extended_data)
1461{
1462 struct ta_xgmi_shared_memory *xgmi_cmd;
1463 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1464 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1465 int i;
1466 int ret;
1467
1468 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1469 return -EINVAL;
1470
1471 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1472 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1473 xgmi_cmd->flag_extend_link_record = get_extended_data;
1474
1475 /* Fill in the shared memory with topology information as input */
1476 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1477 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1478 topology_info_input->num_nodes = number_devices;
1479
1480 for (i = 0; i < topology_info_input->num_nodes; i++) {
1481 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1482 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1483 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1484 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1485 }
1486
1487 /* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1489 if (ret)
1490 return ret;
1491
1492 /* Read the output topology information from the shared memory */
1493 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1494 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1495 for (i = 0; i < topology->num_nodes; i++) {
1496 /* extended data will either be 0 or equal to non-extended data */
1497 if (topology_info_output->nodes[i].num_hops)
1498 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1499
1500 /* non-extended data gets everything here so no need to update */
1501 if (!get_extended_data) {
1502 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1503 topology->nodes[i].is_sharing_enabled =
1504 topology_info_output->nodes[i].is_sharing_enabled;
1505 topology->nodes[i].sdma_engine =
1506 topology_info_output->nodes[i].sdma_engine;
1507 }
1508
1509 }
1510
1511 /* Invoke xgmi ta again to get the link information */
1512 if (psp_xgmi_peer_link_info_supported(psp)) {
1513 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1514 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 14);
1522 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1523 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1524
		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per the xgmi ta implementation.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
1530 if (ta_port_num_support) {
1531 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1532
1533 for (i = 0; i < topology->num_nodes; i++)
1534 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1535
1536 link_extend_info_output->num_nodes = topology->num_nodes;
1537 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1538 } else {
1539 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1540
1541 for (i = 0; i < topology->num_nodes; i++)
1542 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1543
1544 link_info_output->num_nodes = topology->num_nodes;
1545 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1546 }
1547
		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1549 if (ret)
1550 return ret;
1551
1552 for (i = 0; i < topology->num_nodes; i++) {
1553 uint8_t node_num_links = ta_port_num_support ?
1554 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1555 /* accumulate num_links on extended data */
1556 if (get_extended_data) {
1557 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1558 } else {
1559 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1560 topology->nodes[i].num_links : node_num_links;
1561 }
			/* populate the connected port num info if supported and available */
1563 if (ta_port_num_support && topology->nodes[i].num_links) {
1564 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1565 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1566 }
1567
1568 /* reflect the topology information for bi-directionality */
1569 if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1571 }
1572 }
1573
1574 return 0;
1575}
1576
1577int psp_xgmi_set_topology_info(struct psp_context *psp,
1578 int number_devices,
1579 struct psp_xgmi_topology_info *topology)
1580{
1581 struct ta_xgmi_shared_memory *xgmi_cmd;
1582 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1583 int i;
1584
1585 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1586 return -EINVAL;
1587
1588 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1589 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1590
1591 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1592 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1593 topology_info_input->num_nodes = number_devices;
1594
1595 for (i = 0; i < topology_info_input->num_nodes; i++) {
1596 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1597 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1598 topology_info_input->nodes[i].is_sharing_enabled = 1;
1599 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1600 }
1601
1602 /* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1604}
1605
1606// ras begin
1607static void psp_ras_ta_check_status(struct psp_context *psp)
1608{
1609 struct ta_ras_shared_memory *ras_cmd =
1610 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1611
1612 switch (ras_cmd->ras_status) {
1613 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1614 dev_warn(psp->adev->dev,
1615 "RAS WARNING: cmd failed due to unsupported ip\n");
1616 break;
1617 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1618 dev_warn(psp->adev->dev,
1619 "RAS WARNING: cmd failed due to unsupported error injection\n");
1620 break;
1621 case TA_RAS_STATUS__SUCCESS:
1622 break;
1623 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1624 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1625 dev_warn(psp->adev->dev,
1626 "RAS WARNING: Inject error to critical region is not allowed\n");
1627 break;
1628 default:
1629 dev_warn(psp->adev->dev,
1630 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1631 break;
1632 }
1633}
1634
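/* Marshal a RAS TA command into the shared buffer under the ras_context
 * mutex, invoke the TA, and copy any output (trigger-error status or address
 * query result) back to the caller.
 */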
1635static int psp_ras_send_cmd(struct psp_context *psp,
1636 enum ras_command cmd_id, void *in, void *out)
1637{
1638 struct ta_ras_shared_memory *ras_cmd;
1639 uint32_t cmd = cmd_id;
1640 int ret = 0;
1641
1642 if (!in)
1643 return -EINVAL;
1644
1645 mutex_lock(&psp->ras_context.mutex);
1646 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1647 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1648
1649 switch (cmd) {
1650 case TA_RAS_COMMAND__ENABLE_FEATURES:
1651 case TA_RAS_COMMAND__DISABLE_FEATURES:
1652 memcpy(&ras_cmd->ras_in_message,
1653 in, sizeof(ras_cmd->ras_in_message));
1654 break;
1655 case TA_RAS_COMMAND__TRIGGER_ERROR:
1656 memcpy(&ras_cmd->ras_in_message.trigger_error,
1657 in, sizeof(ras_cmd->ras_in_message.trigger_error));
1658 break;
1659 case TA_RAS_COMMAND__QUERY_ADDRESS:
1660 memcpy(&ras_cmd->ras_in_message.address,
1661 in, sizeof(ras_cmd->ras_in_message.address));
1662 break;
1663 default:
1664 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1665 ret = -EINVAL;
1666 goto err_out;
1667 }
1668
1669 ras_cmd->cmd_id = cmd;
	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1671
1672 switch (cmd) {
1673 case TA_RAS_COMMAND__TRIGGER_ERROR:
1674 if (!ret && out)
1675 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1676 break;
1677 case TA_RAS_COMMAND__QUERY_ADDRESS:
1678 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1679 ret = -EINVAL;
1680 else if (out)
1681 memcpy(out,
1682 &ras_cmd->ras_out_message.address,
1683 sizeof(ras_cmd->ras_out_message.address));
1684 break;
1685 default:
1686 break;
1687 }
1688
1689err_out:
	mutex_unlock(&psp->ras_context.mutex);
1691
1692 return ret;
1693}
1694
1695int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1696{
1697 struct ta_ras_shared_memory *ras_cmd;
1698 int ret;
1699
1700 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1701
1702 /*
1703 * TODO: bypass the loading in sriov for now
1704 */
1705 if (amdgpu_sriov_vf(psp->adev))
1706 return 0;
1707
	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1709
1710 if (amdgpu_ras_intr_triggered())
1711 return ret;
1712
1713 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1714 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1715 return -EINVAL;
1716 }
1717
1718 if (!ret) {
1719 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1720 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1721
1722 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1723 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1724 dev_warn(psp->adev->dev,
1725 "RAS internal register access blocked\n");
1726
1727 psp_ras_ta_check_status(psp);
1728 }
1729
1730 return ret;
1731}
1732
1733int psp_ras_enable_features(struct psp_context *psp,
1734 union ta_ras_cmd_input *info, bool enable)
1735{
1736 enum ras_command cmd_id;
1737 int ret;
1738
1739 if (!psp->ras_context.context.initialized || !info)
1740 return -EINVAL;
1741
1742 cmd_id = enable ?
1743 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1745 if (ret)
1746 return -EINVAL;
1747
1748 return 0;
1749}
1750
1751int psp_ras_terminate(struct psp_context *psp)
1752{
1753 int ret;
1754
1755 /*
1756 * TODO: bypass the terminate in sriov for now
1757 */
1758 if (amdgpu_sriov_vf(psp->adev))
1759 return 0;
1760
1761 if (!psp->ras_context.context.initialized)
1762 return 0;
1763
	ret = psp_ta_unload(psp, &psp->ras_context.context);
1765
1766 psp->ras_context.context.initialized = false;
1767
	mutex_destroy(&psp->ras_context.mutex);
1769
1770 return ret;
1771}
1772
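/* Load the RAS TA: reconcile the GECC boot-config setting with the requested
 * ECC mode, set up the TA init flags (poison mode, dGPU mode, xcc/umc masks,
 * NPS mode) and mark the RAS context initialized on success.
 */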
1773int psp_ras_initialize(struct psp_context *psp)
1774{
1775 int ret;
1776 uint32_t boot_cfg = 0xFF;
1777 struct amdgpu_device *adev = psp->adev;
1778 struct ta_ras_shared_memory *ras_cmd;
1779
1780 /*
1781 * TODO: bypass the initialize in sriov for now
1782 */
1783 if (amdgpu_sriov_vf(adev))
1784 return 0;
1785
1786 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1787 !adev->psp.ras_context.context.bin_desc.start_addr) {
1788 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1789 return 0;
1790 }
1791
1792 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1793 /* query GECC enablement status from boot config
1794 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1795 */
		ret = psp_boot_config_get(adev, &boot_cfg);
1797 if (ret)
1798 dev_warn(adev->dev, "PSP get boot config failed\n");
1799
1800 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1802 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
1803 dev_warn(adev->dev,
1804 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
1805 } else {
1806 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
			    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
1808 if (boot_cfg == 1) {
1809 dev_info(adev->dev, "GECC is enabled\n");
1810 } else {
1811 /* enable GECC in the next boot cycle if it is disabled
1812 * in the boot config, or force-enable GECC if we failed
1813 * to get the boot configuration
1814 */
1815 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1816 if (ret)
1817 dev_warn(adev->dev, "PSP set boot config failed\n");
1818 else
1819 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1820 }
1821 } else {
1822 if (!boot_cfg) {
1823 if (!adev->ras_default_ecc_enabled &&
1824 amdgpu_ras_enable != 1 &&
1825 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1826 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
1827 else
1828 dev_info(adev->dev, "GECC is disabled\n");
1829 } else {
1830 /* disable GECC in the next boot cycle if RAS is
1831 * disabled by the module parameters amdgpu_ras_enable
1832 * and/or amdgpu_ras_mask, or if the boot_config_get
1833 * call failed
1834 */
1835 ret = psp_boot_config_set(adev, 0);
1836 if (ret)
1837 dev_warn(adev->dev, "PSP set boot config failed\n");
1838 else
1839 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1840 }
1841 }
1842 }
1843 }
1844
1845 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1846 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1847
1848 if (!psp->ras_context.context.mem_context.shared_buf) {
1849 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1850 if (ret)
1851 return ret;
1852 }
1853
1854 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1855 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1856
1857 if (amdgpu_ras_is_poison_mode_supported(adev))
1858 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1859 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1860 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1861 ras_cmd->ras_in_message.init_flags.xcc_mask =
1862 adev->gfx.xcc_mask;
1863 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1864 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1865 ras_cmd->ras_in_message.init_flags.nps_mode =
1866 adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1867 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
1868
1869 ret = psp_ta_load(psp, &psp->ras_context.context);
1870
1871 if (!ret && !ras_cmd->ras_status) {
1872 psp->ras_context.context.initialized = true;
1873 mutex_init(&psp->ras_context.mutex);
1874 } else {
1875 if (ras_cmd->ras_status)
1876 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1877
1878 /* failed to load the RAS TA */
1879 psp->ras_context.context.initialized = false;
1880 }
1881
1882 return ret;
1883}
1884
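/*
 * Inject a RAS error through the TA. The per-IP instance mask is
 * folded into sub_block_index (shifted by AMDGPU_RAS_INST_SHIFT) for
 * backward compatibility with the TA interface.
 *
 * Illustrative caller sketch (only block_id shown; remaining fields
 * come from the TA interface header and are assumptions here):
 *
 *    struct ta_ras_trigger_error_input info = {
 *        .block_id = TA_RAS_BLOCK__GFX,
 *    };
 *    ret = psp_ras_trigger_error(psp, &info, instance_mask);
 */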
1885int psp_ras_trigger_error(struct psp_context *psp,
1886 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1887{
1888 struct amdgpu_device *adev = psp->adev;
1889 int ret;
1890 uint32_t dev_mask;
1891 uint32_t ras_status = 0;
1892
1893 if (!psp->ras_context.context.initialized || !info)
1894 return -EINVAL;
1895
1896 switch (info->block_id) {
1897 case TA_RAS_BLOCK__GFX:
1898 dev_mask = GET_MASK(GC, instance_mask);
1899 break;
1900 case TA_RAS_BLOCK__SDMA:
1901 dev_mask = GET_MASK(SDMA0, instance_mask);
1902 break;
1903 case TA_RAS_BLOCK__VCN:
1904 case TA_RAS_BLOCK__JPEG:
1905 dev_mask = GET_MASK(VCN, instance_mask);
1906 break;
1907 default:
1908 dev_mask = instance_mask;
1909 break;
1910 }
1911
1912 /* reuse sub_block_index for backward compatibility */
1913 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1914 dev_mask &= AMDGPU_RAS_INST_MASK;
1915 info->sub_block_index |= dev_mask;
1916
1917 ret = psp_ras_send_cmd(psp,
1918 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1919 if (ret)
1920 return -EINVAL;
1921
1922 /* If err_event_athub occurs, the error injection was successful;
1923 * however, the return status from the TA is no longer reliable
1924 */
1925 if (amdgpu_ras_intr_triggered())
1926 return 0;
1927
1928 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1929 return -EACCES;
1930 else if (ras_status)
1931 return -EINVAL;
1932
1933 return 0;
1934}
1935
1936int psp_ras_query_address(struct psp_context *psp,
1937 struct ta_ras_query_address_input *addr_in,
1938 struct ta_ras_query_address_output *addr_out)
1939{
1940 int ret;
1941
1942 if (!psp->ras_context.context.initialized ||
1943 !addr_in || !addr_out)
1944 return -EINVAL;
1945
1946 ret = psp_ras_send_cmd(psp,
1947 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1948
1949 return ret;
1950}
1951// ras end
1952
1953// HDCP start
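/*
 * The HDCP/DTM/RAP/securedisplay helpers below follow the same TA
 * pattern: skip under SR-IOV, bail out quietly if the optional TA
 * ucode is absent, allocate the shared buffer once, load the TA,
 * then mark the context initialized and set up its mutex.
 */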
1954static int psp_hdcp_initialize(struct psp_context *psp)
1955{
1956 int ret;
1957
1958 /*
1959 * TODO: bypass the initialize in sriov for now
1960 */
1961 if (amdgpu_sriov_vf(psp->adev))
1962 return 0;
1963
1964 /* bypass hdcp initialization if the DMU is harvested */
1965 if (!amdgpu_device_has_display_hardware(psp->adev))
1966 return 0;
1967
1968 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1969 !psp->hdcp_context.context.bin_desc.start_addr) {
1970 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1971 return 0;
1972 }
1973
1974 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1975 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1976
1977 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1978 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1979 if (ret)
1980 return ret;
1981 }
1982
1983 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1984 if (!ret) {
1985 psp->hdcp_context.context.initialized = true;
1986 mutex_init(&psp->hdcp_context.mutex);
1987 }
1988
1989 return ret;
1990}
1991
1992int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1993{
1994 /*
1995 * TODO: bypass the loading in sriov for now
1996 */
1997 if (amdgpu_sriov_vf(psp->adev))
1998 return 0;
1999
2000 if (!psp->hdcp_context.context.initialized)
2001 return 0;
2002
2003 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
2004}
2005
2006static int psp_hdcp_terminate(struct psp_context *psp)
2007{
2008 int ret;
2009
2010 /*
2011 * TODO: bypass the terminate in sriov for now
2012 */
2013 if (amdgpu_sriov_vf(psp->adev))
2014 return 0;
2015
2016 if (!psp->hdcp_context.context.initialized)
2017 return 0;
2018
2019 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
2020
2021 psp->hdcp_context.context.initialized = false;
2022
2023 return ret;
2024}
2025// HDCP end
2026
2027// DTM start
2028static int psp_dtm_initialize(struct psp_context *psp)
2029{
2030 int ret;
2031
2032 /*
2033 * TODO: bypass the initialize in sriov for now
2034 */
2035 if (amdgpu_sriov_vf(psp->adev))
2036 return 0;
2037
2038 /* bypass dtm initialization if the DMU is harvested */
2039 if (!amdgpu_device_has_display_hardware(psp->adev))
2040 return 0;
2041
2042 if (!psp->dtm_context.context.bin_desc.size_bytes ||
2043 !psp->dtm_context.context.bin_desc.start_addr) {
2044 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2045 return 0;
2046 }
2047
2048 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2049 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2050
2051 if (!psp->dtm_context.context.mem_context.shared_buf) {
2052 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2053 if (ret)
2054 return ret;
2055 }
2056
2057 ret = psp_ta_load(psp, &psp->dtm_context.context);
2058 if (!ret) {
2059 psp->dtm_context.context.initialized = true;
2060 mutex_init(&psp->dtm_context.mutex);
2061 }
2062
2063 return ret;
2064}
2065
2066int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2067{
2068 /*
2069 * TODO: bypass the loading in sriov for now
2070 */
2071 if (amdgpu_sriov_vf(psp->adev))
2072 return 0;
2073
2074 if (!psp->dtm_context.context.initialized)
2075 return 0;
2076
2077 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2078}
2079
2080static int psp_dtm_terminate(struct psp_context *psp)
2081{
2082 int ret;
2083
2084 /*
2085 * TODO: bypass the terminate in sriov for now
2086 */
2087 if (amdgpu_sriov_vf(psp->adev))
2088 return 0;
2089
2090 if (!psp->dtm_context.context.initialized)
2091 return 0;
2092
2093 ret = psp_ta_unload(psp, &psp->dtm_context.context);
2094
2095 psp->dtm_context.context.initialized = false;
2096
2097 return ret;
2098}
2099// DTM end
2100
2101// RAP start
2102static int psp_rap_initialize(struct psp_context *psp)
2103{
2104 int ret;
2105 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2106
2107 /*
2108 * TODO: bypass the initialize in sriov for now
2109 */
2110 if (amdgpu_sriov_vf(psp->adev))
2111 return 0;
2112
2113 if (!psp->rap_context.context.bin_desc.size_bytes ||
2114 !psp->rap_context.context.bin_desc.start_addr) {
2115 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2116 return 0;
2117 }
2118
2119 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2120 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2121
2122 if (!psp->rap_context.context.mem_context.shared_buf) {
2123 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2124 if (ret)
2125 return ret;
2126 }
2127
2128 ret = psp_ta_load(psp, &psp->rap_context.context);
2129 if (!ret) {
2130 psp->rap_context.context.initialized = true;
2131 mutex_init(&psp->rap_context.mutex);
2132 } else
2133 return ret;
2134
2135 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2136 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2137 psp_rap_terminate(psp);
2138 /* free rap shared memory */
2139 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2140
2141 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2142 ret, status);
2143
2144 return ret;
2145 }
2146
2147 return 0;
2148}
2149
2150static int psp_rap_terminate(struct psp_context *psp)
2151{
2152 int ret;
2153
2154 if (!psp->rap_context.context.initialized)
2155 return 0;
2156
2157 ret = psp_ta_unload(psp, &psp->rap_context.context);
2158
2159 psp->rap_context.context.initialized = false;
2160
2161 return ret;
2162}
2163
2164int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2165{
2166 struct ta_rap_shared_memory *rap_cmd;
2167 int ret = 0;
2168
2169 if (!psp->rap_context.context.initialized)
2170 return 0;
2171
2172 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2173 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2174 return -EINVAL;
2175
2176 mutex_lock(&psp->rap_context.mutex);
2177
2178 rap_cmd = (struct ta_rap_shared_memory *)
2179 psp->rap_context.context.mem_context.shared_buf;
2180 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2181
2182 rap_cmd->cmd_id = ta_cmd_id;
2183 rap_cmd->validation_method_id = METHOD_A;
2184
2185 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2186 if (ret)
2187 goto out_unlock;
2188
2189 if (status)
2190 *status = rap_cmd->rap_status;
2191
2192out_unlock:
2193 mutex_unlock(&psp->rap_context.mutex);
2194
2195 return ret;
2196}
2197// RAP end
2198
2199/* securedisplay start */
2200static int psp_securedisplay_initialize(struct psp_context *psp)
2201{
2202 int ret;
2203 struct ta_securedisplay_cmd *securedisplay_cmd;
2204
2205 /*
2206 * TODO: bypass the initialize in sriov for now
2207 */
2208 if (amdgpu_sriov_vf(psp->adev))
2209 return 0;
2210
2211 /* bypass securedisplay initialization if the DMU is harvested */
2212 if (!amdgpu_device_has_display_hardware(psp->adev))
2213 return 0;
2214
2215 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2216 !psp->securedisplay_context.context.bin_desc.start_addr) {
2217 dev_info(psp->adev->dev,
2218 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
2219 return 0;
2220 }
2221
2222 psp->securedisplay_context.context.mem_context.shared_mem_size =
2223 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2224 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2225
2226 if (!psp->securedisplay_context.context.initialized) {
2227 ret = psp_ta_init_shared_buf(psp,
2228 &psp->securedisplay_context.context.mem_context);
2229 if (ret)
2230 return ret;
2231 }
2232
2233 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2234 if (!ret) {
2235 psp->securedisplay_context.context.initialized = true;
2236 mutex_init(&psp->securedisplay_context.mutex);
2237 } else
2238 return ret;
2239
2240 mutex_lock(&psp->securedisplay_context.mutex);
2241
2242 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2243 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2244
2245 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2246
2247 mutex_unlock(&psp->securedisplay_context.mutex);
2248
2249 if (ret) {
2250 psp_securedisplay_terminate(psp);
2251 /* free securedisplay shared memory */
2252 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2253 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2254 return -EINVAL;
2255 }
2256
2257 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2258 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2259 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2260 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2261 /* don't try again */
2262 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2263 }
2264
2265 return 0;
2266}
2267
2268static int psp_securedisplay_terminate(struct psp_context *psp)
2269{
2270 int ret;
2271
2272 /*
2273 * TODO: bypass the terminate in sriov for now
2274 */
2275 if (amdgpu_sriov_vf(psp->adev))
2276 return 0;
2277
2278 if (!psp->securedisplay_context.context.initialized)
2279 return 0;
2280
2281 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2282
2283 psp->securedisplay_context.context.initialized = false;
2284
2285 return ret;
2286}
2287
2288int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2289{
2290 int ret;
2291
2292 if (!psp->securedisplay_context.context.initialized)
2293 return -EINVAL;
2294
2295 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2296 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
2297 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
2298 return -EINVAL;
2299
2300 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2301
2302 return ret;
2303}
2304/* SECUREDISPLAY end */
2305
2306int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2307{
2308 struct psp_context *psp = &adev->psp;
2309 int ret = 0;
2310
2311 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2312 ret = psp->funcs->wait_for_bootloader(psp);
2313
2314 return ret;
2315}
2316
2317bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2318{
2319 if (psp->funcs &&
2320 psp->funcs->get_ras_capability) {
2321 return psp->funcs->get_ras_capability(psp);
2322 } else {
2323 return false;
2324 }
2325}
2326
2327bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2328{
2329 struct psp_context *psp = &adev->psp;
2330
2331 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2332 return false;
2333
2334 if (psp->funcs && psp->funcs->is_reload_needed)
2335 return psp->funcs->is_reload_needed(psp);
2336
2337 return false;
2338}
2339
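/*
 * Bring up the PSP on bare metal: push the bootloader components in
 * order (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV,
 * IPKEYMGR_DRV, SPDM_DRV, then SOS), create the KM ring, and order
 * TMR init/load around the PMFW load when DF Cstate management is
 * centralized in the PMFW.
 */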
2340static int psp_hw_start(struct psp_context *psp)
2341{
2342 struct amdgpu_device *adev = psp->adev;
2343 int ret;
2344
2345 if (!amdgpu_sriov_vf(adev)) {
2346 if ((is_psp_fw_valid(psp->kdb)) &&
2347 (psp->funcs->bootloader_load_kdb != NULL)) {
2348 ret = psp_bootloader_load_kdb(psp);
2349 if (ret) {
2350 dev_err(adev->dev, "PSP load kdb failed!\n");
2351 return ret;
2352 }
2353 }
2354
2355 if ((is_psp_fw_valid(psp->spl)) &&
2356 (psp->funcs->bootloader_load_spl != NULL)) {
2357 ret = psp_bootloader_load_spl(psp);
2358 if (ret) {
2359 dev_err(adev->dev, "PSP load spl failed!\n");
2360 return ret;
2361 }
2362 }
2363
2364 if ((is_psp_fw_valid(psp->sys)) &&
2365 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2366 ret = psp_bootloader_load_sysdrv(psp);
2367 if (ret) {
2368 dev_err(adev->dev, "PSP load sys drv failed!\n");
2369 return ret;
2370 }
2371 }
2372
2373 if ((is_psp_fw_valid(psp->soc_drv)) &&
2374 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2375 ret = psp_bootloader_load_soc_drv(psp);
2376 if (ret) {
2377 dev_err(adev->dev, "PSP load soc drv failed!\n");
2378 return ret;
2379 }
2380 }
2381
2382 if ((is_psp_fw_valid(psp->intf_drv)) &&
2383 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2384 ret = psp_bootloader_load_intf_drv(psp);
2385 if (ret) {
2386 dev_err(adev->dev, "PSP load intf drv failed!\n");
2387 return ret;
2388 }
2389 }
2390
2391 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2392 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2393 ret = psp_bootloader_load_dbg_drv(psp);
2394 if (ret) {
2395 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2396 return ret;
2397 }
2398 }
2399
2400 if ((is_psp_fw_valid(psp->ras_drv)) &&
2401 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2402 ret = psp_bootloader_load_ras_drv(psp);
2403 if (ret) {
2404 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2405 return ret;
2406 }
2407 }
2408
2409 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2410 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2411 ret = psp_bootloader_load_ipkeymgr_drv(psp);
2412 if (ret) {
2413 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2414 return ret;
2415 }
2416 }
2417
2418 if ((is_psp_fw_valid(psp->spdm_drv)) &&
2419 (psp->funcs->bootloader_load_spdm_drv != NULL)) {
2420 ret = psp_bootloader_load_spdm_drv(psp);
2421 if (ret) {
2422 dev_err(adev->dev, "PSP load spdm_drv failed!\n");
2423 return ret;
2424 }
2425 }
2426
2427 if ((is_psp_fw_valid(psp->sos)) &&
2428 (psp->funcs->bootloader_load_sos != NULL)) {
2429 ret = psp_bootloader_load_sos(psp);
2430 if (ret) {
2431 dev_err(adev->dev, "PSP load sos failed!\n");
2432 return ret;
2433 }
2434 }
2435 }
2436
2437 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2438 if (ret) {
2439 dev_err(adev->dev, "PSP create ring failed!\n");
2440 return ret;
2441 }
2442
2443 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2444 goto skip_pin_bo;
2445
2446 if (!psp->boot_time_tmr || psp->autoload_supported) {
2447 ret = psp_tmr_init(psp);
2448 if (ret) {
2449 dev_err(adev->dev, "PSP tmr init failed!\n");
2450 return ret;
2451 }
2452 }
2453
2454skip_pin_bo:
2455 /*
2456 * For ASICs with DF Cstate management centralized
2457 * to PMFW, TMR setup should be performed after PMFW
2458 * is loaded and before other non-PSP firmware is loaded.
2459 */
2460 if (psp->pmfw_centralized_cstate_management) {
2461 ret = psp_load_smu_fw(psp);
2462 if (ret)
2463 return ret;
2464 }
2465
2466 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2467 ret = psp_tmr_load(psp);
2468 if (ret) {
2469 dev_err(adev->dev, "PSP load tmr failed!\n");
2470 return ret;
2471 }
2472 }
2473
2474 return 0;
2475}
2476
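/*
 * Map an AMDGPU ucode ID to the corresponding PSP GFX firmware type;
 * unknown IDs are rejected with -EINVAL so no load command is issued
 * for them.
 */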
2477static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2478 enum psp_gfx_fw_type *type)
2479{
2480 switch (ucode->ucode_id) {
2481 case AMDGPU_UCODE_ID_CAP:
2482 *type = GFX_FW_TYPE_CAP;
2483 break;
2484 case AMDGPU_UCODE_ID_SDMA0:
2485 *type = GFX_FW_TYPE_SDMA0;
2486 break;
2487 case AMDGPU_UCODE_ID_SDMA1:
2488 *type = GFX_FW_TYPE_SDMA1;
2489 break;
2490 case AMDGPU_UCODE_ID_SDMA2:
2491 *type = GFX_FW_TYPE_SDMA2;
2492 break;
2493 case AMDGPU_UCODE_ID_SDMA3:
2494 *type = GFX_FW_TYPE_SDMA3;
2495 break;
2496 case AMDGPU_UCODE_ID_SDMA4:
2497 *type = GFX_FW_TYPE_SDMA4;
2498 break;
2499 case AMDGPU_UCODE_ID_SDMA5:
2500 *type = GFX_FW_TYPE_SDMA5;
2501 break;
2502 case AMDGPU_UCODE_ID_SDMA6:
2503 *type = GFX_FW_TYPE_SDMA6;
2504 break;
2505 case AMDGPU_UCODE_ID_SDMA7:
2506 *type = GFX_FW_TYPE_SDMA7;
2507 break;
2508 case AMDGPU_UCODE_ID_CP_MES:
2509 *type = GFX_FW_TYPE_CP_MES;
2510 break;
2511 case AMDGPU_UCODE_ID_CP_MES_DATA:
2512 *type = GFX_FW_TYPE_MES_STACK;
2513 break;
2514 case AMDGPU_UCODE_ID_CP_MES1:
2515 *type = GFX_FW_TYPE_CP_MES_KIQ;
2516 break;
2517 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2518 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2519 break;
2520 case AMDGPU_UCODE_ID_CP_CE:
2521 *type = GFX_FW_TYPE_CP_CE;
2522 break;
2523 case AMDGPU_UCODE_ID_CP_PFP:
2524 *type = GFX_FW_TYPE_CP_PFP;
2525 break;
2526 case AMDGPU_UCODE_ID_CP_ME:
2527 *type = GFX_FW_TYPE_CP_ME;
2528 break;
2529 case AMDGPU_UCODE_ID_CP_MEC1:
2530 *type = GFX_FW_TYPE_CP_MEC;
2531 break;
2532 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2533 *type = GFX_FW_TYPE_CP_MEC_ME1;
2534 break;
2535 case AMDGPU_UCODE_ID_CP_MEC2:
2536 *type = GFX_FW_TYPE_CP_MEC;
2537 break;
2538 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2539 *type = GFX_FW_TYPE_CP_MEC_ME2;
2540 break;
2541 case AMDGPU_UCODE_ID_RLC_P:
2542 *type = GFX_FW_TYPE_RLC_P;
2543 break;
2544 case AMDGPU_UCODE_ID_RLC_V:
2545 *type = GFX_FW_TYPE_RLC_V;
2546 break;
2547 case AMDGPU_UCODE_ID_RLC_G:
2548 *type = GFX_FW_TYPE_RLC_G;
2549 break;
2550 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2551 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2552 break;
2553 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2554 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2555 break;
2556 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2557 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2558 break;
2559 case AMDGPU_UCODE_ID_RLC_IRAM:
2560 *type = GFX_FW_TYPE_RLC_IRAM;
2561 break;
2562 case AMDGPU_UCODE_ID_RLC_DRAM:
2563 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2564 break;
2565 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2566 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2567 break;
2568 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2569 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2570 break;
2571 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2572 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2573 break;
2574 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2575 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2576 break;
2577 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2578 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2579 break;
2580 case AMDGPU_UCODE_ID_SMC:
2581 *type = GFX_FW_TYPE_SMU;
2582 break;
2583 case AMDGPU_UCODE_ID_PPTABLE:
2584 *type = GFX_FW_TYPE_PPTABLE;
2585 break;
2586 case AMDGPU_UCODE_ID_UVD:
2587 *type = GFX_FW_TYPE_UVD;
2588 break;
2589 case AMDGPU_UCODE_ID_UVD1:
2590 *type = GFX_FW_TYPE_UVD1;
2591 break;
2592 case AMDGPU_UCODE_ID_VCE:
2593 *type = GFX_FW_TYPE_VCE;
2594 break;
2595 case AMDGPU_UCODE_ID_VCN:
2596 *type = GFX_FW_TYPE_VCN;
2597 break;
2598 case AMDGPU_UCODE_ID_VCN1:
2599 *type = GFX_FW_TYPE_VCN1;
2600 break;
2601 case AMDGPU_UCODE_ID_DMCU_ERAM:
2602 *type = GFX_FW_TYPE_DMCU_ERAM;
2603 break;
2604 case AMDGPU_UCODE_ID_DMCU_INTV:
2605 *type = GFX_FW_TYPE_DMCU_ISR;
2606 break;
2607 case AMDGPU_UCODE_ID_VCN0_RAM:
2608 *type = GFX_FW_TYPE_VCN0_RAM;
2609 break;
2610 case AMDGPU_UCODE_ID_VCN1_RAM:
2611 *type = GFX_FW_TYPE_VCN1_RAM;
2612 break;
2613 case AMDGPU_UCODE_ID_DMCUB:
2614 *type = GFX_FW_TYPE_DMUB;
2615 break;
2616 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2617 case AMDGPU_UCODE_ID_SDMA_RS64:
2618 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2619 break;
2620 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2621 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2622 break;
2623 case AMDGPU_UCODE_ID_IMU_I:
2624 *type = GFX_FW_TYPE_IMU_I;
2625 break;
2626 case AMDGPU_UCODE_ID_IMU_D:
2627 *type = GFX_FW_TYPE_IMU_D;
2628 break;
2629 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2630 *type = GFX_FW_TYPE_RS64_PFP;
2631 break;
2632 case AMDGPU_UCODE_ID_CP_RS64_ME:
2633 *type = GFX_FW_TYPE_RS64_ME;
2634 break;
2635 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2636 *type = GFX_FW_TYPE_RS64_MEC;
2637 break;
2638 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2639 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2640 break;
2641 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2642 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2643 break;
2644 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2645 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2646 break;
2647 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2648 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2649 break;
2650 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2651 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2652 break;
2653 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2654 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2655 break;
2656 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2657 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2658 break;
2659 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2660 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2661 break;
2662 case AMDGPU_UCODE_ID_VPE_CTX:
2663 *type = GFX_FW_TYPE_VPEC_FW1;
2664 break;
2665 case AMDGPU_UCODE_ID_VPE_CTL:
2666 *type = GFX_FW_TYPE_VPEC_FW2;
2667 break;
2668 case AMDGPU_UCODE_ID_VPE:
2669 *type = GFX_FW_TYPE_VPE;
2670 break;
2671 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2672 *type = GFX_FW_TYPE_UMSCH_UCODE;
2673 break;
2674 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2675 *type = GFX_FW_TYPE_UMSCH_DATA;
2676 break;
2677 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2678 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2679 break;
2680 case AMDGPU_UCODE_ID_P2S_TABLE:
2681 *type = GFX_FW_TYPE_P2S_TABLE;
2682 break;
2683 case AMDGPU_UCODE_ID_JPEG_RAM:
2684 *type = GFX_FW_TYPE_JPEG_RAM;
2685 break;
2686 case AMDGPU_UCODE_ID_ISP:
2687 *type = GFX_FW_TYPE_ISP;
2688 break;
2689 case AMDGPU_UCODE_ID_MAXIMUM:
2690 default:
2691 return -EINVAL;
2692 }
2693
2694 return 0;
2695}
2696
2697static void psp_print_fw_hdr(struct psp_context *psp,
2698 struct amdgpu_firmware_info *ucode)
2699{
2700 struct amdgpu_device *adev = psp->adev;
2701 struct common_firmware_header *hdr;
2702
2703 switch (ucode->ucode_id) {
2704 case AMDGPU_UCODE_ID_SDMA0:
2705 case AMDGPU_UCODE_ID_SDMA1:
2706 case AMDGPU_UCODE_ID_SDMA2:
2707 case AMDGPU_UCODE_ID_SDMA3:
2708 case AMDGPU_UCODE_ID_SDMA4:
2709 case AMDGPU_UCODE_ID_SDMA5:
2710 case AMDGPU_UCODE_ID_SDMA6:
2711 case AMDGPU_UCODE_ID_SDMA7:
2712 hdr = (struct common_firmware_header *)
2713 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2714 amdgpu_ucode_print_sdma_hdr(hdr);
2715 break;
2716 case AMDGPU_UCODE_ID_CP_CE:
2717 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2718 amdgpu_ucode_print_gfx_hdr(hdr);
2719 break;
2720 case AMDGPU_UCODE_ID_CP_PFP:
2721 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2722 amdgpu_ucode_print_gfx_hdr(hdr);
2723 break;
2724 case AMDGPU_UCODE_ID_CP_ME:
2725 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2726 amdgpu_ucode_print_gfx_hdr(hdr);
2727 break;
2728 case AMDGPU_UCODE_ID_CP_MEC1:
2729 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2730 amdgpu_ucode_print_gfx_hdr(hdr);
2731 break;
2732 case AMDGPU_UCODE_ID_RLC_G:
2733 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2734 amdgpu_ucode_print_rlc_hdr(hdr);
2735 break;
2736 case AMDGPU_UCODE_ID_SMC:
2737 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2738 amdgpu_ucode_print_smc_hdr(hdr);
2739 break;
2740 default:
2741 break;
2742 }
2743}
2744
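/*
 * Build a GFX_CMD_ID_LOAD_IP_FW command: the firmware MC address is
 * split into lo/hi 32-bit halves, the size and PSP firmware type are
 * filled in, and psp_execute_ip_fw_load() then submits the command
 * on the KM ring with the shared fence buffer.
 */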
2745static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2746 struct amdgpu_firmware_info *ucode,
2747 struct psp_gfx_cmd_resp *cmd)
2748{
2749 int ret;
2750 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2751
2752 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2753 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2754 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2755 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2756
2757 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2758 if (ret)
2759 dev_err(psp->adev->dev, "Unknown firmware type\n");
2760
2761 return ret;
2762}
2763
2764int psp_execute_ip_fw_load(struct psp_context *psp,
2765 struct amdgpu_firmware_info *ucode)
2766{
2767 int ret = 0;
2768 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2769
2770 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2771 if (!ret) {
2772 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2773 psp->fence_buf_mc_addr);
2774 }
2775
2776 release_psp_cmd_buf(psp);
2777
2778 return ret;
2779}
2780
2781static int psp_load_p2s_table(struct psp_context *psp)
2782{
2783 int ret;
2784 struct amdgpu_device *adev = psp->adev;
2785 struct amdgpu_firmware_info *ucode =
2786 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2787
2788 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2789 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2790 return 0;
2791
2792 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2793 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2794 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2795 0x0036003C;
2796 if (psp->sos.fw_version < supp_vers)
2797 return 0;
2798 }
2799
2800 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2801 return 0;
2802
2803 ret = psp_execute_ip_fw_load(psp, ucode);
2804
2805 return ret;
2806}
2807
2808static int psp_load_smu_fw(struct psp_context *psp)
2809{
2810 int ret;
2811 struct amdgpu_device *adev = psp->adev;
2812 struct amdgpu_firmware_info *ucode =
2813 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2814 struct amdgpu_ras *ras = psp->ras_context.ras;
2815
2816 /*
2817 * Skip SMU FW reloading when only BACO/BAMACO is used for
2818 * runtime PM, as the SMU stays alive in that case.
2819 */
2820 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2821 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2822 return 0;
2823
2824 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2825 return 0;
2826
2827 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2828 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2829 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2830 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2831 if (ret)
2832 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2833 }
2834
2835 ret = psp_execute_ip_fw_load(psp, ucode);
2836
2837 if (ret)
2838 dev_err(adev->dev, "PSP load smu failed!\n");
2839
2840 return ret;
2841}
2842
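/*
 * Decide whether a ucode entry should be skipped by the PSP loader:
 * empty entries, the P2S table (loaded separately), SMC when the SMU
 * is handled elsewhere (reload quirk, autoload, or PMFW-centralized
 * cstate management), SR-IOV-filtered ucodes, and the MEC JT images
 * when RLC autoload is in use.
 */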
2843static bool fw_load_skip_check(struct psp_context *psp,
2844 struct amdgpu_firmware_info *ucode)
2845{
2846 if (!ucode->fw || !ucode->ucode_size)
2847 return true;
2848
2849 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2850 return true;
2851
2852 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2853 (psp_smu_reload_quirk(psp) ||
2854 psp->autoload_supported ||
2855 psp->pmfw_centralized_cstate_management))
2856 return true;
2857
2858 if (amdgpu_sriov_vf(psp->adev) &&
2859 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2860 return true;
2861
2862 if (psp->autoload_supported &&
2863 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2864 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2865 /* skip mec JT when autoload is enabled */
2866 return true;
2867
2868 return false;
2869}
2870
2871int psp_load_fw_list(struct psp_context *psp,
2872 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2873{
2874 int ret = 0, i;
2875 struct amdgpu_firmware_info *ucode;
2876
2877 for (i = 0; i < ucode_count; ++i) {
2878 ucode = ucode_list[i];
2879 psp_print_fw_hdr(psp, ucode);
2880 ret = psp_execute_ip_fw_load(psp, ucode);
2881 if (ret)
2882 return ret;
2883 }
2884 return ret;
2885}
2886
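/*
 * Load every non-PSP firmware through the PSP: SMU first when
 * autoload is enabled without centralized cstate management, then the
 * optional P2S table, then each remaining ucode (subject to
 * fw_load_skip_check()), kicking off RLC autoload once the last GFX
 * firmware has been handed to the PSP.
 */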
2887static int psp_load_non_psp_fw(struct psp_context *psp)
2888{
2889 int i, ret;
2890 struct amdgpu_firmware_info *ucode;
2891 struct amdgpu_device *adev = psp->adev;
2892
2893 if (psp->autoload_supported &&
2894 !psp->pmfw_centralized_cstate_management) {
2895 ret = psp_load_smu_fw(psp);
2896 if (ret)
2897 return ret;
2898 }
2899
2900 /* Load P2S table first if it's available */
2901 psp_load_p2s_table(psp);
2902
2903 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2904 ucode = &adev->firmware.ucode[i];
2905
2906 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2907 !fw_load_skip_check(psp, ucode)) {
2908 ret = psp_load_smu_fw(psp);
2909 if (ret)
2910 return ret;
2911 continue;
2912 }
2913
2914 if (fw_load_skip_check(psp, ucode))
2915 continue;
2916
2917 if (psp->autoload_supported &&
2918 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2919 IP_VERSION(11, 0, 7) ||
2920 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2921 IP_VERSION(11, 0, 11) ||
2922 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2923 IP_VERSION(11, 0, 12)) &&
2924 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2925 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2926 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2927 /* PSP only receives one SDMA firmware for sienna_cichlid,
2928 * as all four SDMA firmware images are the same
2929 */
2930 continue;
2931
2932 psp_print_fw_hdr(psp, ucode);
2933
2934 ret = psp_execute_ip_fw_load(psp, ucode);
2935 if (ret)
2936 return ret;
2937
2938 /* Start RLC autoload after PSP has received all the GFX firmware */
2939 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2940 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2941 ret = psp_rlc_autoload_start(psp);
2942 if (ret) {
2943 dev_err(adev->dev, "Failed to start rlc autoload\n");
2944 return ret;
2945 }
2946 }
2947 }
2948
2949 return 0;
2950}
2951
2952static int psp_load_fw(struct amdgpu_device *adev)
2953{
2954 int ret;
2955 struct psp_context *psp = &adev->psp;
2956
2957 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2958 /* should not destroy ring, only stop */
2959 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2960 } else {
2961 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2962
2963 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2964 if (ret) {
2965 dev_err(adev->dev, "PSP ring init failed!\n");
2966 goto failed;
2967 }
2968 }
2969
2970 ret = psp_hw_start(psp);
2971 if (ret)
2972 goto failed;
2973
2974 ret = psp_load_non_psp_fw(psp);
2975 if (ret)
2976 goto failed1;
2977
2978 ret = psp_asd_initialize(psp);
2979 if (ret) {
2980 dev_err(adev->dev, "PSP load asd failed!\n");
2981 goto failed1;
2982 }
2983
2984 ret = psp_rl_load(adev);
2985 if (ret) {
2986 dev_err(adev->dev, "PSP load RL failed!\n");
2987 goto failed1;
2988 }
2989
2990 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2991 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2992 ret = psp_xgmi_initialize(psp, false, true);
2993 /* Warn on XGMI session initialization failure
2994 * instead of stopping driver initialization
2995 */
2996 if (ret)
2997 dev_err(psp->adev->dev,
2998 "XGMI: Failed to initialize XGMI session\n");
2999 }
3000 }
3001
3002 if (psp->ta_fw) {
3003 ret = psp_ras_initialize(psp);
3004 if (ret)
3005 dev_err(psp->adev->dev,
3006 "RAS: Failed to initialize RAS\n");
3007
3008 ret = psp_hdcp_initialize(psp);
3009 if (ret)
3010 dev_err(psp->adev->dev,
3011 "HDCP: Failed to initialize HDCP\n");
3012
3013 ret = psp_dtm_initialize(psp);
3014 if (ret)
3015 dev_err(psp->adev->dev,
3016 "DTM: Failed to initialize DTM\n");
3017
3018 ret = psp_rap_initialize(psp);
3019 if (ret)
3020 dev_err(psp->adev->dev,
3021 "RAP: Failed to initialize RAP\n");
3022
3023 ret = psp_securedisplay_initialize(psp);
3024 if (ret)
3025 dev_err(psp->adev->dev,
3026 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3027 }
3028
3029 return 0;
3030
3031failed1:
3032 psp_free_shared_bufs(psp);
3033failed:
3034 /*
3035 * all cleanup jobs (xgmi terminate, ras terminate,
3036 * ring destroy, cmd/fence/fw buffers destroy,
3037 * psp->cmd destroy) are delayed to psp_hw_fini
3038 */
3039 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3040 return ret;
3041}
3042
3043static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3044{
3045 int ret;
3046 struct amdgpu_device *adev = ip_block->adev;
3047
3048 mutex_lock(&adev->firmware.mutex);
3049
3050 ret = amdgpu_ucode_init_bo(adev);
3051 if (ret)
3052 goto failed;
3053
3054 ret = psp_load_fw(adev);
3055 if (ret) {
3056 dev_err(adev->dev, "PSP firmware loading failed\n");
3057 goto failed;
3058 }
3059
3060 mutex_unlock(&adev->firmware.mutex);
3061 return 0;
3062
3063failed:
3064 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3065 mutex_unlock(&adev->firmware.mutex);
3066 return -EINVAL;
3067}
3068
3069static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3070{
3071 struct amdgpu_device *adev = ip_block->adev;
3072 struct psp_context *psp = &adev->psp;
3073
3074 if (psp->ta_fw) {
3075 psp_ras_terminate(psp);
3076 psp_securedisplay_terminate(psp);
3077 psp_rap_terminate(psp);
3078 psp_dtm_terminate(psp);
3079 psp_hdcp_terminate(psp);
3080
3081 if (adev->gmc.xgmi.num_physical_nodes > 1)
3082 psp_xgmi_terminate(psp);
3083 }
3084
3085 psp_asd_terminate(psp);
3086 psp_tmr_terminate(psp);
3087
3088 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3089
3090 return 0;
3091}
3092
3093static int psp_suspend(struct amdgpu_ip_block *ip_block)
3094{
3095 int ret = 0;
3096 struct amdgpu_device *adev = ip_block->adev;
3097 struct psp_context *psp = &adev->psp;
3098
3099 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3100 psp->xgmi_context.context.initialized) {
3101 ret = psp_xgmi_terminate(psp);
3102 if (ret) {
3103 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3104 goto out;
3105 }
3106 }
3107
3108 if (psp->ta_fw) {
3109 ret = psp_ras_terminate(psp);
3110 if (ret) {
3111 dev_err(adev->dev, "Failed to terminate ras ta\n");
3112 goto out;
3113 }
3114 ret = psp_hdcp_terminate(psp);
3115 if (ret) {
3116 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3117 goto out;
3118 }
3119 ret = psp_dtm_terminate(psp);
3120 if (ret) {
3121 dev_err(adev->dev, "Failed to terminate dtm ta\n");
3122 goto out;
3123 }
3124 ret = psp_rap_terminate(psp);
3125 if (ret) {
3126 dev_err(adev->dev, "Failed to terminate rap ta\n");
3127 goto out;
3128 }
3129 ret = psp_securedisplay_terminate(psp);
3130 if (ret) {
3131 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3132 goto out;
3133 }
3134 }
3135
3136 ret = psp_asd_terminate(psp);
3137 if (ret) {
3138 dev_err(adev->dev, "Failed to terminate asd\n");
3139 goto out;
3140 }
3141
3142 ret = psp_tmr_terminate(psp);
3143 if (ret) {
3144 dev_err(adev->dev, "Failed to terminate tmr\n");
3145 goto out;
3146 }
3147
3148 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3149 if (ret)
3150 dev_err(adev->dev, "PSP ring stop failed\n");
3151
3152out:
3153 return ret;
3154}
3155
3156static int psp_resume(struct amdgpu_ip_block *ip_block)
3157{
3158 int ret;
3159 struct amdgpu_device *adev = ip_block->adev;
3160 struct psp_context *psp = &adev->psp;
3161
3162 dev_info(adev->dev, "PSP is resuming...\n");
3163
3164 if (psp->mem_train_ctx.enable_mem_training) {
3165 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3166 if (ret) {
3167 dev_err(adev->dev, "Failed to process memory training!\n");
3168 return ret;
3169 }
3170 }
3171
3172 mutex_lock(&adev->firmware.mutex);
3173
3174 ret = amdgpu_ucode_init_bo(adev);
3175 if (ret)
3176 goto failed;
3177
3178 ret = psp_hw_start(psp);
3179 if (ret)
3180 goto failed;
3181
3182 ret = psp_load_non_psp_fw(psp);
3183 if (ret)
3184 goto failed;
3185
3186 ret = psp_asd_initialize(psp);
3187 if (ret) {
3188 dev_err(adev->dev, "PSP load asd failed!\n");
3189 goto failed;
3190 }
3191
3192 ret = psp_rl_load(adev);
3193 if (ret) {
3194 dev_err(adev->dev, "PSP load RL failed!\n");
3195 goto failed;
3196 }
3197
3198 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3199 ret = psp_xgmi_initialize(psp, false, true);
3200 /* Warn on XGMI session initialization failure
3201 * instead of stopping driver initialization
3202 */
3203 if (ret)
3204 dev_err(psp->adev->dev,
3205 "XGMI: Failed to initialize XGMI session\n");
3206 }
3207
3208 if (psp->ta_fw) {
3209 ret = psp_ras_initialize(psp);
3210 if (ret)
3211 dev_err(psp->adev->dev,
3212 "RAS: Failed to initialize RAS\n");
3213
3214 ret = psp_hdcp_initialize(psp);
3215 if (ret)
3216 dev_err(psp->adev->dev,
3217 "HDCP: Failed to initialize HDCP\n");
3218
3219 ret = psp_dtm_initialize(psp);
3220 if (ret)
3221 dev_err(psp->adev->dev,
3222 "DTM: Failed to initialize DTM\n");
3223
3224 ret = psp_rap_initialize(psp);
3225 if (ret)
3226 dev_err(psp->adev->dev,
3227 "RAP: Failed to initialize RAP\n");
3228
3229 ret = psp_securedisplay_initialize(psp);
3230 if (ret)
3231 dev_err(psp->adev->dev,
3232 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3233 }
3234
3235 mutex_unlock(&adev->firmware.mutex);
3236
3237 return 0;
3238
3239failed:
3240 dev_err(adev->dev, "PSP resume failed\n");
3241 mutex_unlock(&adev->firmware.mutex);
3242 return ret;
3243}
3244
3245int psp_gpu_reset(struct amdgpu_device *adev)
3246{
3247 int ret;
3248
3249 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3250 return 0;
3251
3252 mutex_lock(&adev->psp.mutex);
3253 ret = psp_mode1_reset(&adev->psp);
3254 mutex_unlock(&adev->psp.mutex);
3255
3256 return ret;
3257}
3258
3259int psp_rlc_autoload_start(struct psp_context *psp)
3260{
3261 int ret;
3262 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3263
3264 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3265
3266 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3267 psp->fence_buf_mc_addr);
3268
3269 release_psp_cmd_buf(psp);
3270
3271 return ret;
3272}
3273
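/*
 * Submit one KM (GPCOM) ring frame: the write pointer is tracked in
 * DWORDs, so the frame slot is derived from it and bounds-checked
 * against the ring, the frame is filled with the command/fence
 * addresses, flushed through HDP, and the write pointer is advanced
 * modulo the ring size.
 */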
3274int psp_ring_cmd_submit(struct psp_context *psp,
3275 uint64_t cmd_buf_mc_addr,
3276 uint64_t fence_mc_addr,
3277 int index)
3278{
3279 unsigned int psp_write_ptr_reg = 0;
3280 struct psp_gfx_rb_frame *write_frame;
3281 struct psp_ring *ring = &psp->km_ring;
3282 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3283 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3284 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3285 struct amdgpu_device *adev = psp->adev;
3286 uint32_t ring_size_dw = ring->ring_size / 4;
3287 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3288
3289 /* KM (GPCOM) prepare write pointer */
3290 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3291
3292 /* Update KM RB frame pointer to new frame */
3293 /* write_frame ptr increments by size of rb_frame in bytes */
3294 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3295 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3296 write_frame = ring_buffer_start;
3297 else
3298 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3299 /* Check invalid write_frame ptr address */
3300 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3301 dev_err(adev->dev,
3302 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3303 ring_buffer_start, ring_buffer_end, write_frame);
3304 dev_err(adev->dev,
3305 "write_frame is pointing to address out of bounds\n");
3306 return -EINVAL;
3307 }
3308
3309 /* Initialize KM RB frame */
3310 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3311
3312 /* Update KM RB frame */
3313 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3314 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3315 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3316 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3317 write_frame->fence_value = index;
3318 amdgpu_device_flush_hdp(adev, NULL);
3319
3320 /* Update the write Pointer in DWORDs */
3321 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3322 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3323 return 0;
3324}
3325
3326int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3327{
3328 struct amdgpu_device *adev = psp->adev;
3329 const struct psp_firmware_header_v1_0 *asd_hdr;
3330 int err = 0;
3331
3332 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
3333 "amdgpu/%s_asd.bin", chip_name);
3334 if (err)
3335 goto out;
3336
3337 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3338 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3339 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3340 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3341 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3342 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3343 return 0;
3344out:
3345 amdgpu_ucode_release(&adev->psp.asd_fw);
3346 return err;
3347}
3348
3349int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3350{
3351 struct amdgpu_device *adev = psp->adev;
3352 const struct psp_firmware_header_v1_0 *toc_hdr;
3353 int err = 0;
3354
3355 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
3356 "amdgpu/%s_toc.bin", chip_name);
3357 if (err)
3358 goto out;
3359
3360 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3361 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3362 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3363 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3364 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3365 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3366 return 0;
3367out:
3368 amdgpu_ucode_release(&adev->psp.toc_fw);
3369 return err;
3370}
3371
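/*
 * For v2.x SOS images the individual components (SOS, SYS_DRV, KDB,
 * TOC, SPL, RL and the various *_DRV blobs) are described by an array
 * of psp_fw_bin_desc entries; copy each descriptor's version, size
 * and start address into the matching psp_context bin slot.
 */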
3372static int parse_sos_bin_descriptor(struct psp_context *psp,
3373 const struct psp_fw_bin_desc *desc,
3374 const struct psp_firmware_header_v2_0 *sos_hdr)
3375{
3376 uint8_t *ucode_start_addr = NULL;
3377
3378 if (!psp || !desc || !sos_hdr)
3379 return -EINVAL;
3380
3381 ucode_start_addr = (uint8_t *)sos_hdr +
3382 le32_to_cpu(desc->offset_bytes) +
3383 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3384
3385 switch (desc->fw_type) {
3386 case PSP_FW_TYPE_PSP_SOS:
3387 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3388 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3389 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3390 psp->sos.start_addr = ucode_start_addr;
3391 break;
3392 case PSP_FW_TYPE_PSP_SYS_DRV:
3393 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3394 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3395 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3396 psp->sys.start_addr = ucode_start_addr;
3397 break;
3398 case PSP_FW_TYPE_PSP_KDB:
3399 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3400 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3401 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3402 psp->kdb.start_addr = ucode_start_addr;
3403 break;
3404 case PSP_FW_TYPE_PSP_TOC:
3405 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3406 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3407 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3408 psp->toc.start_addr = ucode_start_addr;
3409 break;
3410 case PSP_FW_TYPE_PSP_SPL:
3411 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3412 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3413 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3414 psp->spl.start_addr = ucode_start_addr;
3415 break;
3416 case PSP_FW_TYPE_PSP_RL:
3417 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3418 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3419 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3420 psp->rl.start_addr = ucode_start_addr;
3421 break;
3422 case PSP_FW_TYPE_PSP_SOC_DRV:
3423 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3424 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3425 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3426 psp->soc_drv.start_addr = ucode_start_addr;
3427 break;
3428 case PSP_FW_TYPE_PSP_INTF_DRV:
3429 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3430 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3431 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3432 psp->intf_drv.start_addr = ucode_start_addr;
3433 break;
3434 case PSP_FW_TYPE_PSP_DBG_DRV:
3435 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3436 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3437 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3438 psp->dbg_drv.start_addr = ucode_start_addr;
3439 break;
3440 case PSP_FW_TYPE_PSP_RAS_DRV:
3441 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3442 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3443 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3444 psp->ras_drv.start_addr = ucode_start_addr;
3445 break;
3446 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3447 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
3448 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
3449 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3450 psp->ipkeymgr_drv.start_addr = ucode_start_addr;
3451 break;
3452 case PSP_FW_TYPE_PSP_SPDM_DRV:
3453 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
3454 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
3455 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3456 psp->spdm_drv.start_addr = ucode_start_addr;
3457 break;
3458 default:
3459 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3460 break;
3461 }
3462
3463 return 0;
3464}
3465
3466static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3467{
3468 const struct psp_firmware_header_v1_0 *sos_hdr;
3469 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3470 uint8_t *ucode_array_start_addr;
3471
3472 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3473 ucode_array_start_addr = (uint8_t *)sos_hdr +
3474 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3475
3476 if (adev->gmc.xgmi.connected_to_cpu ||
3477 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3478 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3479 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3480
3481 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3482 adev->psp.sys.start_addr = ucode_array_start_addr;
3483
3484 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3485 adev->psp.sos.start_addr = ucode_array_start_addr +
3486 le32_to_cpu(sos_hdr->sos.offset_bytes);
3487 } else {
3488 /* Load alternate PSP SOS FW */
3489 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3490
3491 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3492 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3493
3494 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3495 adev->psp.sys.start_addr = ucode_array_start_addr +
3496 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3497
3498 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3499 adev->psp.sos.start_addr = ucode_array_start_addr +
3500 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3501 }
3502
3503 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3504 dev_warn(adev->dev, "PSP SOS FW not available");
3505 return -EINVAL;
3506 }
3507
3508 return 0;
3509}
3510
3511int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3512{
3513 struct amdgpu_device *adev = psp->adev;
3514 const struct psp_firmware_header_v1_0 *sos_hdr;
3515 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3516 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3517 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3518 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3519 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3520 int fw_index, fw_bin_count, start_index = 0;
3521 const struct psp_fw_bin_desc *fw_bin;
3522 uint8_t *ucode_array_start_addr;
3523 int err = 0;
3524
3525 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
3526 "amdgpu/%s_sos.bin", chip_name);
3527 if (err)
3528 goto out;
3529
3530 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3531 ucode_array_start_addr = (uint8_t *)sos_hdr +
3532 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3533 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3534
3535 switch (sos_hdr->header.header_version_major) {
3536 case 1:
3537 err = psp_init_sos_base_fw(adev);
3538 if (err)
3539 goto out;
3540
3541 if (sos_hdr->header.header_version_minor == 1) {
3542 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3543 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3544 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3545 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3546 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3547 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3548 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3549 }
3550 if (sos_hdr->header.header_version_minor == 2) {
3551 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3552 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3553 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3554 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3555 }
3556 if (sos_hdr->header.header_version_minor == 3) {
3557 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3558 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3559 adev->psp.toc.start_addr = ucode_array_start_addr +
3560 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3561 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3562 adev->psp.kdb.start_addr = ucode_array_start_addr +
3563 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3564 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3565 adev->psp.spl.start_addr = ucode_array_start_addr +
3566 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3567 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3568 adev->psp.rl.start_addr = ucode_array_start_addr +
3569 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3570 }
3571 break;
3572 case 2:
3573 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3574
3575 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3576
3577 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3578 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3579 err = -EINVAL;
3580 goto out;
3581 }
3582
3583 if (sos_hdr_v2_0->header.header_version_minor == 1) {
3584 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3585
3586 fw_bin = sos_hdr_v2_1->psp_fw_bin;
3587
3588 if (psp_is_aux_sos_load_required(psp))
3589 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3590 else
3591 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3592
3593 } else {
3594 fw_bin = sos_hdr_v2_0->psp_fw_bin;
3595 }
3596
3597 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3598 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3599 sos_hdr_v2_0);
3600 if (err)
3601 goto out;
3602 }
3603 break;
3604 default:
3605 dev_err(adev->dev,
3606 "unsupported psp sos firmware\n");
3607 err = -EINVAL;
3608 goto out;
3609 }
3610
3611 return 0;
3612out:
3613 amdgpu_ucode_release(&adev->psp.sos_fw);
3614
3615 return err;
3616}
3617
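/*
 * On MP0 13.0.6 the TA binary may carry both XGMI and XGMI_AUX
 * images; APUs whose TA version has a low byte of 0x14 or newer
 * select the AUX image, everything else keeps the regular XGMI image.
 */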
3618static bool is_ta_fw_applicable(struct psp_context *psp,
3619 const struct psp_fw_bin_desc *desc)
3620{
3621 struct amdgpu_device *adev = psp->adev;
3622 uint32_t fw_version;
3623
3624 switch (desc->fw_type) {
3625 case TA_FW_TYPE_PSP_XGMI:
3626 case TA_FW_TYPE_PSP_XGMI_AUX:
3627 /* for now, AUX TA only exists on 13.0.6 ta bin,
3628 * from v20.00.0x.14
3629 */
3630 if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3631 IP_VERSION(13, 0, 6)) {
3632 fw_version = le32_to_cpu(desc->fw_version);
3633
3634 if (adev->flags & AMD_IS_APU &&
3635 (fw_version & 0xff) >= 0x14)
3636 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3637 else
3638 return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3639 }
3640 break;
3641 default:
3642 break;
3643 }
3644
3645 return true;
3646}
3647
static int parse_ta_bin_descriptor(struct psp_context *psp,
				   const struct psp_fw_bin_desc *desc,
				   const struct ta_firmware_header_v2_0 *ta_hdr)
{
	uint8_t *ucode_start_addr = NULL;

	if (!psp || !desc || !ta_hdr)
		return -EINVAL;

	if (!is_ta_fw_applicable(psp, desc))
		return 0;

	ucode_start_addr = (uint8_t *)ta_hdr +
			   le32_to_cpu(desc->offset_bytes) +
			   le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	switch (desc->fw_type) {
	case TA_FW_TYPE_PSP_ASD:
		psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
		psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->asd_context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_XGMI:
	case TA_FW_TYPE_PSP_XGMI_AUX:
		psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAS:
		psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_HDCP:
		psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_DTM:
		psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAP:
		psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_SECUREDISPLAY:
		psp->securedisplay_context.context.bin_desc.fw_version =
			le32_to_cpu(desc->fw_version);
		psp->securedisplay_context.context.bin_desc.size_bytes =
			le32_to_cpu(desc->size_bytes);
		psp->securedisplay_context.context.bin_desc.start_addr =
			ucode_start_addr;
		break;
	default:
		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
		break;
	}

	return 0;
}

static int parse_ta_v1_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v1_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;

	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
		return -EINVAL;

	adev->psp.xgmi_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->xgmi.fw_version);
	adev->psp.xgmi_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->xgmi.size_bytes);
	adev->psp.xgmi_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.ras_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->ras.fw_version);
	adev->psp.ras_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->ras.size_bytes);
	adev->psp.ras_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->ras.offset_bytes);

	adev->psp.hdcp_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->hdcp.fw_version);
	adev->psp.hdcp_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->hdcp.size_bytes);
	adev->psp.hdcp_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.dtm_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->dtm.fw_version);
	adev->psp.dtm_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->dtm.size_bytes);
	adev->psp.dtm_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->dtm.offset_bytes);

	adev->psp.securedisplay_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->securedisplay.fw_version);
	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
	adev->psp.securedisplay_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);

	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

	return 0;
}

static int parse_ta_v2_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v2_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;
	int err = 0;
	int ta_index = 0;

	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
		return -EINVAL;

	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
		return -EINVAL;
	}

	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
		err = parse_ta_bin_descriptor(psp,
					      &ta_hdr->ta_fw_bin[ta_index],
					      ta_hdr);
		if (err)
			return err;
	}

	return 0;
}

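/*
 * Request amdgpu/<chip>_ta.bin and parse it according to the TA header
 * major version: v1 uses the fixed monolithic layout, v2 uses packed
 * per-TA descriptors. The firmware is released again on any failure.
 */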
int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
	const struct common_firmware_header *hdr;
	struct amdgpu_device *adev = psp->adev;
	int err;

	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_ta.bin", chip_name);
	if (err)
		return err;

	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
	switch (le16_to_cpu(hdr->header_version_major)) {
	case 1:
		err = parse_ta_v1_microcode(psp);
		break;
	case 2:
		err = parse_ta_v2_microcode(psp);
		break;
	default:
		dev_err(adev->dev, "unsupported TA header version\n");
		err = -EINVAL;
	}

	if (err)
		amdgpu_ucode_release(&adev->psp.ta_fw);

	return err;
}

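/*
 * The CAP (capability) microcode is optional and only applicable when
 * running as an SR-IOV virtual function; a missing amdgpu/<chip>_cap.bin
 * is therefore not treated as an error.
 */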
int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
	struct amdgpu_firmware_info *info = NULL;
	int err = 0;

	if (!amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
		return -EINVAL;
	}

	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
				   "amdgpu/%s_cap.bin", chip_name);
	if (err) {
		if (err == -ENODEV) {
			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
			err = 0;
		} else {
			dev_err(adev->dev, "failed to initialize cap microcode\n");
		}
		goto out;
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
	info->ucode_id = AMDGPU_UCODE_ID_CAP;
	info->fw = adev->psp.cap_fw;
	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
		adev->psp.cap_fw->data;
	adev->firmware.fw_size += ALIGN(
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);

	return 0;

out:
	amdgpu_ucode_release(&adev->psp.cap_fw);
	return err;
}

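/*
 * Send GFX_CMD_ID_CONFIG_SQ_PERFMON to the PSP to apply SQ perfmon
 * override settings for the XCP selected by @xcp_id. The command is
 * only supported on MP0 13.0.6 and is skipped under SR-IOV.
 */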
int psp_config_sq_perfmon(struct psp_context *psp,
			  uint32_t xcp_id, bool core_override_enable,
			  bool reg_override_enable, bool perfmon_override_enable)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (xcp_id > MAX_XCP) {
		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
		return -EINVAL;
	}

	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
			 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);

	release_psp_cmd_buf(psp);
	return ret;
}

static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_ip_block *ip_block;
	uint32_t fw_ver;
	int ret;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
		return ret;
	}

	return sysfs_emit(buf, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret, idx;
	const struct firmware *usbc_pd_fw;
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_err(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	if (!drm_dev_enter(ddev, &idx))
		return -ENODEV;

	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s", buf);
	if (ret)
		goto fail;

	/* LFB address which is aligned to 1MB boundary per PSP request */
	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &fw_buf_bo, &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	amdgpu_ucode_release(&usbc_pd_fw);
fail:
	if (ret) {
		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
		count = ret;
	}

	drm_dev_exit(idx);
	return count;
}

void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
		return;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, start_addr, bin_size);

	drm_dev_exit(idx);
}

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
 * this file will trigger the update process.
 */
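/*
 * Example usage (hypothetical card index; the written firmware name is
 * resolved relative to /lib/firmware/amdgpu/):
 *
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   echo usbc_pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */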
static DEVICE_ATTR(usbc_pd_fw, 0644,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);

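/* A PSP firmware binary is considered valid once its descriptor records a non-zero size. */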
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					const struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       const struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
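/*
 * Example usage (hypothetical sysfs path and image name):
 *
 *   cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash
 *   cat /sys/class/drm/card0/device/psp_vbflash
 */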
static const struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write_new = amdgpu_psp_vbflash_write,
	.read_new = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
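/*
 * Example usage (hypothetical sysfs path):
 *
 *   cat /sys/class/drm/card0/device/psp_vbflash_status
 */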
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);

static const struct bin_attribute *const bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						const struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs_new = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

#if defined(CONFIG_DEBUG_FS)
static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet;
	int ret;

	/* serialize the open() file calling */
	if (!mutex_trylock(&adev->psp.mutex))
		return -EBUSY;

	/*
	 * Make sure only one userspace process is alive for dumping, so that
	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
	 * e.g. when one process tries to open the file while another one has
	 * already proceeded to read or release. This also eliminates the need
	 * for a mutex in the read() and release() callbacks.
	 */
	if (adev->psp.spirom_dump_trip) {
		mutex_unlock(&adev->psp.mutex);
		return -EBUSY;
	}

	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
	if (!bo_triplet) {
		mutex_unlock(&adev->psp.mutex);
		return -ENOMEM;
	}

	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo_triplet->bo,
				      &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
	if (ret)
		goto rel_trip;

	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
	if (ret)
		goto rel_bo;

	adev->psp.spirom_dump_trip = bo_triplet;
	mutex_unlock(&adev->psp.mutex);
	return 0;
rel_bo:
	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
			      &bo_triplet->cpu_addr);
rel_trip:
	kfree(bo_triplet);
	mutex_unlock(&adev->psp.mutex);
	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
	return ret;
}

static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
					    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (!bo_triplet)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, bo_triplet->cpu_addr,
				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
}

static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (bo_triplet) {
		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
		kfree(bo_triplet);
	}

	adev->psp.spirom_dump_trip = NULL;
	return 0;
}

static const struct file_operations psp_dump_spirom_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = psp_read_spirom_debugfs_open,
	.read = psp_read_spirom_debugfs_read,
	.release = psp_read_spirom_debugfs_release,
	.llseek = default_llseek,
};
#endif

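/*
 * Example usage (hypothetical debugfs path, assuming DRM minor 0):
 *
 *   cat /sys/kernel/debug/dri/0/psp_spirom_dump > spirom_dump.bin
 */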
void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;

	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
#endif
}

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 14,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};
