/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn35_smu.h"

#include "mp/mp_14_0_0_offset.h"
#include "mp/mp_14_0_0_sh_mask.h"

/* TODO: Use the real headers when they're correct */
#define MP1_BASE__INST0_SEG0 0x00016000
#define MP1_BASE__INST0_SEG1 0x0243FC00
#define MP1_BASE__INST0_SEG2 0x00DC0000
#define MP1_BASE__INST0_SEG3 0x00E00000
#define MP1_BASE__INST0_SEG4 0x00E40000
#define MP1_BASE__INST0_SEG5 0

#ifdef BASE_INNER
#undef BASE_INNER
#endif

#define BASE_INNER(seg) MP1_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define REG(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)

#define FN(reg_name, field) \
        FD(reg_name##__##field)

#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
        CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }

#define VBIOSSMC_MSG_TestMessage                  0x1
#define VBIOSSMC_MSG_GetSmuVersion                0x2
#define VBIOSSMC_MSG_PowerUpGfx                   0x3
#define VBIOSSMC_MSG_SetDispclkFreq               0x4
#define VBIOSSMC_MSG_SetDprefclkFreq              0x5 //Not used. DPRef is constant
#define VBIOSSMC_MSG_SetDppclkFreq                0x6
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq       0x7
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk        0x8
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq       0x9 //Keep it in case VMIN does not support phy clk
#define VBIOSSMC_MSG_GetFclkFrequency             0xA
#define VBIOSSMC_MSG_SetDisplayCount              0xB //Not used anymore
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xC //Ask PMFW to turn off the TMDP 48MHz refclk during display off to save power
#define VBIOSSMC_MSG_UpdatePmeRestore             0xD
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh         0xE //Used for WM table transfer
#define VBIOSSMC_MSG_SetVbiosDramAddrLow          0xF
#define VBIOSSMC_MSG_TransferTableSmu2Dram        0x10
#define VBIOSSMC_MSG_TransferTableDram2Smu        0x11
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations  0x12
#define VBIOSSMC_MSG_GetDprefclkFreq              0x13
#define VBIOSSMC_MSG_GetDtbclkFreq                0x14
#define VBIOSSMC_MSG_AllowZstatesEntry            0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry         0x16
#define VBIOSSMC_MSG_SetDtbClk                    0x17
#define VBIOSSMC_MSG_DispPsrEntry                 0x18 ///< Display PSR entry, DMU
#define VBIOSSMC_MSG_DispPsrExit                  0x19 ///< Display PSR exit, DMU
#define VBIOSSMC_MSG_DisableLSdma                 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus        0x1B ///< Inform PMFW about the preconditions for turning SLDO2 on/off. bit[0]==1: precondition met, bit[1-2]: DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support             0x1C ///< Return 1: supported; otherwise not supported
#define VBIOSSMC_Message_Count                    0x1D

#define VBIOSSMC_Status_BUSY              0x0
#define VBIOSSMC_Result_OK                0x1
#define VBIOSSMC_Result_Failed            0xFF
#define VBIOSSMC_Result_UnknownCmd        0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy   0xFC
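
/*
 * Driver-to-PMFW mailbox registers used in this file (as exercised by the
 * helpers below): MP1_SMN_C2PMSG_67 carries the message ID, MP1_SMN_C2PMSG_83
 * carries the 32-bit argument and, on completion, the return value, and
 * MP1_SMN_C2PMSG_91 holds the response/status code (one of the
 * VBIOSSMC_Result_* values above, or VBIOSSMC_Status_BUSY while pending).
 * MP1_SMN_C2PMSG_71 is used separately as an IPS scratch register.
 */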

/*
 * Function to be used instead of REG_WAIT macro because the wait ends when
 * the register is NOT EQUAL to zero, and because the translation in msg_if.h
 * won't work with REG_WAIT.
 */
static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
        uint32_t res_val = VBIOSSMC_Status_BUSY;

        do {
                res_val = REG_READ(MP1_SMN_C2PMSG_91);
                if (res_val != VBIOSSMC_Status_BUSY)
                        break;

                if (delay_us >= 1000)
                        msleep(delay_us / 1000);
                else if (delay_us > 0)
                        udelay(delay_us);

                if (clk_mgr->base.ctx->dc->debug.disable_timeout)
                        max_retries++;
        } while (max_retries--);

        return res_val;
}

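/*
 * Send a single message to the PMFW: wait for any in-flight request to
 * complete, clear the response register, write the parameter and the message
 * ID, then poll for the result. Returns the value the PMFW leaves in the
 * parameter register, or -1 if the mailbox is still busy from a previous
 * request or the new request fails.
 */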
static int dcn35_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
                                         unsigned int msg_id,
                                         unsigned int param)
{
        uint32_t result;

        result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
        ASSERT(result == VBIOSSMC_Result_OK);

        if (result != VBIOSSMC_Result_OK) {
                DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);

                if (result == VBIOSSMC_Status_BUSY)
                        return -1;
        }

        /* First clear response register */
        REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

        /* Set the parameter register for the SMU message, unit is MHz */
        REG_WRITE(MP1_SMN_C2PMSG_83, param);

        /* Trigger the message transaction by writing the message ID */
        REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);

        result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);

        if (result == VBIOSSMC_Result_Failed) {
                if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
                    param == TABLE_WATERMARKS)
                        DC_LOG_WARNING("Watermarks table not configured properly by SMU");
                else
                        ASSERT(0);
                REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
                DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
                return -1;
        }

        if (IS_SMU_TIMEOUT(result)) {
                ASSERT(0);
                result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
                //dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
                DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
        }

        return REG_READ(MP1_SMN_C2PMSG_83);
}

int dcn35_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
        return dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_GetSmuVersion,
                        0);
}

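/*
 * The set_*clk helpers below share a convention: requests come in as kHz,
 * are rounded up to whole MHz for the PMFW message, and the MHz value the
 * PMFW reports back is scaled to kHz for the caller. When no SMU is present
 * the requested value is returned unchanged.
 */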
int dcn35_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
        int actual_dispclk_set_mhz = -1;

        if (!clk_mgr->smu_present)
                return requested_dispclk_khz;

        /* Unit of SMU msg parameter is MHz */
        actual_dispclk_set_mhz = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDispclkFreq,
                        khz_to_mhz_ceil(requested_dispclk_khz));

        smu_print("requested_dispclk_khz = %d, actual_dispclk_set_mhz: %d\n", requested_dispclk_khz, actual_dispclk_set_mhz);
        return actual_dispclk_set_mhz * 1000;
}

int dcn35_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
        int actual_dprefclk_set_mhz = -1;

        if (!clk_mgr->smu_present)
                return clk_mgr->base.dprefclk_khz;

        actual_dprefclk_set_mhz = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDprefclkFreq,
                        khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));

        /* TODO: add code for programming DP DTO; currently this is done by the command table */

        return actual_dprefclk_set_mhz * 1000;
}

int dcn35_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
        int actual_dcfclk_set_mhz = -1;

        if (!clk_mgr->smu_present)
                return requested_dcfclk_khz;

        actual_dcfclk_set_mhz = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));

        smu_print("requested_dcfclk_khz = %d, actual_dcfclk_set_mhz: %d\n", requested_dcfclk_khz, actual_dcfclk_set_mhz);

        return actual_dcfclk_set_mhz * 1000;
}

int dcn35_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
        int actual_min_ds_dcfclk_mhz = -1;

        if (!clk_mgr->smu_present)
                return requested_min_ds_dcfclk_khz;

        actual_min_ds_dcfclk_mhz = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
                        khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));

        smu_print("requested_min_ds_dcfclk_khz = %d, actual_min_ds_dcfclk_mhz: %d\n", requested_min_ds_dcfclk_khz, actual_min_ds_dcfclk_mhz);

        return actual_min_ds_dcfclk_mhz * 1000;
}

int dcn35_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
        int actual_dppclk_set_mhz = -1;

        if (!clk_mgr->smu_present)
                return requested_dpp_khz;

        actual_dppclk_set_mhz = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDppclkFreq,
                        khz_to_mhz_ceil(requested_dpp_khz));

        smu_print("requested_dpp_khz = %d, actual_dppclk_set_mhz: %d\n", requested_dpp_khz, actual_dppclk_set_mhz);

        return actual_dppclk_set_mhz * 1000;
}

void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
        if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
                return;

        if (!clk_mgr->smu_present)
                return;

        //TODO: Work with smu team to define optimization options.
        dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDisplayIdleOptimizations,
                        idle_info);
        smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info);
}

void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
        union display_idle_optimization_u idle_info = { 0 };

        if (!clk_mgr->smu_present)
                return;

        if (enable) {
                idle_info.idle_info.df_request_disabled = 1;
                idle_info.idle_info.phy_ref_clk_off = 1;
        }

        dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDisplayIdleOptimizations,
                        idle_info.data);
        smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}

void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_UpdatePmeRestore,
                        0);
        smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__);
}

void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}

void dcn35_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}

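/*
 * The table transfers below move the DPM clock and watermark tables between
 * SMU memory and the DRAM buffer whose address is programmed through the
 * SetVbiosDramAddrHigh/Low helpers above.
 */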
void dcn35_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}

void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}

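/*
 * AllowZstatesEntry takes a bitmask parameter. Judging from the cases below,
 * bit 8 gates Z8 and bit 10 gates Z10, with bit 9 additionally set only for
 * the unrestricted ALLOW case; a parameter of 0 disallows Z-state entry.
 */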
void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
        unsigned int msg_id, param, retv;

        if (!clk_mgr->smu_present)
                return;

        switch (support) {

        case DCN_ZSTATE_SUPPORT_ALLOW:
                msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
                param = (1 << 10) | (1 << 9) | (1 << 8);
                smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);
                break;

        case DCN_ZSTATE_SUPPORT_DISALLOW:
                msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
                param = 0;
                smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param);
                break;

        case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
                msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
                param = (1 << 10);
                smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);
                break;

        case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
                msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
                param = (1 << 10) | (1 << 8);
                smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);
                break;

        case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
                msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
                param = (1 << 8);
                smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);
                break;

        default: //DCN_ZSTATE_SUPPORT_UNKNOWN
                msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
                param = 0;
                break;
        }

        retv = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        msg_id,
                        param);
        smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);
}

int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
{
        int dprefclk;

        if (!clk_mgr->smu_present)
                return 0;

        dprefclk = dcn35_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_GetDprefclkFreq,
                        0);

        smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk);
        return dprefclk * 1000;
}

int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
{
        int dtbclk;

        if (!clk_mgr->smu_present)
                return 0;

        dtbclk = dcn35_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_GetDtbclkFreq,
                        0);

        smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk);
        return dtbclk * 1000;
}

/* Arg = 1: turn DTB clk on; 0: turn DTB clk off. When on, it runs at 600 MHz. */
void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDtbClk,
                        enable);
        smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0);
}

void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
        if (!clk_mgr->smu_present)
                return;

        dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
                        enable);
        smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}

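/*
 * Bring the SMU out of its display low power state. Note that the PSR-exit
 * message (VBIOSSMC_MSG_DispPsrExit) is what is sent here.
 */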
int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
        int retv;

        if (!clk_mgr->smu_present)
                return 0;

        retv = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_DispPsrExit,
                        0);
        smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
        return retv;
}

int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
        int retv;

        if (!clk_mgr->smu_present)
                return 0;

        retv = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_QueryIPS2Support,
                        0);

        //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
        return retv;
}

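/*
 * MP1_SMN_C2PMSG_71 is used as a plain scratch register for IPS bookkeeping:
 * it is written and read back directly rather than going through the message
 * mailbox.
 */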
void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
        if (!clk_mgr->smu_present)
                return;

        REG_WRITE(MP1_SMN_C2PMSG_71, param);
        //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}

uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
        uint32_t retv;

        if (!clk_mgr->smu_present)
                return 0;

        retv = REG_READ(MP1_SMN_C2PMSG_71);
        //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
        return retv;
}
