1 | /* |
2 | * Copyright 2022 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: AMD |
23 | * |
24 | */ |
25 | |
26 | |
27 | #include "dcn35_clk_mgr.h" |
28 | |
29 | #include "dccg.h" |
30 | #include "clk_mgr_internal.h" |
31 | |
32 | // For dce12_get_dp_ref_freq_khz |
33 | #include "dce100/dce_clk_mgr.h" |
34 | |
35 | // For dcn20_update_clocks_update_dpp_dto |
36 | #include "dcn20/dcn20_clk_mgr.h" |
37 | |
38 | |
39 | |
40 | |
41 | #include "reg_helper.h" |
42 | #include "core_types.h" |
43 | #include "dcn35_smu.h" |
44 | #include "dm_helpers.h" |
45 | |
46 | /* TODO: remove this include once we ported over remaining clk mgr functions*/ |
47 | #include "dcn30/dcn30_clk_mgr.h" |
48 | #include "dcn31/dcn31_clk_mgr.h" |
49 | |
50 | #include "dc_dmub_srv.h" |
51 | #include "link.h" |
52 | #include "logger_types.h" |
53 | |
/* Route DC_LOGGER through this clk_mgr's dc context. */
#undef DC_LOGGER
#define DC_LOGGER \
	clk_mgr->base.base.ctx->logger

/* CLK1_CLK_PLL_REQ: PLL feedback-multiplier request register. */
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0

/* FbMult fields: integer and 16-bit fractional parts of the PLL multiplier. */
#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L

/* CLK1_CLK2_BYPASS_CNTL: selects the CLK2 (dprefclk) bypass source. */
#define regCLK1_CLK2_BYPASS_CNTL 0x029c
#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0

#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L

/* CLK5_0_CLK5_spll_field_8: SPLL control; spll_ssc_en reports spread spectrum. */
#define regCLK5_0_CLK5_spll_field_8 0x464b
#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0

#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L

/* Minimum PMFW version with new DTBCLK handling (93.74.0). */
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0

/* Resolve a clock register address from the per-context offset table. */
#define REG(reg_name) \
	(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)

#define TO_CLK_MGR_DCN35(clk_mgr)\
	container_of(clk_mgr, struct clk_mgr_dcn35, base)
89 | |
90 | static int dcn35_get_active_display_cnt_wa( |
91 | struct dc *dc, |
92 | struct dc_state *context, |
93 | int *all_active_disps) |
94 | { |
95 | int i, display_count = 0; |
96 | bool tmds_present = false; |
97 | |
98 | for (i = 0; i < context->stream_count; i++) { |
99 | const struct dc_stream_state *stream = context->streams[i]; |
100 | |
101 | if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || |
102 | stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || |
103 | stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) |
104 | tmds_present = true; |
105 | } |
106 | |
107 | for (i = 0; i < dc->link_count; i++) { |
108 | const struct dc_link *link = dc->links[i]; |
109 | |
110 | /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ |
111 | if (link->link_enc && link->link_enc->funcs->is_dig_enabled && |
112 | link->link_enc->funcs->is_dig_enabled(link->link_enc)) |
113 | display_count++; |
114 | } |
115 | if (all_active_disps != NULL) |
116 | *all_active_disps = display_count; |
117 | /* WA for hang on HDMI after display off back on*/ |
118 | if (display_count == 0 && tmds_present) |
119 | display_count = 1; |
120 | |
121 | return display_count; |
122 | } |
123 | |
124 | static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, |
125 | bool safe_to_lower, bool disable) |
126 | { |
127 | struct dc *dc = clk_mgr_base->ctx->dc; |
128 | int i; |
129 | |
130 | for (i = 0; i < dc->res_pool->pipe_count; ++i) { |
131 | struct pipe_ctx *pipe = safe_to_lower |
132 | ? &context->res_ctx.pipe_ctx[i] |
133 | : &dc->current_state->res_ctx.pipe_ctx[i]; |
134 | |
135 | if (pipe->top_pipe || pipe->prev_odm_pipe) |
136 | continue; |
137 | if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(signal: pipe->stream->signal) || |
138 | !pipe->stream->link_enc)) { |
139 | if (disable) { |
140 | if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) |
141 | pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); |
142 | |
143 | reset_sync_context_for_pipe(dc, context, pipe_idx: i); |
144 | } else { |
145 | pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); |
146 | } |
147 | } |
148 | } |
149 | } |
150 | |
151 | static void dcn35_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, |
152 | struct dc_state *context, |
153 | int ref_dtbclk_khz) |
154 | { |
155 | struct dccg *dccg = clk_mgr->dccg; |
156 | uint32_t tg_mask = 0; |
157 | int i; |
158 | |
159 | for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { |
160 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
161 | struct dtbclk_dto_params dto_params = {0}; |
162 | |
163 | /* use mask to program DTO once per tg */ |
164 | if (pipe_ctx->stream_res.tg && |
165 | !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) { |
166 | tg_mask |= (1 << pipe_ctx->stream_res.tg->inst); |
167 | |
168 | dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; |
169 | dto_params.ref_dtbclk_khz = ref_dtbclk_khz; |
170 | |
171 | dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params); |
172 | //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params); |
173 | } |
174 | } |
175 | } |
176 | |
177 | static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, |
178 | struct dc_state *context, bool safe_to_lower) |
179 | { |
180 | int i; |
181 | bool dppclk_active[MAX_PIPES] = {0}; |
182 | |
183 | |
184 | clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; |
185 | for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { |
186 | int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; |
187 | |
188 | dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; |
189 | |
190 | if (context->res_ctx.pipe_ctx[i].plane_res.dpp) |
191 | dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; |
192 | else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) { |
193 | /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting. |
194 | * In this case just continue in loop |
195 | */ |
196 | continue; |
197 | } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) { |
198 | /* The software state is not valid if dpp resource is NULL and |
199 | * dppclk_khz > 0. |
200 | */ |
201 | ASSERT(false); |
202 | continue; |
203 | } |
204 | |
205 | prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i]; |
206 | |
207 | if (safe_to_lower || prev_dppclk_khz < dppclk_khz) |
208 | clk_mgr->dccg->funcs->update_dpp_dto( |
209 | clk_mgr->dccg, dpp_inst, dppclk_khz); |
210 | dppclk_active[dpp_inst] = true; |
211 | } |
212 | if (safe_to_lower) |
213 | for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { |
214 | struct dpp *old_dpp = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.dpp; |
215 | |
216 | if (old_dpp && !dppclk_active[old_dpp->inst]) |
217 | clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, old_dpp->inst, 0); |
218 | } |
219 | } |
220 | |
/*
 * dcn35_update_clocks() - commit the clock state of @context to SMU/DCCG.
 *
 * Compares the requested clocks in @context against the cached values in
 * clk_mgr_base->clks and issues SMU requests only for clocks that actually
 * change in the allowed direction (@safe_to_lower gates decreases). The
 * ordering of the SMU/DTO programming below is deliberate — do not reorder.
 * Finally notifies DMCUB of the resulting clock values.
 */
void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,
		bool safe_to_lower)
{
	union dmub_rb_cmd cmd;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count = 0;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;
	int all_active_disps = 0;

	if (dc->work_arounds.skip_clock_update)
		return;

	display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
	/* fall back to a 600 MHz DTBCLK reference when enabled but unset */
	if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
		new_clocks->ref_dtbclk_khz = 600000;

	/*
	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
	 * also if safe to lower is false, we just go in the higher state
	 */
	if (safe_to_lower) {
		if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn35_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
			dcn35_smu_set_dtbclk(clk_mgr, false);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			/* if we can go lower, go lower */
			if (display_count == 0)
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
		}
	} else {
		if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn35_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
			dcn35_smu_set_dtbclk(clk_mgr, true);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;

			/* enable DTBCLK in SMU first, then retarget the DTOs */
			dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
			clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
		}

		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			union display_idle_optimization_u idle_info = { 0 };

			/* zeroed idle_info disables all idle optimizations */
			dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}
	/* debug override: never let DCFCLK fall below the forced minimum */
	if (dc->debug.force_min_dcfclk_mhz > 0)
		new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
			new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn35_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn35_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
	if (new_clocks->dppclk_khz < 100000)
		new_clocks->dppclk_khz = 100000;

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		/* idle OTGs must be stopped around the DISPCLK change (see dcn35_disable_otg_wa) */
		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);

		update_dispclk = true;
	}

	/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
	if (!dc->debug.disable_dtb_ref_clk_switch &&
			should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
					clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
		dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
		clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
	}

	if (dpp_clock_lowered) {
		// increase per DPP DTO before lowering global dppclk
		dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per DPP DTO
		if (update_dppclk || update_dispclk)
			dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
		dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
	}

	// notify DMCUB of latest clocks
	memset(&cmd, 0, sizeof(cmd));
	cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
	cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
	cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
	cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
		clk_mgr_base->clks.dcfclk_deep_sleep_khz;
	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
356 | |
/*
 * Read the PLL feedback multiplier from CLK1_CLK_PLL_REQ and derive the VCO
 * frequency in kHz (FbMult * dfs_ref_freq_khz, computed in fixed point).
 */
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;
	struct dc_context *ctx = clk_mgr->base.ctx;

	/*
	 * Register value of fbmult is in fixed-point int.frac format; we are
	 * converting to 31.32 to leverage the fixed-point operations available
	 * in the driver.
	 * NOTE(review): FbMult_int_MASK is 0x1FF (9 bits), so "8 bit integer
	 * part" below looks off by one — verify against the register spec.
	 */

	REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
	REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * since fractional part is only 16 bit in register definition but is 32 bit
	 * in our fix point definiton, need to shift left by 16 to obtain correct value
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by REFCLK period */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* integer part is now VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}
387 | |
/* Forward the PME workaround request to the SMU. */
static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	dcn35_smu_enable_pme_wa(TO_CLK_MGR_INTERNAL(clk_mgr_base));
}
394 | |
395 | |
396 | bool dcn35_are_clock_states_equal(struct dc_clocks *a, |
397 | struct dc_clocks *b) |
398 | { |
399 | if (a->dispclk_khz != b->dispclk_khz) |
400 | return false; |
401 | else if (a->dppclk_khz != b->dppclk_khz) |
402 | return false; |
403 | else if (a->dcfclk_khz != b->dcfclk_khz) |
404 | return false; |
405 | else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) |
406 | return false; |
407 | else if (a->zstate_support != b->zstate_support) |
408 | return false; |
409 | else if (a->dtbclk_en != b->dtbclk_en) |
410 | return false; |
411 | |
412 | return true; |
413 | } |
414 | |
/* Intentionally empty: clock register dumping is not implemented for DCN35;
 * the hook exists to satisfy the clk_mgr interface.
 */
static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr_dcn35 *clk_mgr)
{
}
419 | |
420 | static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) |
421 | { |
422 | struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); |
423 | struct dc_context *ctx = clk_mgr->base.ctx; |
424 | uint32_t ssc_enable; |
425 | |
426 | REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable); |
427 | |
428 | return ssc_enable == 1; |
429 | } |
430 | |
431 | static void init_clk_states(struct clk_mgr *clk_mgr) |
432 | { |
433 | struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); |
434 | uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; |
435 | memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); |
436 | |
437 | if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD) |
438 | clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit |
439 | clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk |
440 | clk_mgr->clks.p_state_change_support = true; |
441 | clk_mgr->clks.prev_p_state_change_support = true; |
442 | clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; |
443 | clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; |
444 | } |
445 | |
446 | void dcn35_init_clocks(struct clk_mgr *clk_mgr) |
447 | { |
448 | struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); |
449 | init_clk_states(clk_mgr); |
450 | |
451 | // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk |
452 | if (dcn35_is_spll_ssc_enabled(clk_mgr_base: clk_mgr)) |
453 | clk_mgr->dp_dto_source_clock_in_khz = |
454 | dce_adjust_dp_ref_freq_for_ss(clk_mgr_dce: clk_mgr_int, dp_ref_clk_khz: clk_mgr->dprefclk_khz); |
455 | else |
456 | clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; |
457 | |
458 | } |
/* Default bandwidth parameters used before the real table is read from BIOS/SMU. */
static struct clk_bw_params dcn35_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.num_entries = 4,
	},

};
467 | |
/* Watermark table for DDR5 systems: identical latencies across sets A-D. */
static struct wm_table ddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
	}
};
504 | |
/* Watermark table for LPDDR5 systems: identical latencies across sets A-D. */
static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
	}
};
541 | |
/* Scratch DPM clock table used when no SMU-provided table is available. */
static DpmClocks_t_dcn35 dummy_clocks;

/* Placeholder watermark set. */
static struct dcn35_watermarks dummy_wms = { 0 };

/* Spread-spectrum info indexed by CLK2_BYPASS_SEL source; percentages are in
 * units of 1/ss_divider (i.e. 375/1000 = 0.375%).
 */
static struct dcn35_ss_info_table ss_info_table = {
	.ss_divider = 1000,
	.ss_percentage = {0, 0, 375, 375, 375}
};
550 | |
551 | static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) |
552 | { |
553 | struct dc_context *ctx = clk_mgr->base.ctx; |
554 | uint32_t clock_source; |
555 | |
556 | REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); |
557 | // If it's DFS mode, clock_source is 0. |
558 | if (dcn35_is_spll_ssc_enabled(clk_mgr_base: &clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { |
559 | clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; |
560 | |
561 | if (clk_mgr->dprefclk_ss_percentage != 0) { |
562 | clk_mgr->ss_on_dprefclk = true; |
563 | clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; |
564 | } |
565 | } |
566 | } |
567 | |
568 | static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table) |
569 | { |
570 | int i, num_valid_sets; |
571 | |
572 | num_valid_sets = 0; |
573 | |
574 | for (i = 0; i < WM_SET_COUNT; i++) { |
575 | /* skip empty entries, the smu array has no holes*/ |
576 | if (!bw_params->wm_table.entries[i].valid) |
577 | continue; |
578 | |
579 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; |
580 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; |
581 | /* We will not select WM based on fclk, so leave it as unconstrained */ |
582 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; |
583 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; |
584 | |
585 | if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) { |
586 | if (i == 0) |
587 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0; |
588 | else { |
589 | /* add 1 to make it non-overlapping with next lvl */ |
590 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = |
591 | bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1; |
592 | } |
593 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk = |
594 | bw_params->clk_table.entries[i].dcfclk_mhz; |
595 | |
596 | } else { |
597 | /* unconstrained for memory retraining */ |
598 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; |
599 | table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; |
600 | |
601 | /* Modify previous watermark range to cover up to max */ |
602 | table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; |
603 | } |
604 | num_valid_sets++; |
605 | } |
606 | |
607 | ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ |
608 | |
609 | /* modify the min and max to make sure we cover the whole range*/ |
610 | table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0; |
611 | table->WatermarkRow[WM_DCFCLK][0].MinClock = 0; |
612 | table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF; |
613 | table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF; |
614 | |
615 | /* This is for writeback only, does not matter currently as no writeback support*/ |
616 | table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A; |
617 | table->WatermarkRow[WM_SOCCLK][0].MinClock = 0; |
618 | table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF; |
619 | table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0; |
620 | table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF; |
621 | } |
622 | |
623 | static void dcn35_notify_wm_ranges(struct clk_mgr *clk_mgr_base) |
624 | { |
625 | struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); |
626 | struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr); |
627 | struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set; |
628 | |
629 | if (!clk_mgr->smu_ver) |
630 | return; |
631 | |
632 | if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0) |
633 | return; |
634 | |
635 | memset(table, 0, sizeof(*table)); |
636 | |
637 | dcn35_build_watermark_ranges(bw_params: clk_mgr_base->bw_params, table); |
638 | |
639 | dcn35_smu_set_dram_addr_high(clk_mgr, |
640 | addr_high: clk_mgr_dcn35->smu_wm_set.mc_address.high_part); |
641 | dcn35_smu_set_dram_addr_low(clk_mgr, |
642 | addr_low: clk_mgr_dcn35->smu_wm_set.mc_address.low_part); |
643 | dcn35_smu_transfer_wm_table_dram_2_smu(clk_mgr); |
644 | } |
645 | |
646 | static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, |
647 | struct dcn35_smu_dpm_clks *smu_dpm_clks) |
648 | { |
649 | DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks; |
650 | |
651 | if (!clk_mgr->smu_ver) |
652 | return; |
653 | |
654 | if (!table || smu_dpm_clks->mc_address.quad_part == 0) |
655 | return; |
656 | |
657 | memset(table, 0, sizeof(*table)); |
658 | |
659 | dcn35_smu_set_dram_addr_high(clk_mgr, |
660 | addr_high: smu_dpm_clks->mc_address.high_part); |
661 | dcn35_smu_set_dram_addr_low(clk_mgr, |
662 | addr_low: smu_dpm_clks->mc_address.low_part); |
663 | dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr); |
664 | } |
665 | |
/* Return the largest value in @clocks, or 0 when @num_clocks is 0. */
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t i;
	uint32_t max = 0;

	for (i = 0; i < num_clocks; i++)
		max = (clocks[i] > max) ? clocks[i] : max;

	return max;
}
678 | |
/* Plausibility check: a clock value must exceed 1 and stay below 100000. */
static inline bool is_valid_clock_value(uint32_t clock_value)
{
	if (clock_value <= 1)
		return false;
	return clock_value < 100000;
}
683 | |
684 | static unsigned int convert_wck_ratio(uint8_t wck_ratio) |
685 | { |
686 | switch (wck_ratio) { |
687 | case WCK_RATIO_1_2: |
688 | return 2; |
689 | |
690 | case WCK_RATIO_1_4: |
691 | return 4; |
692 | /* Find lowest DPM, FCLK is filled in reverse order*/ |
693 | |
694 | default: |
695 | break; |
696 | } |
697 | |
698 | return 1; |
699 | } |
700 | |
701 | static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry) |
702 | { |
703 | return entry->UClk * convert_wck_ratio(wck_ratio: entry->WckRatio) * 2; |
704 | } |
705 | |
706 | static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr, |
707 | struct integrated_info *bios_info, |
708 | DpmClocks_t_dcn35 *clock_table) |
709 | { |
710 | struct clk_bw_params *bw_params = clk_mgr->base.bw_params; |
711 | struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; |
712 | uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0; |
713 | uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0; |
714 | uint32_t num_memps, num_fclk, num_dcfclk; |
715 | int i; |
716 | |
717 | /* Determine min/max p-state values. */ |
718 | num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS : |
719 | clock_table->NumMemPstatesEnabled; |
720 | for (i = 0; i < num_memps; i++) { |
721 | uint32_t dram_speed_mts = calc_dram_speed_mts(entry: &clock_table->MemPstateTable[i]); |
722 | |
723 | if (is_valid_clock_value(clock_value: dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) { |
724 | max_dram_speed_mts = dram_speed_mts; |
725 | max_pstate = i; |
726 | } |
727 | } |
728 | |
729 | min_dram_speed_mts = max_dram_speed_mts; |
730 | min_pstate = max_pstate; |
731 | |
732 | for (i = 0; i < num_memps; i++) { |
733 | uint32_t dram_speed_mts = calc_dram_speed_mts(entry: &clock_table->MemPstateTable[i]); |
734 | |
735 | if (is_valid_clock_value(clock_value: dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) { |
736 | min_dram_speed_mts = dram_speed_mts; |
737 | min_pstate = i; |
738 | } |
739 | } |
740 | |
741 | /* We expect the table to contain at least one valid P-state entry. */ |
742 | ASSERT(clock_table->NumMemPstatesEnabled && |
743 | is_valid_clock_value(max_dram_speed_mts) && |
744 | is_valid_clock_value(min_dram_speed_mts)); |
745 | |
746 | /* dispclk and dppclk can be max at any voltage, same number of levels for both */ |
747 | if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS && |
748 | clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) { |
749 | max_dispclk = find_max_clk_value(clocks: clock_table->DispClocks, |
750 | num_clocks: clock_table->NumDispClkLevelsEnabled); |
751 | max_dppclk = find_max_clk_value(clocks: clock_table->DppClocks, |
752 | num_clocks: clock_table->NumDispClkLevelsEnabled); |
753 | } else { |
754 | /* Invalid number of entries in the table from PMFW. */ |
755 | ASSERT(0); |
756 | } |
757 | |
758 | /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */ |
759 | ASSERT(clock_table->NumDcfClkLevelsEnabled > 0); |
760 | |
761 | num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS : |
762 | clock_table->NumFclkLevelsEnabled; |
763 | max_fclk = find_max_clk_value(clocks: clock_table->FclkClocks_Freq, num_clocks: num_fclk); |
764 | |
765 | num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS : |
766 | clock_table->NumDcfClkLevelsEnabled; |
767 | for (i = 0; i < num_dcfclk; i++) { |
768 | int j; |
769 | |
770 | /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ |
771 | for (j = bw_params->clk_table.num_entries - 1; j > 0; j--) |
772 | if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i]) |
773 | break; |
774 | |
775 | bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz; |
776 | bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz; |
777 | bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz; |
778 | |
779 | /* Now update clocks we do read */ |
780 | bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk; |
781 | bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage; |
782 | bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i]; |
783 | bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i]; |
784 | bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; |
785 | bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; |
786 | bw_params->clk_table.entries[i].wck_ratio = |
787 | convert_wck_ratio(wck_ratio: clock_table->MemPstateTable[min_pstate].WckRatio); |
788 | |
789 | /* Dcfclk and Fclk are tied, but at a different ratio */ |
790 | bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]); |
791 | } |
792 | |
793 | /* Make sure to include at least one entry at highest pstate */ |
794 | if (max_pstate != min_pstate || i == 0) { |
795 | if (i > MAX_NUM_DPM_LVL - 1) |
796 | i = MAX_NUM_DPM_LVL - 1; |
797 | |
798 | bw_params->clk_table.entries[i].fclk_mhz = max_fclk; |
799 | bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk; |
800 | bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage; |
801 | bw_params->clk_table.entries[i].dcfclk_mhz = |
802 | find_max_clk_value(clocks: clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS); |
803 | bw_params->clk_table.entries[i].socclk_mhz = |
804 | find_max_clk_value(clocks: clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS); |
805 | bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; |
806 | bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; |
807 | bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio( |
808 | wck_ratio: clock_table->MemPstateTable[max_pstate].WckRatio); |
809 | i++; |
810 | } |
811 | bw_params->clk_table.num_entries = i--; |
812 | |
813 | /* Make sure all highest clocks are included*/ |
814 | bw_params->clk_table.entries[i].socclk_mhz = |
815 | find_max_clk_value(clocks: clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS); |
816 | bw_params->clk_table.entries[i].dispclk_mhz = |
817 | find_max_clk_value(clocks: clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS); |
818 | bw_params->clk_table.entries[i].dppclk_mhz = |
819 | find_max_clk_value(clocks: clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS); |
820 | bw_params->clk_table.entries[i].fclk_mhz = |
821 | find_max_clk_value(clocks: clock_table->FclkClocks_Freq, NUM_FCLK_DPM_LEVELS); |
822 | ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS)); |
823 | bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz; |
824 | bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz; |
825 | bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; |
826 | bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = clock_table->NumDcfClkLevelsEnabled; |
827 | bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = clock_table->NumDispClkLevelsEnabled; |
828 | bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = clock_table->NumDispClkLevelsEnabled; |
829 | bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled; |
830 | bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled; |
831 | bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled; |
832 | |
833 | /* |
834 | * Set any 0 clocks to max default setting. Not an issue for |
835 | * power since we aren't doing switching in such case anyway |
836 | */ |
837 | for (i = 0; i < bw_params->clk_table.num_entries; i++) { |
838 | if (!bw_params->clk_table.entries[i].fclk_mhz) { |
839 | bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz; |
840 | bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz; |
841 | bw_params->clk_table.entries[i].voltage = def_max.voltage; |
842 | } |
843 | if (!bw_params->clk_table.entries[i].dcfclk_mhz) |
844 | bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz; |
845 | if (!bw_params->clk_table.entries[i].socclk_mhz) |
846 | bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz; |
847 | if (!bw_params->clk_table.entries[i].dispclk_mhz) |
848 | bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz; |
849 | if (!bw_params->clk_table.entries[i].dppclk_mhz) |
850 | bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz; |
851 | if (!bw_params->clk_table.entries[i].fclk_mhz) |
852 | bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz; |
853 | if (!bw_params->clk_table.entries[i].phyclk_mhz) |
854 | bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz; |
855 | if (!bw_params->clk_table.entries[i].phyclk_d18_mhz) |
856 | bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz; |
857 | if (!bw_params->clk_table.entries[i].dtbclk_mhz) |
858 | bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; |
859 | } |
860 | ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz); |
861 | bw_params->vram_type = bios_info->memory_type; |
862 | bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4; |
863 | bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4; |
864 | |
865 | for (i = 0; i < WM_SET_COUNT; i++) { |
866 | bw_params->wm_table.entries[i].wm_inst = i; |
867 | |
868 | if (i >= bw_params->clk_table.num_entries) { |
869 | bw_params->wm_table.entries[i].valid = false; |
870 | continue; |
871 | } |
872 | |
873 | bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG; |
874 | bw_params->wm_table.entries[i].valid = true; |
875 | } |
876 | } |
877 | |
878 | static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) |
879 | { |
880 | int display_count; |
881 | struct dc *dc = clk_mgr_base->ctx->dc; |
882 | struct dc_state *context = dc->current_state; |
883 | |
884 | if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { |
885 | display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL); |
886 | /* if we can go lower, go lower */ |
887 | if (display_count == 0) |
888 | clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; |
889 | } |
890 | } |
891 | |
892 | static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) |
893 | { |
894 | struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); |
895 | struct dc *dc = clk_mgr_base->ctx->dc; |
896 | uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); |
897 | |
898 | if (dc->config.disable_ips == DMUB_IPS_ENABLE || |
899 | dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { |
900 | val = val & ~DMUB_IPS1_ALLOW_MASK; |
901 | val = val & ~DMUB_IPS2_ALLOW_MASK; |
902 | } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { |
903 | val |= DMUB_IPS1_ALLOW_MASK; |
904 | val |= DMUB_IPS2_ALLOW_MASK; |
905 | } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { |
906 | val = val & ~DMUB_IPS1_ALLOW_MASK; |
907 | val |= DMUB_IPS2_ALLOW_MASK; |
908 | } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { |
909 | val = val & ~DMUB_IPS1_ALLOW_MASK; |
910 | val = val & ~DMUB_IPS2_ALLOW_MASK; |
911 | } |
912 | |
913 | if (!allow_idle) { |
914 | val |= DMUB_IPS1_ALLOW_MASK; |
915 | val |= DMUB_IPS2_ALLOW_MASK; |
916 | } |
917 | |
918 | dcn35_smu_write_ips_scratch(clk_mgr, param: val); |
919 | } |
920 | |
static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
	/* SMU optimization is performed as part of the low power state exit. */
	dcn35_smu_exit_low_power_state(TO_CLK_MGR_INTERNAL(clk_mgr_base));
}
929 | |
/*
 * Report whether IPS (idle power states) is supported, as indicated by
 * the SMU. A non-zero SMU response means supported.
 */
static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	/* Simplified from `x ? true : false` with a dead initializer. */
	return dcn35_smu_get_ips_supported(clk_mgr) != 0;
}
939 | |
static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)
{
	/* The current IPS idle state is mirrored in an SMU scratch register. */
	return dcn35_smu_read_ips_scratch(TO_CLK_MGR_INTERNAL(clk_mgr_base));
}
946 | |
/* FPGA/emulation variant of init_clocks: only resets the cached clock state. */
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
	init_clk_states(clk_mgr);

	/* TODO: Implement the functions and remove the ifndef guard */
}
953 | |
954 | static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr, |
955 | struct dc_state *context, |
956 | bool safe_to_lower) |
957 | { |
958 | struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); |
959 | struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; |
960 | int fclk_adj = new_clocks->fclk_khz; |
961 | |
962 | /* TODO: remove this after correctly set by DML */ |
963 | new_clocks->dcfclk_khz = 400000; |
964 | new_clocks->socclk_khz = 400000; |
965 | |
966 | /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */ |
967 | //int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000; |
968 | new_clocks->fclk_khz = 4320000; |
969 | |
970 | if (should_set_clock(safe_to_lower, calc_clk: new_clocks->phyclk_khz, cur_clk: clk_mgr->clks.phyclk_khz)) { |
971 | clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; |
972 | } |
973 | |
974 | if (should_set_clock(safe_to_lower, calc_clk: new_clocks->dcfclk_khz, cur_clk: clk_mgr->clks.dcfclk_khz)) { |
975 | clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; |
976 | } |
977 | |
978 | if (should_set_clock(safe_to_lower, |
979 | calc_clk: new_clocks->dcfclk_deep_sleep_khz, cur_clk: clk_mgr->clks.dcfclk_deep_sleep_khz)) { |
980 | clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; |
981 | } |
982 | |
983 | if (should_set_clock(safe_to_lower, calc_clk: new_clocks->socclk_khz, cur_clk: clk_mgr->clks.socclk_khz)) { |
984 | clk_mgr->clks.socclk_khz = new_clocks->socclk_khz; |
985 | } |
986 | |
987 | if (should_set_clock(safe_to_lower, calc_clk: new_clocks->dramclk_khz, cur_clk: clk_mgr->clks.dramclk_khz)) { |
988 | clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz; |
989 | } |
990 | |
991 | if (should_set_clock(safe_to_lower, calc_clk: new_clocks->dppclk_khz, cur_clk: clk_mgr->clks.dppclk_khz)) { |
992 | clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz; |
993 | } |
994 | |
995 | if (should_set_clock(safe_to_lower, calc_clk: fclk_adj, cur_clk: clk_mgr->clks.fclk_khz)) { |
996 | clk_mgr->clks.fclk_khz = fclk_adj; |
997 | } |
998 | |
999 | if (should_set_clock(safe_to_lower, calc_clk: new_clocks->dispclk_khz, cur_clk: clk_mgr->clks.dispclk_khz)) { |
1000 | clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; |
1001 | } |
1002 | |
1003 | /* Both fclk and ref_dppclk run on the same scemi clock. |
1004 | * So take the higher value since the DPP DTO is typically programmed |
1005 | * such that max dppclk is 1:1 with ref_dppclk. |
1006 | */ |
1007 | if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz) |
1008 | clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz; |
1009 | if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz) |
1010 | clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz; |
1011 | |
1012 | // Both fclk and ref_dppclk run on the same scemi clock. |
1013 | clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; |
1014 | |
1015 | /* TODO: set dtbclk in correct place */ |
1016 | clk_mgr->clks.dtbclk_en = true; |
1017 | dm_set_dcn_clocks(ctx: clk_mgr->ctx, clks: &clk_mgr->clks); |
1018 | dcn35_update_clocks_update_dpp_dto(clk_mgr: clk_mgr_int, context, safe_to_lower); |
1019 | |
1020 | dcn35_update_clocks_update_dtb_dto(clk_mgr: clk_mgr_int, context, ref_dtbclk_khz: clk_mgr->clks.ref_dtbclk_khz); |
1021 | } |
1022 | |
/* Clock manager entry points used on real DCN 3.5 silicon. */
static struct clk_mgr_funcs dcn35_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
	.update_clocks = dcn35_update_clocks,
	.init_clocks = dcn35_init_clocks,
	.enable_pme_wa = dcn35_enable_pme_wa,
	.are_clock_states_equal = dcn35_are_clock_states_equal,
	.notify_wm_ranges = dcn35_notify_wm_ranges,
	.set_low_power_state = dcn35_set_low_power_state,
	.exit_low_power_state = dcn35_exit_low_power_state,
	.is_ips_supported = dcn35_is_ips_supported,
	.set_idle_state = dcn35_set_ips_idle_state,
	.get_idle_state = dcn35_get_ips_idle_state
};
1037 | |
/* Reduced function table used on FPGA/emulation platforms. */
struct clk_mgr_funcs dcn35_fpga_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn35_update_clocks_fpga,
	.init_clocks = dcn35_init_clocks_fpga,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
1044 | |
1045 | void dcn35_clk_mgr_construct( |
1046 | struct dc_context *ctx, |
1047 | struct clk_mgr_dcn35 *clk_mgr, |
1048 | struct pp_smu_funcs *pp_smu, |
1049 | struct dccg *dccg) |
1050 | { |
1051 | struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 }; |
1052 | clk_mgr->base.base.ctx = ctx; |
1053 | clk_mgr->base.base.funcs = &dcn35_funcs; |
1054 | |
1055 | clk_mgr->base.pp_smu = pp_smu; |
1056 | |
1057 | clk_mgr->base.dccg = dccg; |
1058 | clk_mgr->base.dfs_bypass_disp_clk = 0; |
1059 | |
1060 | clk_mgr->base.dprefclk_ss_percentage = 0; |
1061 | clk_mgr->base.dprefclk_ss_divider = 1000; |
1062 | clk_mgr->base.ss_on_dprefclk = false; |
1063 | clk_mgr->base.dfs_ref_freq_khz = 48000; |
1064 | |
1065 | clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem( |
1066 | ctx: clk_mgr->base.base.ctx, |
1067 | type: DC_MEM_ALLOC_TYPE_FRAME_BUFFER, |
1068 | size: sizeof(struct dcn35_watermarks), |
1069 | addr: &clk_mgr->smu_wm_set.mc_address.quad_part); |
1070 | |
1071 | if (!clk_mgr->smu_wm_set.wm_set) { |
1072 | clk_mgr->smu_wm_set.wm_set = &dummy_wms; |
1073 | clk_mgr->smu_wm_set.mc_address.quad_part = 0; |
1074 | } |
1075 | ASSERT(clk_mgr->smu_wm_set.wm_set); |
1076 | |
1077 | smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem( |
1078 | ctx: clk_mgr->base.base.ctx, |
1079 | type: DC_MEM_ALLOC_TYPE_FRAME_BUFFER, |
1080 | size: sizeof(DpmClocks_t_dcn35), |
1081 | addr: &smu_dpm_clks.mc_address.quad_part); |
1082 | |
1083 | if (smu_dpm_clks.dpm_clks == NULL) { |
1084 | smu_dpm_clks.dpm_clks = &dummy_clocks; |
1085 | smu_dpm_clks.mc_address.quad_part = 0; |
1086 | } |
1087 | |
1088 | ASSERT(smu_dpm_clks.dpm_clks); |
1089 | |
1090 | clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(clk_mgr: &clk_mgr->base); |
1091 | |
1092 | if (clk_mgr->base.smu_ver) |
1093 | clk_mgr->base.smu_present = true; |
1094 | |
1095 | /* TODO: Check we get what we expect during bringup */ |
1096 | clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr: &clk_mgr->base); |
1097 | |
1098 | if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { |
1099 | dcn35_bw_params.wm_table = lpddr5_wm_table; |
1100 | } else { |
1101 | dcn35_bw_params.wm_table = ddr5_wm_table; |
1102 | } |
1103 | /* Saved clocks configured at boot for debug purposes */ |
1104 | dcn35_dump_clk_registers(regs_and_bypass: &clk_mgr->base.base.boot_snapshot, clk_mgr); |
1105 | |
1106 | clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(clk_mgr: &clk_mgr->base); |
1107 | clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; |
1108 | |
1109 | dce_clock_read_ss_info(dccg_dce: &clk_mgr->base); |
1110 | /*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/ |
1111 | |
1112 | dcn35_read_ss_info_from_lut(clk_mgr: &clk_mgr->base); |
1113 | |
1114 | clk_mgr->base.base.bw_params = &dcn35_bw_params; |
1115 | |
1116 | if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { |
1117 | int i; |
1118 | dcn35_get_dpm_table_from_smu(clk_mgr: &clk_mgr->base, smu_dpm_clks: &smu_dpm_clks); |
1119 | DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n" |
1120 | "NumDispClkLevelsEnabled: %d\n" |
1121 | "NumSocClkLevelsEnabled: %d\n" |
1122 | "VcnClkLevelsEnabled: %d\n" |
1123 | "FClkLevelsEnabled: %d\n" |
1124 | "NumMemPstatesEnabled: %d\n" |
1125 | "MinGfxClk: %d\n" |
1126 | "MaxGfxClk: %d\n" , |
1127 | smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled, |
1128 | smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled, |
1129 | smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled, |
1130 | smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled, |
1131 | smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled, |
1132 | smu_dpm_clks.dpm_clks->NumMemPstatesEnabled, |
1133 | smu_dpm_clks.dpm_clks->MinGfxClk, |
1134 | smu_dpm_clks.dpm_clks->MaxGfxClk); |
1135 | for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) { |
1136 | DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n" , |
1137 | i, |
1138 | smu_dpm_clks.dpm_clks->DcfClocks[i]); |
1139 | } |
1140 | for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) { |
1141 | DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n" , |
1142 | i, smu_dpm_clks.dpm_clks->DispClocks[i]); |
1143 | } |
1144 | for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) { |
1145 | DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n" , |
1146 | i, smu_dpm_clks.dpm_clks->SocClocks[i]); |
1147 | } |
1148 | for (i = 0; i < smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled; i++) { |
1149 | DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Freq[%d] = %d\n" , |
1150 | i, smu_dpm_clks.dpm_clks->FclkClocks_Freq[i]); |
1151 | DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Voltage[%d] = %d\n" , |
1152 | i, smu_dpm_clks.dpm_clks->FclkClocks_Voltage[i]); |
1153 | } |
1154 | for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) |
1155 | DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n" , |
1156 | i, smu_dpm_clks.dpm_clks->SocVoltage[i]); |
1157 | |
1158 | for (i = 0; i < smu_dpm_clks.dpm_clks->NumMemPstatesEnabled; i++) { |
1159 | DC_LOG_SMU("smu_dpm_clks.dpm_clks.MemPstateTable[%d].UClk = %d\n" |
1160 | "smu_dpm_clks.dpm_clks->MemPstateTable[%d].MemClk= %d\n" |
1161 | "smu_dpm_clks.dpm_clks->MemPstateTable[%d].Voltage = %d\n" , |
1162 | i, smu_dpm_clks.dpm_clks->MemPstateTable[i].UClk, |
1163 | i, smu_dpm_clks.dpm_clks->MemPstateTable[i].MemClk, |
1164 | i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage); |
1165 | } |
1166 | |
1167 | if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) { |
1168 | dcn35_clk_mgr_helper_populate_bw_params( |
1169 | clk_mgr: &clk_mgr->base, |
1170 | bios_info: ctx->dc_bios->integrated_info, |
1171 | clock_table: smu_dpm_clks.dpm_clks); |
1172 | } |
1173 | } |
1174 | |
1175 | if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) |
1176 | dm_helpers_free_gpu_mem(ctx: clk_mgr->base.base.ctx, type: DC_MEM_ALLOC_TYPE_FRAME_BUFFER, |
1177 | pvMem: smu_dpm_clks.dpm_clks); |
1178 | |
1179 | if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { |
1180 | bool ips_support = false; |
1181 | |
1182 | /*avoid call pmfw at init*/ |
1183 | ips_support = dcn35_smu_get_ips_supported(clk_mgr: &clk_mgr->base); |
1184 | if (ips_support) { |
1185 | ctx->dc->debug.ignore_pg = false; |
1186 | ctx->dc->debug.disable_dpp_power_gate = false; |
1187 | ctx->dc->debug.disable_hubp_power_gate = false; |
1188 | ctx->dc->debug.disable_dsc_power_gate = false; |
1189 | } else { |
1190 | /*let's reset the config control flag*/ |
1191 | ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ |
1192 | } |
1193 | } |
1194 | } |
1195 | |
1196 | void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) |
1197 | { |
1198 | struct clk_mgr_dcn35 *clk_mgr = TO_CLK_MGR_DCN35(clk_mgr_int); |
1199 | |
1200 | if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0) |
1201 | dm_helpers_free_gpu_mem(ctx: clk_mgr_int->base.ctx, type: DC_MEM_ALLOC_TYPE_FRAME_BUFFER, |
1202 | pvMem: clk_mgr->smu_wm_set.wm_set); |
1203 | } |
1204 | |