/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "sumod.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
#include "sumo_dpm.h"
#include <linux/seq_file.h>

#define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SUMO_MINIMUM_ENGINE_CLOCK 800
#define BOOST_DPM_LEVEL 7

static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = {
	SUMO_UTC_DFLT_00,
	SUMO_UTC_DFLT_01,
	SUMO_UTC_DFLT_02,
	SUMO_UTC_DFLT_03,
	SUMO_UTC_DFLT_04,
	SUMO_UTC_DFLT_05,
	SUMO_UTC_DFLT_06,
	SUMO_UTC_DFLT_07,
	SUMO_UTC_DFLT_08,
	SUMO_UTC_DFLT_09,
	SUMO_UTC_DFLT_10,
	SUMO_UTC_DFLT_11,
	SUMO_UTC_DFLT_12,
	SUMO_UTC_DFLT_13,
	SUMO_UTC_DFLT_14,
};

static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = {
	SUMO_DTC_DFLT_00,
	SUMO_DTC_DFLT_01,
	SUMO_DTC_DFLT_02,
	SUMO_DTC_DFLT_03,
	SUMO_DTC_DFLT_04,
	SUMO_DTC_DFLT_05,
	SUMO_DTC_DFLT_06,
	SUMO_DTC_DFLT_07,
	SUMO_DTC_DFLT_08,
	SUMO_DTC_DFLT_09,
	SUMO_DTC_DFLT_10,
	SUMO_DTC_DFLT_11,
	SUMO_DTC_DFLT_12,
	SUMO_DTC_DFLT_13,
	SUMO_DTC_DFLT_14,
};

static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
{
	struct sumo_ps *ps = rps->ps_priv;

	return ps;
}

struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}
}

#define CGCG_CGTT_LOCAL0_MASK 0xE5BFFFFF
#define CGCG_CGTT_LOCAL1_MASK 0xEFFF07FF

static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	u32 local0;
	u32 local1;

	local0 = RREG32(CG_CGTT_LOCAL_0);
	local1 = RREG32(CG_CGTT_LOCAL_1);

	if (enable) {
		WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
		WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
	} else {
		WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
		WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
	}
}

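/*
 * Timer programming helpers for gfx clock gating.  Both use
 * r600_calculate_u_and_p() to split a target interval, measured
 * against the reference clock (xclk), into the period (p) and
 * unit (u) fields the CG registers expect.
 */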
static void sumo_program_git(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(SUMO_GICST_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK);
}

static void sumo_program_grsd(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);
	u32 grs = 256 * 25 / 100;

	r600_calculate_u_and_p(1, xclk, 14, &p, &u);

	WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
}

void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
{
	sumo_program_git(rdev);
	sumo_program_grsd(rdev);
}

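/*
 * Set up the RCU gfx power-gating state machine.  The per-family
 * constants (p_c, p_p, d_p, r_t, i_t) and the sequence/scratch values
 * are opaque hardware tuning parameters; CHIP_PALM (Wrestler) gets
 * its own set.  Each of the three programming passes below is
 * followed by a sumo_smu_pg_init() call.
 */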
static void sumo_gfx_powergating_initialize(struct radeon_device *rdev)
{
	u32 rcu_pwr_gating_cntl;
	u32 p, u;
	u32 p_c, p_p, d_p;
	u32 r_t, i_t;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->family == CHIP_PALM) {
		p_c = 4;
		d_p = 10;
		r_t = 10;
		i_t = 4;
		p_p = 50 + 1000/200 + 6 * 32;
	} else {
		p_c = 16;
		d_p = 50;
		r_t = 50;
		i_t = 50;
		p_p = 113;
	}

	WREG32(CG_SCRATCH2, 0x01B60A17);

	r600_calculate_u_and_p(SUMO_GFXPOWERGATINGT_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u),
		 ~(PGP_MASK | PGU_MASK));

	r600_calculate_u_and_p(SUMO_VOLTAGEDROPT_DFLT,
			       xclk, 16, &p, &u);

	WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u),
		 ~(PGP_MASK | PGU_MASK));

	if (rdev->family == CHIP_PALM) {
		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x10103210);
		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0x10101010);
	} else {
		WREG32_RCU(RCU_PWR_GATING_SEQ0, 0x76543210);
		WREG32_RCU(RCU_PWR_GATING_SEQ1, 0xFEDCBA98);
	}

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(1) | PWR_GATING_EN;
	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	}
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
	rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
	rcu_pwr_gating_cntl |= MPPU(p_p) | MPPD(50);
	WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
	rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
	rcu_pwr_gating_cntl |= DPPU(d_p) | DPPD(50);
	WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_4);
	rcu_pwr_gating_cntl &= ~(RT_MASK | IT_MASK);
	rcu_pwr_gating_cntl |= RT(r_t) | IT(i_t);
	WREG32_RCU(RCU_PWR_GATING_CNTL_4, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM)
		WREG32_RCU(RCU_PWR_GATING_CNTL_5, 0xA02);

	sumo_smu_pg_init(rdev);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PCV(p_c) | PGS(4) | PWR_GATING_EN;
	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	}
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
		rcu_pwr_gating_cntl |= DPPU(16) | DPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
	}

	sumo_smu_pg_init(rdev);

	rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL);
	rcu_pwr_gating_cntl &=
		~(RSVD_MASK | PCV_MASK | PGS_MASK);
	rcu_pwr_gating_cntl |= PGS(5) | PWR_GATING_EN;

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl |= PCV(4);
		rcu_pwr_gating_cntl &= ~PCP_MASK;
		rcu_pwr_gating_cntl |= PCP(0x77);
	} else
		rcu_pwr_gating_cntl |= PCV(11);
	WREG32_RCU(RCU_PWR_GATING_CNTL, rcu_pwr_gating_cntl);

	if (rdev->family == CHIP_PALM) {
		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_2);
		rcu_pwr_gating_cntl &= ~(MPPU_MASK | MPPD_MASK);
		rcu_pwr_gating_cntl |= MPPU(113) | MPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_2, rcu_pwr_gating_cntl);

		rcu_pwr_gating_cntl = RREG32_RCU(RCU_PWR_GATING_CNTL_3);
		rcu_pwr_gating_cntl &= ~(DPPU_MASK | DPPD_MASK);
		rcu_pwr_gating_cntl |= DPPU(22) | DPPD(50);
		WREG32_RCU(RCU_PWR_GATING_CNTL_3, rcu_pwr_gating_cntl);
	}

	sumo_smu_pg_init(rdev);
}

static void sumo_gfx_powergating_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_PWR_GATING_CNTL, DYN_PWR_DOWN_EN, ~DYN_PWR_DOWN_EN);
	else {
		WREG32_P(CG_PWR_GATING_CNTL, 0, ~DYN_PWR_DOWN_EN);
		RREG32(GB_ADDR_CONFIG);
	}
}

static int sumo_enable_clock_power_gating(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_initialize(rdev);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_initialize(rdev);
	if (pi->enable_mg_clock_gating)
		sumo_mg_clockgating_enable(rdev, true);
	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_enable(rdev, true);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_enable(rdev, true);

	return 0;
}

static void sumo_disable_clock_power_gating(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_clock_gating)
		sumo_gfx_clockgating_enable(rdev, false);
	if (pi->enable_gfx_power_gating)
		sumo_gfx_powergating_enable(rdev, false);
	if (pi->enable_mg_clock_gating)
		sumo_mg_clockgating_enable(rdev, false);
}

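/*
 * Derive the BSP/BSU register encodings from the state's highest
 * engine clock: asi/pasi scale 65535 against high_clk and are then
 * split into period/unit pairs, yielding pi->dsp for intermediate
 * levels and pi->psp for the top (and boost) level.
 */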
static void sumo_calculate_bsp(struct radeon_device *rdev,
			       u32 high_clk)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 xclk = radeon_get_xclk(rdev);

	pi->pasi = 65535 * 100 / high_clk;
	pi->asi = 65535 * 100 / high_clk;

	r600_calculate_u_and_p(pi->asi,
			       xclk, 16, &pi->bsp, &pi->bsu);

	r600_calculate_u_and_p(pi->pasi,
			       xclk, 16, &pi->pbsp, &pi->pbsu);

	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
}

static void sumo_init_bsp(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	WREG32(CG_BSP_0, pi->psp);
}


static void sumo_program_bsp(struct radeon_device *rdev,
			     struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	u32 i;
	u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk;

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		highest_engine_clock = pi->boost_pl.sclk;

	sumo_calculate_bsp(rdev, highest_engine_clock);

	for (i = 0; i < ps->num_levels - 1; i++)
		WREG32(CG_BSP_0 + (i * 4), pi->dsp);

	WREG32(CG_BSP_0 + (i * 4), pi->psp);

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		WREG32(CG_BSP_0 + (BOOST_DPM_LEVEL * 4), pi->psp);
}

static void sumo_write_at(struct radeon_device *rdev,
			  u32 index, u32 value)
{
	if (index == 0)
		WREG32(CG_AT_0, value);
	else if (index == 1)
		WREG32(CG_AT_1, value);
	else if (index == 2)
		WREG32(CG_AT_2, value);
	else if (index == 3)
		WREG32(CG_AT_3, value);
	else if (index == 4)
		WREG32(CG_AT_4, value);
	else if (index == 5)
		WREG32(CG_AT_5, value);
	else if (index == 6)
		WREG32(CG_AT_6, value);
	else if (index == 7)
		WREG32(CG_AT_7, value);
}

static void sumo_program_at(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	u32 asi;
	u32 i;
	u32 m_a;
	u32 a_t;
	u32 r[SUMO_MAX_HARDWARE_POWERLEVELS];
	u32 l[SUMO_MAX_HARDWARE_POWERLEVELS];

	r[0] = SUMO_R_DFLT0;
	r[1] = SUMO_R_DFLT1;
	r[2] = SUMO_R_DFLT2;
	r[3] = SUMO_R_DFLT3;
	r[4] = SUMO_R_DFLT4;

	l[0] = SUMO_L_DFLT0;
	l[1] = SUMO_L_DFLT1;
	l[2] = SUMO_L_DFLT2;
	l[3] = SUMO_L_DFLT3;
	l[4] = SUMO_L_DFLT4;

	for (i = 0; i < ps->num_levels; i++) {
		asi = (i == ps->num_levels - 1) ? pi->pasi : pi->asi;

		m_a = asi * ps->levels[i].sclk / 100;

		a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100);

		sumo_write_at(rdev, i, a_t);
	}

	if (ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
		asi = pi->pasi;

		m_a = asi * pi->boost_pl.sclk / 100;

		a_t = CG_R(m_a * r[ps->num_levels - 1] / 100) |
		      CG_L(m_a * l[ps->num_levels - 1] / 100);

		sumo_write_at(rdev, BOOST_DPM_LEVEL, a_t);
	}
}

static void sumo_program_tp(struct radeon_device *rdev)
{
	int i;
	enum r600_td td = R600_TD_DFLT;

	for (i = 0; i < SUMO_PM_NUMBER_OF_TC; i++) {
		WREG32_P(CG_FFCT_0 + (i * 4), UTC_0(sumo_utc[i]), ~UTC_0_MASK);
		WREG32_P(CG_FFCT_0 + (i * 4), DTC_0(sumo_dtc[i]), ~DTC_0_MASK);
	}

	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);

	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);

	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void sumo_program_vc(struct radeon_device *rdev, u32 vrc)
{
	WREG32(CG_FTV, vrc);
}

void sumo_clear_vc(struct radeon_device *rdev)
{
	WREG32(CG_FTV, 0);
}

void sumo_program_sstp(struct radeon_device *rdev)
{
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(SUMO_SST_DFLT,
			       xclk, 16, &p, &u);

	WREG32(CG_SSP, SSTU(u) | SST(p));
}

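/*
 * Each CG_SCLK_DPM_CTRL register packs the divider fields for four
 * sclk fstates, so the register offset and field are derived from
 * the power level index.
 */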
static void sumo_set_divider_value(struct radeon_device *rdev,
				   u32 index, u32 divider)
{
	u32 reg_index = index / 4;
	u32 field_index = index % 4;

	if (field_index == 0)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 SCLK_FSTATE_0_DIV(divider), ~SCLK_FSTATE_0_DIV_MASK);
	else if (field_index == 1)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 SCLK_FSTATE_1_DIV(divider), ~SCLK_FSTATE_1_DIV_MASK);
	else if (field_index == 2)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 SCLK_FSTATE_2_DIV(divider), ~SCLK_FSTATE_2_DIV_MASK);
	else if (field_index == 3)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 SCLK_FSTATE_3_DIV(divider), ~SCLK_FSTATE_3_DIV_MASK);
}

static void sumo_set_ds_dividers(struct radeon_device *rdev,
				 u32 index, u32 divider)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_sclk_ds) {
		u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_6);

		dpm_ctrl &= ~(0x7 << (index * 3));
		dpm_ctrl |= (divider << (index * 3));
		WREG32(CG_SCLK_DPM_CTRL_6, dpm_ctrl);
	}
}

static void sumo_set_ss_dividers(struct radeon_device *rdev,
				 u32 index, u32 divider)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_sclk_ds) {
		u32 dpm_ctrl = RREG32(CG_SCLK_DPM_CTRL_11);

		dpm_ctrl &= ~(0x7 << (index * 3));
		dpm_ctrl |= (divider << (index * 3));
		WREG32(CG_SCLK_DPM_CTRL_11, dpm_ctrl);
	}
}

static void sumo_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	u32 voltage_cntl = RREG32(CG_DPM_VOLTAGE_CNTL);

	voltage_cntl &= ~(DPM_STATE0_LEVEL_MASK << (index * 2));
	voltage_cntl |= (vid << (DPM_STATE0_LEVEL_SHIFT + index * 2));
	WREG32(CG_DPM_VOLTAGE_CNTL, voltage_cntl);
}

static void sumo_set_allos_gnb_slow(struct radeon_device *rdev, u32 index, u32 gnb_slow)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 temp = gnb_slow;
	u32 cg_sclk_dpm_ctrl_3;

	if (pi->driver_nbps_policy_disable)
		temp = 1;

	cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
	cg_sclk_dpm_ctrl_3 &= ~(GNB_SLOW_FSTATE_0_MASK << index);
	cg_sclk_dpm_ctrl_3 |= (temp << (GNB_SLOW_FSTATE_0_SHIFT + index));

	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
}

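/*
 * Program a single power level: engine clock divider, voltage index,
 * the ss/ds deep-sleep dividers (deep sleep is switched off entirely
 * while either divider index is 0), GNB slow mode and, when boost is
 * enabled, the per-level TDP limit.
 */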
static void sumo_program_power_level(struct radeon_device *rdev,
				     struct sumo_pl *pl, u32 index)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	int ret;
	struct atom_clock_dividers dividers;
	u32 ds_en = RREG32(DEEP_SLEEP_CNTL) & ENABLE_DS;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     pl->sclk, false, &dividers);
	if (ret)
		return;

	sumo_set_divider_value(rdev, index, dividers.post_div);

	sumo_set_vid(rdev, index, pl->vddc_index);

	if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) {
		if (ds_en)
			WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
	} else {
		sumo_set_ss_dividers(rdev, index, pl->ss_divider_index);
		sumo_set_ds_dividers(rdev, index, pl->ds_divider_index);

		if (!ds_en)
			WREG32_P(DEEP_SLEEP_CNTL, ENABLE_DS, ~ENABLE_DS);
	}

	sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);

	if (pi->enable_boost)
		sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit);
}

static void sumo_power_level_enable(struct radeon_device *rdev, u32 index, bool enable)
{
	u32 reg_index = index / 4;
	u32 field_index = index % 4;

	if (field_index == 0)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 enable ? SCLK_FSTATE_0_VLD : 0, ~SCLK_FSTATE_0_VLD);
	else if (field_index == 1)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 enable ? SCLK_FSTATE_1_VLD : 0, ~SCLK_FSTATE_1_VLD);
	else if (field_index == 2)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 enable ? SCLK_FSTATE_2_VLD : 0, ~SCLK_FSTATE_2_VLD);
	else if (field_index == 3)
		WREG32_P(CG_SCLK_DPM_CTRL + (reg_index * 4),
			 enable ? SCLK_FSTATE_3_VLD : 0, ~SCLK_FSTATE_3_VLD);
}

static bool sumo_dpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(CG_SCLK_DPM_CTRL_3) & DPM_SCLK_ENABLE)
		return true;
	else
		return false;
}

static void sumo_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, DPM_SCLK_ENABLE, ~DPM_SCLK_ENABLE);
}

static void sumo_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~DPM_SCLK_ENABLE);
}

static void sumo_set_forced_mode(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE_EN, ~FORCE_SCLK_STATE_EN);
	else
		WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_SCLK_STATE_EN);
}

static void sumo_set_forced_mode_enabled(struct radeon_device *rdev)
{
	int i;

	sumo_set_forced_mode(rdev, true);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SCLK_STATUS) & SCLK_OVERCLK_DETECT)
			break;
		udelay(1);
	}
}

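/*
 * Poll (up to rdev->usec_timeout) until both the current sclk index
 * and the current profile index in TARGET_AND_CURRENT_PROFILE_INDEX
 * read back as level 0.
 */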
static void sumo_wait_for_level_0(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) == 0)
			break;
		udelay(1);
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) == 0)
			break;
		udelay(1);
	}
}

static void sumo_set_forced_mode_disabled(struct radeon_device *rdev)
{
	sumo_set_forced_mode(rdev, false);
}

static void sumo_enable_power_level_0(struct radeon_device *rdev)
{
	sumo_power_level_enable(rdev, 0, true);
}

static void sumo_patch_boost_state(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *new_ps = sumo_get_ps(rps);

	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
		pi->boost_pl = new_ps->levels[new_ps->num_levels - 1];
		pi->boost_pl.sclk = pi->sys_info.boost_sclk;
		pi->boost_pl.vddc_index = pi->sys_info.boost_vid_2bit;
		pi->boost_pl.sclk_dpm_tdp_limit = pi->sys_info.sclk_dpm_tdp_limit_boost;
	}
}

static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev,
					     struct radeon_ps *new_rps,
					     struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
	u32 nbps1_old = 0;
	u32 nbps1_new = 0;

	if (old_ps != NULL)
		nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;

	nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;

	if (nbps1_old == 1 && nbps1_new == 0)
		sumo_smu_notify_alt_vddnb_change(rdev, 0, 0);
}

static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
					      struct radeon_ps *new_rps,
					      struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
	u32 nbps1_old = 0;
	u32 nbps1_new = 0;

	if (old_ps != NULL)
		nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;

	nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;

	if (nbps1_old == 0 && nbps1_new == 1)
		sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
}

static void sumo_enable_boost(struct radeon_device *rdev,
			      struct radeon_ps *rps,
			      bool enable)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);

	if (enable) {
		if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
			sumo_boost_state_enable(rdev, true);
	} else
		sumo_boost_state_enable(rdev, false);
}

static void sumo_set_forced_level(struct radeon_device *rdev, u32 index)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE(index), ~FORCE_SCLK_STATE_MASK);
}

static void sumo_set_forced_level_0(struct radeon_device *rdev)
{
	sumo_set_forced_level(rdev, 0);
}

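/*
 * Rewrite the low byte of CG_SCLK_DPM_CTRL_4 with a one-hot mask
 * selecting the state's top level, plus the boost level when the
 * state is boost-capable.
 */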
static void sumo_program_wl(struct radeon_device *rdev,
			    struct radeon_ps *rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);
	u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);

	dpm_ctrl4 &= 0xFFFFFF00;
	dpm_ctrl4 |= (1 << (new_ps->num_levels - 1));

	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		dpm_ctrl4 |= (1 << BOOST_DPM_LEVEL);

	WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
}

static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev,
					     struct radeon_ps *new_rps,
					     struct radeon_ps *old_rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *old_ps = sumo_get_ps(old_rps);
	u32 i;
	u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;

	for (i = 0; i < new_ps->num_levels; i++) {
		sumo_program_power_level(rdev, &new_ps->levels[i], i);
		sumo_power_level_enable(rdev, i, true);
	}

	for (i = new_ps->num_levels; i < n_current_state_levels; i++)
		sumo_power_level_enable(rdev, i, false);

	if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
		sumo_program_power_level(rdev, &pi->boost_pl, BOOST_DPM_LEVEL);
}

static void sumo_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

static void sumo_program_power_level_enter_state(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_5, SCLK_FSTATE_BOOTUP(0), ~SCLK_FSTATE_BOOTUP_MASK);
}

static void sumo_program_acpi_power_level(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     pi->acpi_pl.sclk,
					     false, &dividers);
	if (ret)
		return;

	WREG32_P(CG_ACPI_CNTL, SCLK_ACPI_DIV(dividers.post_div), ~SCLK_ACPI_DIV_MASK);
	WREG32_P(CG_ACPI_VOLTAGE_CNTL, 0, ~ACPI_VOLTAGE_EN);
}

static void sumo_program_bootup_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
	u32 i;

	sumo_program_power_level(rdev, &pi->boot_pl, 0);

	dpm_ctrl4 &= 0xFFFFFF00;
	WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);

	for (i = 1; i < 8; i++)
		sumo_power_level_enable(rdev, i, false);
}

static void sumo_setup_uvd_clocks(struct radeon_device *rdev,
				  struct radeon_ps *new_rps,
				  struct radeon_ps *old_rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (pi->enable_gfx_power_gating) {
		sumo_gfx_powergating_enable(rdev, false);
	}

	radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);

	if (pi->enable_gfx_power_gating) {
		if (!pi->disable_gfx_power_gating_in_uvd ||
		    !r600_is_uvd_state(new_rps->class, new_rps->class2))
			sumo_gfx_powergating_enable(rdev, true);
	}
}

static void sumo_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
						    struct radeon_ps *new_rps,
						    struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *current_ps = sumo_get_ps(old_rps);

	if ((new_rps->vclk == old_rps->vclk) &&
	    (new_rps->dclk == old_rps->dclk))
		return;

	if (new_ps->levels[new_ps->num_levels - 1].sclk >=
	    current_ps->levels[current_ps->num_levels - 1].sclk)
		return;

	sumo_setup_uvd_clocks(rdev, new_rps, old_rps);
}

static void sumo_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
						   struct radeon_ps *new_rps,
						   struct radeon_ps *old_rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(new_rps);
	struct sumo_ps *current_ps = sumo_get_ps(old_rps);

	if ((new_rps->vclk == old_rps->vclk) &&
	    (new_rps->dclk == old_rps->dclk))
		return;

	if (new_ps->levels[new_ps->num_levels - 1].sclk <
	    current_ps->levels[current_ps->num_levels - 1].sclk)
		return;

	sumo_setup_uvd_clocks(rdev, new_rps, old_rps);
}

void sumo_take_smu_control(struct radeon_device *rdev, bool enable)
{
	/* This bit selects who handles display phy powergating.
	 * Clear the bit to let atom handle it.
	 * Set it to let the driver handle it.
	 * For now we just let atom handle it.
	 */
#if 0
	u32 v = RREG32(DOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(DOUT_SCRATCH3, v);
#endif
}

static void sumo_enable_sclk_ds(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		u32 deep_sleep_cntl = RREG32(DEEP_SLEEP_CNTL);
		u32 deep_sleep_cntl2 = RREG32(DEEP_SLEEP_CNTL2);
		u32 t = 1;

		deep_sleep_cntl &= ~R_DIS;
		deep_sleep_cntl &= ~HS_MASK;
		deep_sleep_cntl |= HS(t > 4095 ? 4095 : t);

		deep_sleep_cntl2 |= LB_UFP_EN;
		deep_sleep_cntl2 &= INOUT_C_MASK;
		deep_sleep_cntl2 |= INOUT_C(0xf);

		WREG32(DEEP_SLEEP_CNTL2, deep_sleep_cntl2);
		WREG32(DEEP_SLEEP_CNTL, deep_sleep_cntl);
	} else
		WREG32_P(DEEP_SLEEP_CNTL, 0, ~ENABLE_DS);
}

static void sumo_program_bootup_at(struct radeon_device *rdev)
{
	WREG32_P(CG_AT_0, CG_R(0xffff), ~CG_R_MASK);
	WREG32_P(CG_AT_0, CG_L(0), ~CG_L_MASK);
}

static void sumo_reset_am(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, FIR_RESET, ~FIR_RESET);
}

static void sumo_start_am(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_RESET);
}

static void sumo_program_ttp(struct radeon_device *rdev)
{
	u32 xclk = radeon_get_xclk(rdev);
	u32 p, u;
	u32 cg_sclk_dpm_ctrl_5 = RREG32(CG_SCLK_DPM_CTRL_5);

	r600_calculate_u_and_p(1000,
			       xclk, 16, &p, &u);

	cg_sclk_dpm_ctrl_5 &= ~(TT_TP_MASK | TT_TU_MASK);
	cg_sclk_dpm_ctrl_5 |= TT_TP(p) | TT_TU(u);

	WREG32(CG_SCLK_DPM_CTRL_5, cg_sclk_dpm_ctrl_5);
}

static void sumo_program_ttt(struct radeon_device *rdev)
{
	u32 cg_sclk_dpm_ctrl_3 = RREG32(CG_SCLK_DPM_CTRL_3);
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	cg_sclk_dpm_ctrl_3 &= ~(GNB_TT_MASK | GNB_THERMTHRO_MASK);
	cg_sclk_dpm_ctrl_3 |= GNB_TT(pi->thermal_auto_throttling + 49);

	WREG32(CG_SCLK_DPM_CTRL_3, cg_sclk_dpm_ctrl_3);
}


static void sumo_enable_voltage_scaling(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		WREG32_P(CG_DPM_VOLTAGE_CNTL, DPM_VOLTAGE_EN, ~DPM_VOLTAGE_EN);
		WREG32_P(CG_CG_VOLTAGE_CNTL, 0, ~CG_VOLTAGE_EN);
	} else {
		WREG32_P(CG_CG_VOLTAGE_CNTL, CG_VOLTAGE_EN, ~CG_VOLTAGE_EN);
		WREG32_P(CG_DPM_VOLTAGE_CNTL, 0, ~DPM_VOLTAGE_EN);
	}
}

static void sumo_override_cnb_thermal_events(struct radeon_device *rdev)
{
	WREG32_P(CG_SCLK_DPM_CTRL_3, CNB_THERMTHRO_MASK_SCLK,
		 ~CNB_THERMTHRO_MASK_SCLK);
}

static void sumo_program_dc_hto(struct radeon_device *rdev)
{
	u32 cg_sclk_dpm_ctrl_4 = RREG32(CG_SCLK_DPM_CTRL_4);
	u32 p, u;
	u32 xclk = radeon_get_xclk(rdev);

	r600_calculate_u_and_p(100000,
			       xclk, 14, &p, &u);

	cg_sclk_dpm_ctrl_4 &= ~(DC_HDC_MASK | DC_HU_MASK);
	cg_sclk_dpm_ctrl_4 |= DC_HDC(p) | DC_HU(u);

	WREG32(CG_SCLK_DPM_CTRL_4, cg_sclk_dpm_ctrl_4);
}

static void sumo_force_nbp_state(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *new_ps = sumo_get_ps(rps);

	if (!pi->driver_nbps_policy_disable) {
		if (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
			WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_NB_PSTATE_1, ~FORCE_NB_PSTATE_1);
		else
			WREG32_P(CG_SCLK_DPM_CTRL_3, 0, ~FORCE_NB_PSTATE_1);
	}
}

u32 sumo_get_sleep_divider_from_id(u32 id)
{
	return 1 << id;
}

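/*
 * Return the largest deep-sleep divider id whose divided sclk still
 * meets the requested minimum (never below SUMO_MINIMUM_ENGINE_CLOCK),
 * or 0 if deep sleep is unusable at this clock.
 */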
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					 u32 sclk,
					 u32 min_sclk_in_sr)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > SUMO_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : SUMO_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->enable_sclk_ds)
		return 0;

	for (i = SUMO_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

static u32 sumo_get_valid_engine_clock(struct radeon_device *rdev,
				       u32 lower_limit)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 i;

	for (i = 0; i < pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries; i++) {
		if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
			return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
	}

	return pi->sys_info.sclk_voltage_mapping_table.entries[pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1].sclk_frequency;
}

static void sumo_patch_thermal_state(struct radeon_device *rdev,
				     struct sumo_ps *ps,
				     struct sumo_ps *current_ps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
	u32 current_vddc;
	u32 current_sclk;
	u32 current_index = 0;

	if (current_ps) {
		current_vddc = current_ps->levels[current_index].vddc_index;
		current_sclk = current_ps->levels[current_index].sclk;
	} else {
		current_vddc = pi->boot_pl.vddc_index;
		current_sclk = pi->boot_pl.sclk;
	}

	ps->levels[0].vddc_index = current_vddc;

	if (ps->levels[0].sclk > current_sclk)
		ps->levels[0].sclk = current_sclk;

	ps->levels[0].ss_divider_index =
		sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, sclk_in_sr);

	ps->levels[0].ds_divider_index =
		sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[0].sclk, SUMO_MINIMUM_ENGINE_CLOCK);

	if (ps->levels[0].ds_divider_index > ps->levels[0].ss_divider_index + 1)
		ps->levels[0].ds_divider_index = ps->levels[0].ss_divider_index + 1;

	if (ps->levels[0].ss_divider_index == ps->levels[0].ds_divider_index) {
		if (ps->levels[0].ss_divider_index > 1)
			ps->levels[0].ss_divider_index = ps->levels[0].ss_divider_index - 1;
	}

	if (ps->levels[0].ss_divider_index == 0)
		ps->levels[0].ds_divider_index = 0;

	if (ps->levels[0].ds_divider_index == 0)
		ps->levels[0].ss_divider_index = 0;
}

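/*
 * Patch the requested state in place.  Thermal states are collapsed
 * to a single conservative level; for everything else the levels are
 * raised to a valid engine clock at or above min_sclk, the ss/ds
 * divider pair is recomputed and sanity-limited, and the
 * boost/NBPS1/GNB-slow flags are derived from the state
 * classification bits.
 */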
static void sumo_apply_state_adjust_rules(struct radeon_device *rdev,
					  struct radeon_ps *new_rps,
					  struct radeon_ps *old_rps)
{
	struct sumo_ps *ps = sumo_get_ps(new_rps);
	struct sumo_ps *current_ps = sumo_get_ps(old_rps);
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	u32 min_voltage = 0; /* ??? */
	u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
	u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
	u32 i;

	if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
		return sumo_patch_thermal_state(rdev, ps, current_ps);

	if (pi->enable_boost) {
		if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
			ps->flags |= SUMO_POWERSTATE_FLAGS_BOOST_STATE;
	}

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) ||
	    (new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ||
	    (new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE))
		ps->flags |= SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].vddc_index < min_voltage)
			ps->levels[i].vddc_index = min_voltage;

		if (ps->levels[i].sclk < min_sclk)
			ps->levels[i].sclk =
				sumo_get_valid_engine_clock(rdev, min_sclk);

		ps->levels[i].ss_divider_index =
			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr);

		ps->levels[i].ds_divider_index =
			sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, SUMO_MINIMUM_ENGINE_CLOCK);

		if (ps->levels[i].ds_divider_index > ps->levels[i].ss_divider_index + 1)
			ps->levels[i].ds_divider_index = ps->levels[i].ss_divider_index + 1;

		if (ps->levels[i].ss_divider_index == ps->levels[i].ds_divider_index) {
			if (ps->levels[i].ss_divider_index > 1)
				ps->levels[i].ss_divider_index = ps->levels[i].ss_divider_index - 1;
		}

		if (ps->levels[i].ss_divider_index == 0)
			ps->levels[i].ds_divider_index = 0;

		if (ps->levels[i].ds_divider_index == 0)
			ps->levels[i].ss_divider_index = 0;

		if (ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
			ps->levels[i].allow_gnb_slow = 1;
		else if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ||
			 (new_rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC))
			ps->levels[i].allow_gnb_slow = 0;
		else if (i == ps->num_levels - 1)
			ps->levels[i].allow_gnb_slow = 0;
		else
			ps->levels[i].allow_gnb_slow = 1;
	}
}

static void sumo_cleanup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, false);
}

static int sumo_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static void sumo_update_current_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void sumo_update_requested_ps(struct radeon_device *rdev,
				     struct radeon_ps *rps)
{
	struct sumo_ps *new_ps = sumo_get_ps(rps);
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

int sumo_dpm_enable(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (sumo_dpm_enabled(rdev))
		return -EINVAL;

	sumo_program_bootup_state(rdev);
	sumo_init_bsp(rdev);
	sumo_reset_am(rdev);
	sumo_program_tp(rdev);
	sumo_program_bootup_at(rdev);
	sumo_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		sumo_program_ttp(rdev);
		sumo_program_ttt(rdev);
	}
	sumo_program_dc_hto(rdev);
	sumo_program_power_level_enter_state(rdev);
	sumo_enable_voltage_scaling(rdev, true);
	sumo_program_sstp(rdev);
	sumo_program_vc(rdev, SUMO_VRC_DFLT);
	sumo_override_cnb_thermal_events(rdev);
	sumo_start_dpm(rdev);
	sumo_wait_for_level_0(rdev);
	if (pi->enable_sclk_ds)
		sumo_enable_sclk_ds(rdev, true);
	if (pi->enable_boost)
		sumo_enable_boost_timer(rdev);

	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return 0;
}

int sumo_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	ret = sumo_enable_clock_power_gating(rdev);
	if (ret)
		return ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

void sumo_dpm_disable(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	if (!sumo_dpm_enabled(rdev))
		return;
	sumo_disable_clock_power_gating(rdev);
	if (pi->enable_sclk_ds)
		sumo_enable_sclk_ds(rdev, false);
	sumo_clear_vc(rdev);
	sumo_wait_for_level_0(rdev);
	sumo_stop_dpm(rdev);
	sumo_enable_voltage_scaling(rdev, false);

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		rdev->irq.dpm_thermal = false;
		radeon_irq_set(rdev);
	}

	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

int sumo_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	sumo_update_requested_ps(rdev, new_ps);

	if (pi->enable_dynamic_patch_ps)
		sumo_apply_state_adjust_rules(rdev,
					      &pi->requested_rps,
					      &pi->current_rps);

	return 0;
}

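/*
 * Full state-switch sequence: drop to forced level 0, reprogram the
 * power levels and the wl/bsp/at tables for the new state, then
 * cycle forced mode off/on/off.  Boost is disabled across the switch
 * and UVD clock changes are ordered against the engine clock change.
 */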
int sumo_dpm_set_power_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;

	if (pi->enable_dpm)
		sumo_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
	if (pi->enable_boost) {
		sumo_enable_boost(rdev, new_ps, false);
		sumo_patch_boost_state(rdev, new_ps);
	}
	if (pi->enable_dpm) {
		sumo_pre_notify_alt_vddnb_change(rdev, new_ps, old_ps);
		sumo_enable_power_level_0(rdev);
		sumo_set_forced_level_0(rdev);
		sumo_set_forced_mode_enabled(rdev);
		sumo_wait_for_level_0(rdev);
		sumo_program_power_levels_0_to_n(rdev, new_ps, old_ps);
		sumo_program_wl(rdev, new_ps);
		sumo_program_bsp(rdev, new_ps);
		sumo_program_at(rdev, new_ps);
		sumo_force_nbp_state(rdev, new_ps);
		sumo_set_forced_mode_disabled(rdev);
		sumo_set_forced_mode_enabled(rdev);
		sumo_set_forced_mode_disabled(rdev);
		sumo_post_notify_alt_vddnb_change(rdev, new_ps, old_ps);
	}
	if (pi->enable_boost)
		sumo_enable_boost(rdev, new_ps, true);
	if (pi->enable_dpm)
		sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

	return 0;
}

void sumo_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	sumo_update_current_ps(rdev, new_ps);
}

#if 0
void sumo_dpm_reset_asic(struct radeon_device *rdev)
{
	sumo_program_bootup_state(rdev);
	sumo_enable_power_level_0(rdev);
	sumo_set_forced_level_0(rdev);
	sumo_set_forced_mode_enabled(rdev);
	sumo_wait_for_level_0(rdev);
	sumo_set_forced_mode_disabled(rdev);
	sumo_set_forced_mode_enabled(rdev);
	sumo_set_forced_mode_disabled(rdev);
}
#endif

void sumo_dpm_setup_asic(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	sumo_initialize_m3_arb(rdev);
	pi->fw_version = sumo_get_running_fw_version(rdev);
	DRM_INFO("Found smc ucode version: 0x%08x\n", pi->fw_version);
	sumo_program_acpi_power_level(rdev);
	sumo_enable_acpi_pm(rdev);
	sumo_take_smu_control(rdev, true);
}

void sumo_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void sumo_patch_boot_state(struct radeon_device *rdev,
				  struct sumo_ps *ps)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	ps->num_levels = 1;
	ps->flags = 0;
	ps->levels[0] = pi->boot_pl;
}

static void sumo_parse_pplib_non_clock_info(struct radeon_device *rdev,
					    struct radeon_ps *rps,
					    struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					    u8 table_rev)
{
	struct sumo_ps *ps = sumo_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		sumo_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void sumo_parse_pplib_clock_info(struct radeon_device *rdev,
					struct radeon_ps *rps, int index,
					union pplib_clock_info *clock_info)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *ps = sumo_get_ps(rps);
	struct sumo_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;
	pl->sclk_dpm_tdp_limit = clock_info->sumo.tdpLimit;

	ps->num_levels = index + 1;

	if (pi->enable_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 4;
	}
}

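/*
 * Walk the ATOM PowerPlay state, clock-info and non-clock-info arrays
 * and build rdev->pm.dpm.ps, attaching a driver-private sumo_ps (up
 * to SUMO_MAX_HARDWARE_POWERLEVELS levels) to each state.
 */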
1450 | static int sumo_parse_power_table(struct radeon_device *rdev) |
1451 | { |
1452 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
1453 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; |
1454 | union pplib_power_state *power_state; |
1455 | int i, j, k, non_clock_array_index, clock_array_index; |
1456 | union pplib_clock_info *clock_info; |
1457 | struct _StateArray *state_array; |
1458 | struct _ClockInfoArray *clock_info_array; |
1459 | struct _NonClockInfoArray *non_clock_info_array; |
1460 | union power_info *power_info; |
1461 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
1462 | u16 data_offset; |
1463 | u8 frev, crev; |
1464 | u8 *power_state_offset; |
1465 | struct sumo_ps *ps; |
1466 | |
1467 | if (!atom_parse_data_header(ctx: mode_info->atom_context, index, NULL, |
1468 | frev: &frev, crev: &crev, data_start: &data_offset)) |
1469 | return -EINVAL; |
1470 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
1471 | |
1472 | state_array = (struct _StateArray *) |
1473 | (mode_info->atom_context->bios + data_offset + |
1474 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); |
1475 | clock_info_array = (struct _ClockInfoArray *) |
1476 | (mode_info->atom_context->bios + data_offset + |
1477 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); |
1478 | non_clock_info_array = (struct _NonClockInfoArray *) |
1479 | (mode_info->atom_context->bios + data_offset + |
1480 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); |
1481 | |
1482 | rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, |
1483 | sizeof(struct radeon_ps), |
1484 | GFP_KERNEL); |
1485 | if (!rdev->pm.dpm.ps) |
1486 | return -ENOMEM; |
1487 | power_state_offset = (u8 *)state_array->states; |
1488 | for (i = 0; i < state_array->ucNumEntries; i++) { |
1489 | u8 *idx; |
1490 | power_state = (union pplib_power_state *)power_state_offset; |
1491 | non_clock_array_index = power_state->v2.nonClockInfoIndex; |
1492 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
1493 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; |
1494 | if (!rdev->pm.power_state[i].clock_info) { |
1495 | kfree(objp: rdev->pm.dpm.ps); |
1496 | return -EINVAL; |
1497 | } |
1498 | ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL); |
1499 | if (ps == NULL) { |
1500 | kfree(objp: rdev->pm.dpm.ps); |
1501 | return -ENOMEM; |
1502 | } |
1503 | rdev->pm.dpm.ps[i].ps_priv = ps; |
1504 | k = 0; |
1505 | idx = (u8 *)&power_state->v2.clockInfoIndex[0]; |
1506 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { |
1507 | clock_array_index = idx[j]; |
1508 | if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) |
1509 | break; |
1510 | |
1511 | clock_info = (union pplib_clock_info *) |
1512 | ((u8 *)&clock_info_array->clockInfo[0] + |
1513 | (clock_array_index * clock_info_array->ucEntrySize)); |
1514 | sumo_parse_pplib_clock_info(rdev, |
1515 | rps: &rdev->pm.dpm.ps[i], index: k, |
1516 | clock_info); |
1517 | k++; |
1518 | } |
1519 | sumo_parse_pplib_non_clock_info(rdev, rps: &rdev->pm.dpm.ps[i], |
1520 | non_clock_info, |
1521 | table_rev: non_clock_info_array->ucEntrySize); |
1522 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
1523 | } |
1524 | rdev->pm.dpm.num_ps = state_array->ucNumEntries; |
1525 | return 0; |
1526 | } |
1527 | |
1528 | u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev, |
1529 | struct sumo_vid_mapping_table *vid_mapping_table, |
1530 | u32 vid_2bit) |
1531 | { |
1532 | u32 i; |
1533 | |
1534 | for (i = 0; i < vid_mapping_table->num_entries; i++) { |
1535 | if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) |
1536 | return vid_mapping_table->entries[i].vid_7bit; |
1537 | } |
1538 | |
1539 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; |
1540 | } |
1541 | |
1542 | #if 0 |
1543 | u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev, |
1544 | struct sumo_vid_mapping_table *vid_mapping_table, |
1545 | u32 vid_7bit) |
1546 | { |
1547 | u32 i; |
1548 | |
1549 | for (i = 0; i < vid_mapping_table->num_entries; i++) { |
1550 | if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) |
1551 | return vid_mapping_table->entries[i].vid_2bit; |
1552 | } |
1553 | |
1554 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; |
1555 | } |
1556 | #endif |
1557 | |
1558 | static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev, |
1559 | u32 vid_2bit) |
1560 | { |
1561 | struct sumo_power_info *pi = sumo_get_pi(rdev); |
1562 | u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, vid_mapping_table: &pi->sys_info.vid_mapping_table, vid_2bit); |
1563 | |
1564 | if (vid_7bit > 0x7C) |
1565 | return 0; |
1566 | |
1567 | return (15500 - vid_7bit * 125 + 5) / 10; |
1568 | } |
1569 | |
1570 | static void sumo_construct_display_voltage_mapping_table(struct radeon_device *rdev, |
1571 | struct sumo_disp_clock_voltage_mapping_table *disp_clk_voltage_mapping_table, |
1572 | ATOM_CLK_VOLT_CAPABILITY *table) |
1573 | { |
1574 | u32 i; |
1575 | |
1576 | for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { |
1577 | if (table[i].ulMaximumSupportedCLK == 0) |
1578 | break; |
1579 | |
1580 | disp_clk_voltage_mapping_table->display_clock_frequency[i] = |
1581 | table[i].ulMaximumSupportedCLK; |
1582 | } |
1583 | |
1584 | disp_clk_voltage_mapping_table->num_max_voltage_levels = i; |
1585 | |
1586 | if (disp_clk_voltage_mapping_table->num_max_voltage_levels == 0) { |
1587 | disp_clk_voltage_mapping_table->display_clock_frequency[0] = 80000; |
1588 | disp_clk_voltage_mapping_table->num_max_voltage_levels = 1; |
1589 | } |
1590 | } |
1591 | |
void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
					       struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
					       ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

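/*
 * Build the 2-bit index -> 7-bit VID mapping from the same "available
 * sclk" list.  Entries are first filed by their voltage index (skipping
 * out-of-range indices), then compacted so the used entries sit
 * contiguously at the start of the table.
 */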
void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
				      struct sumo_vid_mapping_table *vid_mapping_table,
				      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
				continue;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

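	/* Compact: pull each later non-empty entry down over the first
	 * hole; stop counting entries at the first hole that cannot be
	 * filled.
	 */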
	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
};

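/*
 * Cache everything the driver needs out of the ATOM IntegratedSystemInfo
 * table (only the v6 layout is supported here): bootup clocks, thermal
 * throttle limits, M3 arbiter defaults, boost parameters, and the
 * voltage/clock mapping tables built by the helpers above.
 */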
static int sumo_parse_sys_info_table(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 6) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_6.ulBootUpEngineClock);
		pi->sys_info.min_sclk = le32_to_cpu(igp_info->info_6.ulMinEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_6.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_6.usBootUpNBVoltage);
		if (igp_info->info_6.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_6.ucHtcTmpLmt;
		if (igp_info->info_6.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_6.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}
		for (i = 0; i < NUMBER_OF_M3ARB_PARAM_SETS; i++) {
			pi->sys_info.csr_m3_arb_cntl_default[i] =
				le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_DEFAULT[i]);
			pi->sys_info.csr_m3_arb_cntl_uvd[i] =
				le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_UVD[i]);
			pi->sys_info.csr_m3_arb_cntl_fs3d[i] =
				le32_to_cpu(igp_info->info_6.ulCSR_M3_ARB_CNTL_FS3D[i]);
		}
		pi->sys_info.sclk_dpm_boost_margin =
			le32_to_cpu(igp_info->info_6.SclkDpmBoostMargin);
		pi->sys_info.sclk_dpm_throttle_margin =
			le32_to_cpu(igp_info->info_6.SclkDpmThrottleMargin);
		pi->sys_info.sclk_dpm_tdp_limit_pg =
			le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitPG);
		pi->sys_info.gnb_tdp_limit = le16_to_cpu(igp_info->info_6.GnbTdpLimit);
		pi->sys_info.sclk_dpm_tdp_limit_boost =
			le16_to_cpu(igp_info->info_6.SclkDpmTdpLimitBoost);
		pi->sys_info.boost_sclk = le32_to_cpu(igp_info->info_6.ulBoostEngineCLock);
		pi->sys_info.boost_vid_2bit = igp_info->info_6.ulBoostVid_2bit;
		if (igp_info->info_6.EnableBoost)
			pi->sys_info.enable_boost = true;
		else
			pi->sys_info.enable_boost = false;
		sumo_construct_display_voltage_mapping_table(rdev,
							     &pi->sys_info.disp_clk_voltage_mapping_table,
							     igp_info->info_6.sDISPCLK_Voltage);
		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_6.sAvail_SCLK);
		sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
						 igp_info->info_6.sAvail_SCLK);
	}
	return 0;
}

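/*
 * Seed the boot and ACPI power levels from the bootup values cached by
 * sumo_parse_sys_info_table(), and start with that single level as the
 * current power state.
 */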
static void sumo_construct_boot_and_acpi_state(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->acpi_pl = pi->boot_pl;
	pi->current_ps.num_levels = 1;
	pi->current_ps.levels[0] = pi->boot_pl;
}

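/*
 * One-time DPM setup: allocate the driver-private power info, pick the
 * feature set (with chip-specific workarounds keyed off the hardware
 * revision), and parse the BIOS system-info and power tables.
 */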
int sumo_dpm_init(struct radeon_device *rdev)
{
	struct sumo_power_info *pi;
	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
	int ret;

	pi = kzalloc(sizeof(struct sumo_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	pi->driver_nbps_policy_disable = false;
	if ((rdev->family == CHIP_PALM) && (hw_rev < 3))
		pi->disable_gfx_power_gating_in_uvd = true;
	else
		pi->disable_gfx_power_gating_in_uvd = false;
	pi->enable_alt_vddnb = true;
	pi->enable_sclk_ds = true;
	pi->enable_dynamic_m3_arbiter = false;
	pi->enable_dynamic_patch_ps = true;
	/* Some PALM chips don't seem to properly ungate gfx when UVD is in use;
	 * for now just disable gfx PG.
	 */
	if (rdev->family == CHIP_PALM)
		pi->enable_gfx_power_gating = false;
	else
		pi->enable_gfx_power_gating = true;
	pi->enable_gfx_clock_gating = true;
	pi->enable_mg_clock_gating = true;
	pi->enable_auto_thermal_throttling = true;

	ret = sumo_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	sumo_construct_boot_and_acpi_state(rdev);

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = sumo_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->pasi = CYPRESS_HASI_DFLT;
	pi->asi = RV770_ASI_DFLT;
	pi->thermal_auto_throttling = pi->sys_info.htc_tmp_lmt;
	pi->enable_boost = pi->sys_info.enable_boost;
	pi->enable_dpm = true;

	return 0;
}

void sumo_dpm_print_power_state(struct radeon_device *rdev,
				struct radeon_ps *rps)
{
	int i;
	struct sumo_ps *ps = sumo_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct sumo_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}

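/*
 * debugfs: report the level the hardware is actually running, decoded
 * from TARGET_AND_CURRENT_PROFILE_INDEX.  BOOST_DPM_LEVEL selects the
 * dedicated boost level rather than an entry in the power state.
 */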
void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						      struct seq_file *m)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	struct sumo_ps *ps = sumo_get_ps(rps);
	struct sumo_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
		CURR_INDEX_SHIFT;

	if (current_index == BOOST_DPM_LEVEL) {
		pl = &pi->boost_pl;
		seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, pl->sclk,
			   sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
	} else if (current_index >= ps->num_levels) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		pl = &ps->levels[current_index];
		seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, pl->sclk,
			   sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
	}
}

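/* Same hardware decode as above, returning just the current sclk. */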
u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	struct sumo_ps *ps = sumo_get_ps(rps);
	struct sumo_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
		CURR_INDEX_SHIFT;

	if (current_index == BOOST_DPM_LEVEL) {
		pl = &pi->boost_pl;
		return pl->sclk;
	} else if (current_index >= ps->num_levels) {
		return 0;
	} else {
		pl = &ps->levels[current_index];
		return pl->sclk;
	}
}

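/*
 * Sumo is an IGP using system memory, so the reported memory clock is
 * simply the fixed bootup UMA clock.
 */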
u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}

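/* Decode the current level as above and convert its VID to millivolts. */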
u16 sumo_dpm_get_current_vddc(struct radeon_device *rdev)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	struct sumo_ps *ps = sumo_get_ps(rps);
	struct sumo_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >>
		CURR_INDEX_SHIFT;

	if (current_index == BOOST_DPM_LEVEL) {
		pl = &pi->boost_pl;
	} else if (current_index >= ps->num_levels) {
		return 0;
	} else {
		pl = &ps->levels[current_index];
	}
	return sumo_convert_voltage_index_to_value(rdev, pl->vddc_index);
}

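/* Tear down: free each state's private data, then the state array and
 * the driver-private power info.
 */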
void sumo_dpm_fini(struct radeon_device *rdev)
{
	int i;

	sumo_cleanup_asic(rdev); /* ??? */

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
}

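/*
 * Report the requested state's sclk range: level 0 is the lowest
 * performance level, the last level the highest.
 */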
u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct sumo_ps *requested_state = sumo_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}

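/*
 * Force the highest or lowest performance level by enabling only that
 * level and latching it as the forced level; "auto" re-enables every
 * level (and boost, if supported).  The repeated forced-mode
 * enable/disable sequence below mirrors the original code and
 * presumably lets the hardware settle on the forced level before the
 * force is released.
 */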
int sumo_dpm_force_performance_level(struct radeon_device *rdev,
				     enum radeon_dpm_forced_level level)
{
	struct sumo_power_info *pi = sumo_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	struct sumo_ps *ps = sumo_get_ps(rps);
	int i;

	if (ps->num_levels <= 1)
		return 0;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		if (pi->enable_boost)
			sumo_enable_boost(rdev, rps, false);
		sumo_power_level_enable(rdev, ps->num_levels - 1, true);
		sumo_set_forced_level(rdev, ps->num_levels - 1);
		sumo_set_forced_mode_enabled(rdev);
		for (i = 0; i < ps->num_levels - 1; i++) {
			sumo_power_level_enable(rdev, i, false);
		}
		sumo_set_forced_mode(rdev, false);
		sumo_set_forced_mode_enabled(rdev);
		sumo_set_forced_mode(rdev, false);
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		if (pi->enable_boost)
			sumo_enable_boost(rdev, rps, false);
		sumo_power_level_enable(rdev, 0, true);
		sumo_set_forced_level(rdev, 0);
		sumo_set_forced_mode_enabled(rdev);
		for (i = 1; i < ps->num_levels; i++) {
			sumo_power_level_enable(rdev, i, false);
		}
		sumo_set_forced_mode(rdev, false);
		sumo_set_forced_mode_enabled(rdev);
		sumo_set_forced_mode(rdev, false);
	} else {
		for (i = 0; i < ps->num_levels; i++) {
			sumo_power_level_enable(rdev, i, true);
		}
		if (pi->enable_boost)
			sumo_enable_boost(rdev, rps, true);
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}