/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"

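/*
 * Copy DC's display configuration into the amdgpu powerplay display config.
 * Clock values arrive from DC in kHz and are divided by 10 here, matching
 * the 10 kHz units used elsewhere in this file for powerplay clocks. DPM is
 * then notified of the new configuration and the clocks are recomputed.
 * Only takes effect when DPM is enabled; always reports success.
 */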
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz / 10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz / 10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz / 10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz / 10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
					&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);

		amdgpu_dpm_compute_clocks(adev);
	}

	return true;
}

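/*
 * Fallback clock level tables (values in kHz) used when pplib cannot
 * report clock levels; see dm_pp_get_clock_levels_by_type().
 */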
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

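/*
 * Translate a DM clock type into the corresponding powerplay clock type.
 * Unknown types are logged and map to 0, which callers treat as invalid.
 */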
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

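/*
 * Translate a powerplay DAL power level into the corresponding DM DPM
 * clocks state; unknown levels map to DM_PP_CLOCKS_STATE_INVALID.
 */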
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
		enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

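/*
 * Copy clock levels reported by powerplay into DC's clock level table,
 * clamping the level count to DM_PP_MAX_CLOCK_LEVELS. The two helpers
 * below do the same for the latency and voltage variants.
 */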
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
				pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}

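/*
 * Query pplib for the clock levels of the requested type, falling back to
 * the default tables on error. For engine and memory clocks the level
 * count is then trimmed so that boosted levels above the validation clocks
 * are not exposed to DC.
 */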
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (amdgpu_dpm_get_clock_by_type(adev,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
		/* Error in pplib. Provide default values. */
		get_default_clock_levels(clk_type, dc_clks);
		return true;
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) {
		/* Error in pplib. Provide default values. */
		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
		validation_clks.engine_max_clock = 72000;
		validation_clks.memory_max_clock = 80000;
		validation_clks.level = 0;
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB: level: %d\n",
			validation_clks.level);

	/* Translate from units of 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the Validation Clocks */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation clock.
				 * That means the previous one is the highest
				 * non-boosted one.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_latency(adev,
			dc_to_pp_clock_type(clk_type),
			&pp_clks);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev,
			dc_to_pp_clock_type(clk_type),
			&pp_clk_info);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

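/*
 * Hand DC's watermark clock ranges to powerplay. For now this is only
 * wired up for the Polaris family (CHIP_POLARIS10 through CHIP_VEGAM).
 */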
bool dm_pp_notify_wm_clock_changes(
		const struct dc_context *ctx,
		struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	struct amdgpu_device *adev = ctx->driver_context;

	/*
	 * Limit this watermark setting for Polaris for now
	 * TODO: expand this to other ASICs
	 */
	if ((adev->asic_type >= CHIP_POLARIS10) &&
	    (adev->asic_type <= CHIP_VEGAM) &&
	    !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
				(void *)wm_with_clock_ranges))
		return true;

	return false;
}

bool dm_pp_apply_power_level_change_request(
		const struct dc_context *ctx,
		struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
		const struct dc_context *ctx,
		struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request);
	if (ret && (ret != -EOPNOTSUPP))
		return false;

	return true;
}

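/*
 * Report the current maximum clocks state and the maximum engine/memory
 * clocks from powerplay, converting the latter from 10 kHz units to kHz.
 */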
bool dm_pp_get_static_clocks(
		const struct dc_context *ctx,
		struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};

	if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info))
		return false;

	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}

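/*
 * Raven (pp_rv_*) callbacks: translate the generic pp_smu watermark range
 * sets (in MHz) into the SoC15 DMIF/MCIF watermark structures (in kHz) and
 * forward them to DPM. Watermark instances above 3 fall back to set A.
 */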
static void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
			&wm_with_clock_ranges);
}

static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_notify_smu_enable_pwe(adev);
}

static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_active_display_count(adev, count);
}

static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock);
}

static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock);
}

static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz);
}

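/*
 * The Navi (pp_nv_*) callbacks below mostly forward each request to an
 * amdgpu_dpm helper and translate its return code: -EOPNOTSUPP becomes
 * PP_SMU_RESULT_UNSUPPORTED, any other error becomes PP_SMU_RESULT_FAIL.
 */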
static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_set_active_display_count(adev, count);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		/* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	/* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL; 1: fail */
	ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_pstate_handshake_support(
		struct pp_smu *pp, bool pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	if (amdgpu_dpm_display_disable_memory_clock_switch(adev,
			!pstate_handshake_supported))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
	 * 1: fail
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev,
			max_clocks);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_uclk_dpm_states(adev,
			clock_values_in_khz,
			num_states);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

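/*
 * Populate the pp_smu function table based on the DCN version: Raven
 * callbacks for DCN 1.0/1.01, Navi callbacks for DCN 2.0 and Renoir
 * callbacks for DCN 2.1.
 */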
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* todo: set_pme_wa_enable causes a 4k@60Hz display to not light up */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* todo: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* todo: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* todo: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;

	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("smu version is not supported!\n");
		break;
	}
}