/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
#include "dml2_policy.h"
#include "dml2_translation_helper.h"
#include "dml2_mall_phantom.h"
#include "dml2_dc_resource_mgmt.h"

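/*
 * The initialize_dml2_* helpers below choose between the native SoC
 * bounding box construction path and translation of the parameters the
 * DC already holds, based on config.use_native_soc_bb_construction.
 */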
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_ip_params(dml2, in_dc, out);
	else
		dml2_translate_ip_params(in_dc, out);
}

static void initialize_dml2_soc_bbox(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_socbb_params(dml2, in_dc, out);
	else
		dml2_translate_socbb_params(in_dc, out);
}

static void initialize_dml2_soc_states(struct dml2_context *dml2,
		const struct dc *in_dc, const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_soc_states(dml2, in_dc, in_bbox, out);
	else
		dml2_translate_soc_states(in_dc, out, in_dc->dml.soc.num_states);
}

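/*
 * Copy the ODM mode, DPPs per surface, DSC state and DSC slice count
 * chosen by mode support back into the display config, select the DLG
 * reference clock (24 MHz on DCN 3.5/3.51, 50 MHz as the dGPU default),
 * and record which DC stream and plane each DML pipe maps to in the
 * scratch pipe-mapping tables.
 */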
static void map_hw_resources(struct dml2_context *dml2,
		struct dml_display_cfg_st *in_out_display_cfg, struct dml_mode_support_info_st *mode_support_info)
{
	unsigned int num_pipes = 0;
	int i, j;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		in_out_display_cfg->hw.ODMMode[i] = mode_support_info->ODMMode[i];
		in_out_display_cfg->hw.DPPPerSurface[i] = mode_support_info->DPPPerSurface[i];
		in_out_display_cfg->hw.DSCEnabled[i] = mode_support_info->DSCEnabled[i];
		in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
		in_out_display_cfg->hw.DLGRefClkFreqMHz = 24;
		if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
			dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
			/* dGPU defaults to 50 MHz */
			in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
		}
		for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
			if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
				dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
					  __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
				break;
			}
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[num_pipes] = true;
			num_pipes++;
		}
	}
}

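/*
 * Pack the inputs for dml_mode_support_ex() into the wrapper scratch and
 * call it. Returns non-zero when the display config is supported; the
 * lowest supported state index is left in
 * scratch.mode_support_params.out_lowest_state_idx.
 */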
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
	const struct dml_display_cfg_st *display_cfg,
	struct dml_mode_support_info_st *evaluation_info)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
	s->mode_support_params.in_display_cfg = display_cfg;
	s->mode_support_params.out_evaluation_info = evaluation_info;

	memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
	s->mode_support_params.out_lowest_state_idx = 0;

	return dml_mode_support_ex(&s->mode_support_params);
}

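/*
 * Attempt one optimization step on a passing configuration:
 * 1) With native p-state optimization enabled and DRAM clock change
 *    unsupported, convert the sub-120Hz timing with the longest frame time
 *    (driving exactly one non-MALL surface) to SubVP by appending a
 *    phantom timing/surface pair.
 * 2) Otherwise, for a single-timing config, tighten the ODM policy to
 *    combine 2:1 or 4:1 when enough unused DPPs are available, to
 *    minimize dispclk.
 * Returns true if *p->new_display_config / *p->new_policy were changed
 * and are worth re-validating.
 */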
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
	int unused_dpps = p->ip_params->max_num_dpp;
	int i, j;
	int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
	int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
	float frame_time_sec, max_frame_time_sec;
	int largest_blend_and_timing = 0;
	bool optimization_done = false;

	for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
		if (p->cur_display_config->plane.BlendingAndTiming[i] > largest_blend_and_timing)
			largest_blend_and_timing = p->cur_display_config->plane.BlendingAndTiming[i];
	}

	if (p->new_policy != p->cur_policy)
		*p->new_policy = *p->cur_policy;

	if (p->new_display_config != p->cur_display_config)
		*p->new_display_config = *p->cur_display_config;

	// Optimize P-State Support
	if (dml2->config.use_native_pstate_optimization) {
		if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
			// Find the <120Hz display with the highest refresh rate that isn't already SubVP
			subvp_timing_to_add = -1;
			subvp_surface_to_add = -1;
			max_frame_time_sec = 0;
			surface_count = 0;
			for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
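				/*
				 * refresh_rate_hz = pixel_clock_hz / (HTotal * VTotal);
				 * e.g. a 594 MHz pixel clock with HTotal 4400 and
				 * VTotal 2250 (4k60) gives 594000000 / 9900000 = 60 Hz.
				 */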
				refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
					(p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
				if (refresh_rate_hz < 120) {
					// Check its upstream surfaces to see if this one could be converted to subvp.
					dpps_needed = 0;
					for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
						if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
							p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
							dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
							subvp_surface_to_add = j;
							surface_count++;
						}
					}

					if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
						frame_time_sec = (float)1 / refresh_rate_hz;
						if (frame_time_sec > max_frame_time_sec) {
							max_frame_time_sec = frame_time_sec;
							subvp_timing_to_add = i;
						}
					}
				}
			}
			if (subvp_timing_to_add >= 0) {
				new_timing_index = p->new_display_config->num_timings++;
				new_surface_index = p->new_display_config->num_surfaces++;
				// Add a phantom pipe reflecting the main pipe's timing
				dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);

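				/*
				 * Convert the FW processing delay plus the p-state allow
				 * width from microseconds to a line count:
				 * lines = delay_us / 1e6 * pixel_clock_hz / HTotal
				 */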
				pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
					p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
					(p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
					(double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);

				subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;

				p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
				p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
					p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];

				p->new_display_config->output.OutputDisabled[new_timing_index] = true;

				p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;

				dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
				dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);

				p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
				p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
				p->new_display_config->plane.ViewportStationary[new_surface_index] = false;

				p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
				p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;

				p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;

				p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;

				p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;

				optimization_done = true;
			}
		}
	}

	// Optimize Clocks
	if (!optimization_done) {
		if (largest_blend_and_timing == 0 && p->cur_policy->ODMUse[0] == dml_odm_use_policy_combine_as_needed && dml2->config.minimize_dispclk_using_odm) {
			odms_needed = dml2_util_get_maximum_odm_combine_for_output(dml2->config.optimize_odm_4to1,
				p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;

			if (odms_needed <= unused_dpps) {
				unused_dpps -= odms_needed;

				if (odms_needed == 1) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
					optimization_done = true;
				} else if (odms_needed == 3) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_4to1;
					optimization_done = true;
				} else
					optimization_done = false;
			}
		}
	}

	return optimization_done;
}

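/*
 * Find the lowest power state that still supports DRAM clock change in
 * vactive while one of the four dummy p-state latencies is in effect, and
 * extract the G6 temp-read watermarks for it. The returned state index is
 * raised further until its DRAM speed meets the dummy table entry; -1 is
 * returned when no state qualifies.
 */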
static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
{
	struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
	struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;

	unsigned int dml_result = 0;
	int result = -1, i, j;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out the scratch data before each call */
	memset(s, 0, sizeof(struct dml2_calculate_lowest_supported_state_for_temp_read_scratch));
	memset(&s_global->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s_global->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations.
		 * This is also done in the DML1 driver code path, and hence
		 * display_state cannot be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return false;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		s->uclk_change_latencies[i] = dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us;
	}

	for (i = 0; i < 4; i++) {
		for (j = 0; j < dml2->v20.dml_core_ctx.states.num_states; j++) {
			dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
		}

		dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);

		if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
			map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
			dml_result = dml_mode_programming(&dml2->v20.dml_core_ctx, s_global->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			ASSERT(dml_result);

			dml2_extract_watermark_set(&dml2->v20.g6_temp_read_watermark_set, &dml2->v20.dml_core_ctx);
			dml2->v20.g6_temp_read_watermark_set.cstate_pstate.fclk_pstate_change_ns = dml2->v20.g6_temp_read_watermark_set.cstate_pstate.pstate_change_ns;

			result = s_global->mode_support_params.out_lowest_state_idx;

			while (dml2->v20.dml_core_ctx.states.state_array[result].dram_speed_mts < s_global->dummy_pstate_table[i].dram_speed_mts)
				result++;

			break;
		}
	}

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us = s->uclk_change_latencies[i];
	}

	return result;
}

static void copy_dummy_pstate_table(struct dummy_pstate_entry *dest, struct dummy_pstate_entry *src, unsigned int num_entries)
{
	for (int i = 0; i < num_entries; i++) {
		dest[i] = src[i];
	}
}

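/*
 * Returns true if any timing that blends more than one plane (MPO) is
 * also running with ODM combine; this combination is only allowed when
 * enable_windowed_mpo_odm is set (see does_configuration_meet_sw_policies
 * below).
 */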
static bool are_timings_requiring_odm_doing_blending(const struct dml_display_cfg_st *display_cfg,
		const struct dml_mode_support_info_st *evaluation_info)
{
	unsigned int planes_per_timing[__DML_NUM_PLANES__] = {0};
	int i;

	for (i = 0; i < display_cfg->num_surfaces; i++)
		planes_per_timing[display_cfg->plane.BlendingAndTiming[i]]++;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		if (planes_per_timing[i] > 1 && evaluation_info->ODMMode[i] != dml_odm_mode_bypass)
			return true;
	}

	return false;
}

static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const struct dml_display_cfg_st *display_cfg,
	const struct dml_mode_support_info_st *evaluation_info)
{
	bool pass = true;

	if (!ctx->config.enable_windowed_mpo_odm) {
		if (are_timings_requiring_odm_doing_blending(display_cfg, evaluation_info))
			pass = false;
	}

	return pass;
}

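/*
 * Core mode-support flow: build scaling params for every active pipe,
 * translate the DC state into a DML display config, run mode support and
 * the SW policy check, then repeatedly apply optimize_configuration()
 * while the optimized config keeps passing. If the last attempt fails,
 * mode support is re-run on the last passing config so the DML core state
 * matches what is returned.
 */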
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
		struct dc_state *display_state)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	unsigned int result = 0, i;
	unsigned int optimized_result = true;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out the scratch data before each call */
	memset(&s->cur_display_config, 0, sizeof(struct dml_display_cfg_st));
	memset(&s->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
	memset(&s->optimize_configuration_params, 0, sizeof(struct dml2_wrapper_optimize_configuration_params));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations.
		 * This is also done in the DML1 driver code path, and hence
		 * display_state cannot be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return false;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);
	if (!dml2->config.skip_hw_state_mapping)
		dml2_apply_det_buffer_allocation_policy(dml2, &s->cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&s->cur_display_config,
		&s->mode_support_info);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);

	// Try to optimize
	if (result) {
		s->cur_policy = dml2->v20.dml_core_ctx.policy;
		s->optimize_configuration_params.dml_core_ctx = &dml2->v20.dml_core_ctx;
		s->optimize_configuration_params.config = &dml2->config;
		s->optimize_configuration_params.ip_params = &dml2->v20.dml_core_ctx.ip;
		s->optimize_configuration_params.cur_display_config = &s->cur_display_config;
		s->optimize_configuration_params.cur_mode_support_info = &s->mode_support_info;
		s->optimize_configuration_params.cur_policy = &s->cur_policy;
		s->optimize_configuration_params.new_display_config = &s->new_display_config;
		s->optimize_configuration_params.new_policy = &s->new_policy;

		while (optimized_result && optimize_configuration(dml2, &s->optimize_configuration_params)) {
			dml2->v20.dml_core_ctx.policy = s->new_policy;
			optimized_result = pack_and_call_dml_mode_support_ex(dml2,
				&s->new_display_config,
				&s->mode_support_info);

			if (optimized_result)
				optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);

			// If the new optimized state is supported, then set current = new
			if (optimized_result) {
				s->cur_display_config = s->new_display_config;
				s->cur_policy = s->new_policy;
			} else {
				// Else, restore policy to current
				dml2->v20.dml_core_ctx.policy = s->cur_policy;
			}
		}

		// Optimization ended with a failing config, so restore the DML state to the last passing one
		if (!optimized_result) {
			result = pack_and_call_dml_mode_support_ex(dml2,
				&s->cur_display_config,
				&s->mode_support_info);
		}
	}

	if (result)
		map_hw_resources(dml2, &s->cur_display_config, &s->mode_support_info);

	return result;
}

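/*
 * Returns the index of the first non-SubVP stream whose
 * ignore_msa_timing_param flag marks it as DRR, or -1 if there is none.
 */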
static int find_drr_eligible_stream(struct dc_state *display_state)
{
	int i;

	for (i = 0; i < display_state->stream_count; i++) {
		if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
			&& display_state->streams[i]->ignore_msa_timing_param) {
			// Use the ignore_msa_timing_param flag to identify DRR
			return i;
		}
	}

	return -1;
}

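/*
 * P-state optimization around mode support. With advanced_pstate_switching
 * disabled (as hardcoded below), any passing config is accepted as-is and
 * a single-stream config additionally tries FW-based vblank stretch. The
 * loop adds SubVP phantom pipes one stream at a time, checking static
 * schedulability after each, and falls back to SubVP + DRR when exactly
 * one non-SubVP DRR stream remains; if nothing succeeds, all phantom
 * pipes are removed and the original config is re-validated.
 */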
static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	bool pstate_optimization_done = false;
	bool pstate_optimization_success = false;
	bool result = false;
	int drr_display_index = 0, non_svp_streams = 0;
	bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
	bool advanced_pstate_switching = false;

	display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
	display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;

	result = dml_mode_support_wrapper(dml2, display_state);

	if (!result) {
		pstate_optimization_done = true;
	} else if (!advanced_pstate_switching ||
		(s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp)) {
		pstate_optimization_success = true;
		pstate_optimization_done = true;
	}

	if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
		display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;

		result = dml_mode_support_wrapper(dml2, display_state);
	} else {
		non_svp_streams = display_state->stream_count;

		while (!pstate_optimization_done) {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			// Always try adding SVP first
			if (result)
				result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
			else
				pstate_optimization_done = true;

			if (result) {
				result = dml_mode_support_wrapper(dml2, display_state);
			} else {
				pstate_optimization_done = true;
			}

			if (result) {
				non_svp_streams--;

				if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
					if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
						pstate_optimization_success = true;
						pstate_optimization_done = true;
					} else {
						pstate_optimization_success = false;
						pstate_optimization_done = false;
					}
				} else {
					drr_display_index = find_drr_eligible_stream(display_state);

					// If there is only 1 remaining non-SubVP pipe that is DRR, check static
					// schedulability for SubVP + DRR.
					if (non_svp_streams == 1 && drr_display_index >= 0) {
						if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
							display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
							display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
							result = dml_mode_support_wrapper(dml2, display_state);
						}

						if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
							pstate_optimization_success = true;
							pstate_optimization_done = true;
						} else {
							pstate_optimization_success = false;
							pstate_optimization_done = false;
						}
					}

					if (pstate_optimization_success) {
						pstate_optimization_done = true;
					} else {
						pstate_optimization_done = false;
					}
				}
			}
		}
	}

	if (!pstate_optimization_success) {
		dml2_svp_remove_all_phantom_pipes(dml2, display_state);
		display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
		display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
		result = dml_mode_support_wrapper(dml2, display_state);
	}

	return result;
}

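/*
 * Run mode support (through the SVP/DRR p-state path or the plain
 * wrapper) and then program the mode at the higher of the lowest
 * supported state and the minimum state needed for G6 temp reads.
 */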
static bool call_dml_mode_support_and_programming(struct dc_state *context)
{
	unsigned int result = 0;
	unsigned int min_state;
	int min_state_for_g6_temp_read = 0;
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);

	ASSERT(min_state_for_g6_temp_read >= 0);

	if (!dml2->config.use_native_pstate_optimization) {
		result = optimize_pstate_with_svp_and_drr(dml2, context);
	} else {
		result = dml_mode_support_wrapper(dml2, context);
	}

	/* When trying to set certain FRL frequencies, min_state_for_g6_temp_read is
	 * reported as -1. This leads to an invalid value of min_state, causing crashes
	 * later on. Use the default logic for min_state only when
	 * min_state_for_g6_temp_read is a valid value; otherwise, use the value
	 * calculated by the DML directly.
	 */
	if (min_state_for_g6_temp_read >= 0)
		min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
	else
		min_state = s->mode_support_params.out_lowest_state_idx;

	if (result)
		result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);

	return result;
}

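/*
 * Full validate-and-build path: run mode support and programming, map the
 * DML pipes onto DC pipes, redo both once if the DET buffer configuration
 * had to change, then extract clocks, watermark sets A/B/D (set C carries
 * the G6 temp-read watermarks) and the RQ/DLG parameters into the DC
 * state.
 */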
static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
{
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	struct dml2_dcn_clocks out_clks;
	unsigned int result = 0;
	bool need_recalculation = false;

	if (!context || context->stream_count == 0)
		return true;

	/* Zero out the scratch and core state before each call */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	/* Initialize DET scratch */
	dml2_initialize_det_scratch(dml2);

	copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);

	result = call_dml_mode_support_and_programming(context);
	/* Call map dc pipes to map the pipes based on the DML output. To correctly determine
	 * whether recalculation is required, the resource context needs to reflect the number
	 * of active pipes, which is only known after dml2_map_dc_pipes is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping)
		dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);

	/* Verify and update the DET buffer configuration if needed. dml2_verify_det_buffer_configuration
	 * checks whether the DET buffer size needs to be updated; if so, it updates the DETOverride
	 * variable and sets the need_recalculation flag, in which case mode support is run again.
	 * Verification needs to run after dml_mode_programming because the getters return correct
	 * DET buffer values only after dml_mode_programming is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping) {
		need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
		if (need_recalculation) {
			/* Engage the DML again if recalculation is required. */
			call_dml_mode_support_and_programming(context);
			if (!dml2->config.skip_hw_state_mapping) {
				dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
			}
			need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
			ASSERT(need_recalculation == false);
		}
	}

	if (result) {
		unsigned int lowest_state_idx = s->mode_support_params.out_lowest_state_idx;
		out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.mp.Dispclk_calculated * 1000;
		out_clks.p_state_supported = s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported;
		if (in_dc->config.use_default_clock_table &&
			(lowest_state_idx < dml2->v20.dml_core_ctx.states.num_states - 1)) {
			lowest_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
			out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
		}

		out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
		out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
		out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
		out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
		out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
		out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
		context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(in_dc, context);

		if (!dml2->config.skip_hw_state_mapping) {
			/* Call dml2_calculate_rq_and_dlg_params */
			dml2_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml2, in_dc->res_pool->pipe_count);
		}

		dml2_copy_clocks_to_dc_state(&out_clks, context);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
		memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
		// Copy for deciding z-state use
		context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
	}

	return result;
}

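/*
 * Fast-validate path: translate the DC state into a DML display config
 * and run mode support plus the SW policy check, without mapping pipes or
 * programming a mode.
 */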
static bool dml2_validate_only(struct dc_state *context)
{
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	unsigned int result = 0;

	if (!context || context->stream_count == 0)
		return true;

	/* Zero out the scratch and core state before each call */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&dml2->v20.scratch.cur_display_config,
		&dml2->v20.scratch.mode_support_info);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);

	return (result == 1) ? true : false;
}

static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
{
	if (dc->debug.override_odm_optimization) {
		dml2->config.minimize_dispclk_using_odm = dc->debug.minimize_dispclk_using_odm;
	}
}

bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate)
{
	bool out = false;

	if (!(context->bw_ctx.dml2))
		return false;
	dml2_apply_debug_options(in_dc, context->bw_ctx.dml2);

	/* Use dml2_validate_only for the fast_validate path */
	if (fast_validate)
		out = dml2_validate_only(context);
	else
		out = dml2_validate_and_build_resource(in_dc, context);
	return out;
}

static inline struct dml2_context *dml2_allocate_memory(void)
{
	return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}

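/*
 * Select the DML project from the DCE version, then (re)build the IP
 * parameters, SoC bounding box and SoC states of the core context.
 */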
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	// Store config options
	(*dml2)->config = *config;

	switch (in_dc->ctx->dce_version) {
	case DCN_VERSION_3_5:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn35;
		break;
	case DCN_VERSION_3_51:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn351;
		break;
	case DCN_VERSION_3_2:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
		break;
	case DCN_VERSION_3_21:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn321;
		break;
	default:
		(*dml2)->v20.dml_core_ctx.project = dml_project_default;
		break;
	}

	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);

	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);

	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
}

bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	// Allocate Mode Lib Ctx
	*dml2 = dml2_allocate_memory();

	if (!(*dml2))
		return false;

	dml2_init(in_dc, config, dml2);

	return true;
}

void dml2_destroy(struct dml2_context *dml2)
{
	if (!dml2)
		return;

	kfree(dml2);
}

void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
	unsigned int *fclk_change_support, unsigned int *dram_clk_change_support)
{
	*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
	*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}

void dml2_copy(struct dml2_context *dst_dml2,
	struct dml2_context *src_dml2)
{
	/* Copy Mode Lib Ctx */
	memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
}

bool dml2_create_copy(struct dml2_context **dst_dml2,
	struct dml2_context *src_dml2)
{
	/* Allocate Mode Lib Ctx */
	*dst_dml2 = dml2_allocate_memory();

	if (!(*dst_dml2))
		return false;

	/* Copy Mode Lib Ctx */
	dml2_copy(*dst_dml2, src_dml2);

	return true;
}

void dml2_reinit(const struct dc *in_dc,
		const struct dml2_configuration_options *config,
		struct dml2_context **dml2)
{
	dml2_init(in_dc, config, dml2);
}
