1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright 2022 Advanced Micro Devices, Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: AMD |
24 | * |
25 | */ |
26 | #include "dcn32_fpu.h" |
27 | #include "dcn32/dcn32_resource.h" |
28 | #include "dcn20/dcn20_resource.h" |
29 | #include "display_mode_vba_util_32.h" |
30 | #include "dml/dcn32/display_mode_vba_32.h" |
31 | // We need this include for the WATERMARKS_* defines |
32 | #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" |
33 | #include "dcn30/dcn30_resource.h" |
34 | #include "link.h" |
35 | #include "dc_state_priv.h" |
36 | |
37 | #define DC_LOGGER_INIT(logger) |
38 | |
39 | static const struct subvp_high_refresh_list subvp_high_refresh_list = { |
40 | .min_refresh = 120, |
41 | .max_refresh = 175, |
42 | .res = { |
43 | {.width = 3840, .height = 2160, }, |
44 | {.width = 3440, .height = 1440, }, |
45 | {.width = 2560, .height = 1440, }, |
46 | {.width = 1920, .height = 1080, }}, |
47 | }; |
48 | |
49 | static const struct subvp_active_margin_list subvp_active_margin_list = { |
50 | .min_refresh = 55, |
51 | .max_refresh = 65, |
52 | .res = { |
53 | {.width = 2560, .height = 1440, }, |
54 | {.width = 1920, .height = 1080, }}, |
55 | }; |
56 | |
57 | struct _vcs_dpi_ip_params_st dcn3_2_ip = { |
58 | .gpuvm_enable = 0, |
59 | .gpuvm_max_page_table_levels = 4, |
60 | .hostvm_enable = 0, |
61 | .rob_buffer_size_kbytes = 128, |
62 | .det_buffer_size_kbytes = DCN3_2_DEFAULT_DET_SIZE, |
63 | .config_return_buffer_size_in_kbytes = 1280, |
64 | .compressed_buffer_segment_size_in_kbytes = 64, |
65 | .meta_fifo_size_in_kentries = 22, |
66 | .zero_size_buffer_entries = 512, |
67 | .compbuf_reserved_space_64b = 256, |
68 | .compbuf_reserved_space_zs = 64, |
69 | .dpp_output_buffer_pixels = 2560, |
70 | .opp_output_buffer_lines = 1, |
71 | .pixel_chunk_size_kbytes = 8, |
72 | .alpha_pixel_chunk_size_kbytes = 4, |
73 | .min_pixel_chunk_size_bytes = 1024, |
74 | .dcc_meta_buffer_size_bytes = 6272, |
75 | .meta_chunk_size_kbytes = 2, |
76 | .min_meta_chunk_size_bytes = 256, |
77 | .writeback_chunk_size_kbytes = 8, |
78 | .ptoi_supported = false, |
79 | .num_dsc = 4, |
80 | .maximum_dsc_bits_per_component = 12, |
81 | .maximum_pixels_per_line_per_dsc_unit = 6016, |
82 | .dsc422_native_support = true, |
83 | .is_line_buffer_bpp_fixed = true, |
84 | .line_buffer_fixed_bpp = 57, |
85 | .line_buffer_size_bits = 1171920, |
86 | .max_line_buffer_lines = 32, |
87 | .writeback_interface_buffer_size_kbytes = 90, |
88 | .max_num_dpp = 4, |
89 | .max_num_otg = 4, |
90 | .max_num_hdmi_frl_outputs = 1, |
91 | .max_num_wb = 1, |
92 | .max_dchub_pscl_bw_pix_per_clk = 4, |
93 | .max_pscl_lb_bw_pix_per_clk = 2, |
94 | .max_lb_vscl_bw_pix_per_clk = 4, |
95 | .max_vscl_hscl_bw_pix_per_clk = 4, |
96 | .max_hscl_ratio = 6, |
97 | .max_vscl_ratio = 6, |
98 | .max_hscl_taps = 8, |
99 | .max_vscl_taps = 8, |
100 | .dpte_buffer_size_in_pte_reqs_luma = 64, |
101 | .dpte_buffer_size_in_pte_reqs_chroma = 34, |
102 | .dispclk_ramp_margin_percent = 1, |
103 | .max_inter_dcn_tile_repeaters = 8, |
104 | .cursor_buffer_size = 16, |
105 | .cursor_chunk_size = 2, |
106 | .writeback_line_buffer_buffer_size = 0, |
107 | .writeback_min_hscl_ratio = 1, |
108 | .writeback_min_vscl_ratio = 1, |
109 | .writeback_max_hscl_ratio = 1, |
110 | .writeback_max_vscl_ratio = 1, |
111 | .writeback_max_hscl_taps = 1, |
112 | .writeback_max_vscl_taps = 1, |
113 | .dppclk_delay_subtotal = 47, |
114 | .dppclk_delay_scl = 50, |
115 | .dppclk_delay_scl_lb_only = 16, |
116 | .dppclk_delay_cnvc_formatter = 28, |
117 | .dppclk_delay_cnvc_cursor = 6, |
118 | .dispclk_delay_subtotal = 125, |
119 | .dynamic_metadata_vm_enabled = false, |
120 | .odm_combine_4to1_supported = false, |
121 | .dcc_supported = true, |
122 | .max_num_dp2p0_outputs = 2, |
123 | .max_num_dp2p0_streams = 4, |
124 | }; |
125 | |
126 | struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { |
127 | .clock_limits = { |
128 | { |
129 | .state = 0, |
130 | .dcfclk_mhz = 1564.0, |
131 | .fabricclk_mhz = 2500.0, |
132 | .dispclk_mhz = 2150.0, |
133 | .dppclk_mhz = 2150.0, |
134 | .phyclk_mhz = 810.0, |
135 | .phyclk_d18_mhz = 667.0, |
136 | .phyclk_d32_mhz = 625.0, |
137 | .socclk_mhz = 1200.0, |
138 | .dscclk_mhz = 716.667, |
139 | .dram_speed_mts = 18000.0, |
140 | .dtbclk_mhz = 1564.0, |
141 | }, |
142 | }, |
143 | .num_states = 1, |
144 | .sr_exit_time_us = 42.97, |
145 | .sr_enter_plus_exit_time_us = 49.94, |
146 | .sr_exit_z8_time_us = 285.0, |
147 | .sr_enter_plus_exit_z8_time_us = 320, |
148 | .writeback_latency_us = 12.0, |
149 | .round_trip_ping_latency_dcfclk_cycles = 263, |
150 | .urgent_latency_pixel_data_only_us = 4.0, |
151 | .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, |
152 | .urgent_latency_vm_data_only_us = 4.0, |
153 | .fclk_change_latency_us = 25, |
154 | .usr_retraining_latency_us = 2, |
155 | .smn_latency_us = 2, |
156 | .mall_allocated_for_dcn_mbytes = 64, |
157 | .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, |
158 | .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, |
159 | .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, |
160 | .pct_ideal_sdp_bw_after_urgent = 90.0, |
161 | .pct_ideal_fabric_bw_after_urgent = 67.0, |
162 | .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, |
163 | .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented |
164 | .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented |
165 | .pct_ideal_dram_bw_after_urgent_strobe = 67.0, |
166 | .max_avg_sdp_bw_use_normal_percent = 80.0, |
167 | .max_avg_fabric_bw_use_normal_percent = 60.0, |
168 | .max_avg_dram_bw_use_normal_strobe_percent = 50.0, |
169 | .max_avg_dram_bw_use_normal_percent = 15.0, |
170 | .num_chans = 24, |
171 | .dram_channel_width_bytes = 2, |
172 | .fabric_datapath_to_dcn_data_return_bytes = 64, |
173 | .return_bus_width_bytes = 64, |
174 | .downspread_percent = 0.38, |
175 | .dcn_downspread_percent = 0.5, |
176 | .dram_clock_change_latency_us = 400, |
177 | .dispclk_dppclk_vco_speed_mhz = 4300.0, |
178 | .do_urgent_latency_adjustment = true, |
179 | .urgent_latency_adjustment_fabric_clock_component_us = 1.0, |
180 | .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, |
181 | }; |
182 | |
183 | void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) |
184 | { |
185 | /* defaults */ |
186 | double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us; |
187 | double fclk_change_latency_us = clk_mgr->base.ctx->dc->dml.soc.fclk_change_latency_us; |
188 | double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us; |
189 | double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us; |
190 | /* For min clocks use as reported by PM FW and report those as min */ |
191 | uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz; |
192 | uint16_t min_dcfclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz; |
193 | uint16_t setb_min_uclk_mhz = min_uclk_mhz; |
194 | uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->base.ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz; |
195 | |
196 | dc_assert_fp_enabled(); |
197 | |
198 | /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */ |
199 | if (dcfclk_mhz_for_the_second_state) |
200 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state; |
201 | else |
202 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz; |
203 | |
204 | if (clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz) |
205 | setb_min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz; |
206 | |
207 | /* Set A - Normal - default values */ |
208 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true; |
209 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us; |
210 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us; |
211 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us; |
212 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; |
213 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; |
214 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; |
215 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; |
216 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; |
217 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; |
218 | |
219 | /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */ |
220 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true; |
221 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us; |
222 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us; |
223 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us; |
224 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; |
225 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; |
226 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF; |
227 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz; |
228 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF; |
229 | |
230 | /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */ |
231 | /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ |
232 | if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { |
233 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true; |
234 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; |
235 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; |
236 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; |
237 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; |
238 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; |
239 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; |
240 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF; |
241 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; |
242 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; |
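/* Dummy P-State table: one entry per UCLK DPM level, pairing that level's
 * DRAM speed with the dummy P-State latency assumed for it (latencies drop
 * as the memory clock rises).
 */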
243 | clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16; |
244 | clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; |
245 | clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16; |
246 | clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; |
247 | clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16; |
248 | clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8; |
249 | clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[3].memclk_mhz * 16; |
250 | clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5; |
251 | } |
252 | /* Set D - MALL - SR enter and exit times are specific to MALL, TBD after bringup or a later phase; for now use DRAM values / 2 */ |
253 | /* For MALL, DRAM clock change latency is N/A; for watermark calculations use the lowest dummy P-State latency value */ |
254 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true; |
255 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us; |
256 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us; |
257 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD |
258 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD |
259 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL; |
260 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; |
261 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF; |
262 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz; |
263 | clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF; |
264 | } |
265 | |
266 | /* |
267 | * Finds dummy_latency_index when MCLK switching using firmware based |
268 | * vblank stretch is enabled. This function will iterate through the |
269 | * table of dummy pstate latencies until the lowest value that allows |
270 | * dm_allow_self_refresh_and_mclk_switch to happen is found |
271 | */ |
272 | int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, |
273 | struct dc_state *context, |
274 | display_e2e_pipe_params_st *pipes, |
275 | int pipe_cnt, |
276 | int vlevel) |
277 | { |
278 | const int max_latency_table_entries = 4; |
279 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
280 | int dummy_latency_index = 0; |
281 | enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; |
282 | |
283 | dc_assert_fp_enabled(); |
284 | |
285 | while (dummy_latency_index < max_latency_table_entries) { |
286 | if (temp_clock_change_support != dm_dram_clock_change_unsupported) |
287 | vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; |
288 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = |
289 | dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; |
290 | dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); |
291 | |
292 | /* for subvp + DRR case, if subvp pipes are still present we support pstate */ |
293 | if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported && |
294 | dcn32_subvp_in_use(dc, context)) |
295 | vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; |
296 | |
297 | if (vlevel < context->bw_ctx.dml.vba.soc.num_states && |
298 | vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) |
299 | break; |
300 | |
301 | dummy_latency_index++; |
302 | } |
303 | |
304 | if (dummy_latency_index == max_latency_table_entries) { |
305 | ASSERT(dummy_latency_index != max_latency_table_entries); |
306 | /* If the execution gets here, it means dummy p_states are |
307 | * not possible. This should never happen and would mean |
308 | * something is severely wrong. |
309 | * Here we reset dummy_latency_index to 3, because it is |
310 | * better to have underflows than system crashes. |
311 | */ |
312 | dummy_latency_index = max_latency_table_entries - 1; |
313 | } |
314 | |
315 | return dummy_latency_index; |
316 | } |
317 | |
318 | /** |
319 | * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes |
320 | * and populate pipe_ctx with those params. |
321 | * @dc: [in] current dc state |
322 | * @context: [in] new dc state |
323 | * @pipes: [in] DML pipe params array |
324 | * @pipe_cnt: [in] DML pipe count |
325 | * |
326 | * This function must be called AFTER the phantom pipes are added to context |
327 | * and run through DML (so that the DLG params for the phantom pipes can be |
328 | * populated), and BEFORE we program the timing for the phantom pipes. |
329 | */ |
330 | void dcn32_helper_populate_phantom_dlg_params(struct dc *dc, |
331 | struct dc_state *context, |
332 | display_e2e_pipe_params_st *pipes, |
333 | int pipe_cnt) |
334 | { |
335 | uint32_t i, pipe_idx; |
336 | |
337 | dc_assert_fp_enabled(); |
338 | |
339 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
340 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
341 | |
342 | if (!pipe->stream) |
343 | continue; |
344 | |
345 | if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { |
346 | pipes[pipe_idx].pipe.dest.vstartup_start = |
347 | get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); |
348 | pipes[pipe_idx].pipe.dest.vupdate_offset = |
349 | get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); |
350 | pipes[pipe_idx].pipe.dest.vupdate_width = |
351 | get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); |
352 | pipes[pipe_idx].pipe.dest.vready_offset = |
353 | get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); |
354 | pipe->pipe_dlg_param = pipes[pipe_idx].pipe.dest; |
355 | } |
356 | pipe_idx++; |
357 | } |
358 | } |
359 | |
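/* Return the limiting (lowest) of the ideal DRAM, fabric, and SDP bandwidths
 * for the given clock entry, each derated by its post-urgent efficiency
 * percentage from dcn3_2_soc.
 */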
360 | static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry) |
361 | { |
362 | float memory_bw_kbytes_sec; |
363 | float fabric_bw_kbytes_sec; |
364 | float sdp_bw_kbytes_sec; |
365 | float limiting_bw_kbytes_sec; |
366 | |
367 | memory_bw_kbytes_sec = entry->dram_speed_mts * |
368 | dcn3_2_soc.num_chans * |
369 | dcn3_2_soc.dram_channel_width_bytes * |
370 | ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100); |
371 | |
372 | fabric_bw_kbytes_sec = entry->fabricclk_mhz * |
373 | dcn3_2_soc.return_bus_width_bytes * |
374 | ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100); |
375 | |
376 | sdp_bw_kbytes_sec = entry->dcfclk_mhz * |
377 | dcn3_2_soc.return_bus_width_bytes * |
378 | ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100); |
379 | |
380 | limiting_bw_kbytes_sec = memory_bw_kbytes_sec; |
381 | |
382 | if (fabric_bw_kbytes_sec < limiting_bw_kbytes_sec) |
383 | limiting_bw_kbytes_sec = fabric_bw_kbytes_sec; |
384 | |
385 | if (sdp_bw_kbytes_sec < limiting_bw_kbytes_sec) |
386 | limiting_bw_kbytes_sec = sdp_bw_kbytes_sec; |
387 | |
388 | return limiting_bw_kbytes_sec; |
389 | } |
390 | |
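/* Given an entry with only one of DCFCLK, FCLK or DRAM speed populated, derive
 * the other two clocks so that the SDP, fabric and DRAM paths all provide the
 * same ideal bandwidth.
 */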
391 | static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry) |
392 | { |
393 | if (entry->dcfclk_mhz > 0) { |
394 | float bw_on_sdp = entry->dcfclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100); |
395 | |
396 | entry->fabricclk_mhz = bw_on_sdp / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100)); |
397 | entry->dram_speed_mts = bw_on_sdp / (dcn3_2_soc.num_chans * |
398 | dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100)); |
399 | } else if (entry->fabricclk_mhz > 0) { |
400 | float bw_on_fabric = entry->fabricclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100); |
401 | |
402 | entry->dcfclk_mhz = bw_on_fabric / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100)); |
403 | entry->dram_speed_mts = bw_on_fabric / (dcn3_2_soc.num_chans * |
404 | dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100)); |
405 | } else if (entry->dram_speed_mts > 0) { |
406 | float bw_on_dram = entry->dram_speed_mts * dcn3_2_soc.num_chans * |
407 | dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100); |
408 | |
409 | entry->fabricclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100)); |
410 | entry->dcfclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100)); |
411 | } |
412 | } |
413 | |
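/* Insert the entry into the table sorted by net_bw_in_kbytes_sec in ascending
 * order, shifting any existing entries at or after the insertion point back by
 * one slot.
 */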
414 | static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table, |
415 | unsigned int *num_entries, |
416 | struct _vcs_dpi_voltage_scaling_st *entry) |
417 | { |
418 | int i = 0; |
419 | int index = 0; |
420 | |
421 | dc_assert_fp_enabled(); |
422 | |
423 | if (*num_entries == 0) { |
424 | table[0] = *entry; |
425 | (*num_entries)++; |
426 | } else { |
427 | while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) { |
428 | index++; |
429 | if (index >= *num_entries) |
430 | break; |
431 | } |
432 | |
433 | for (i = *num_entries; i > index; i--) |
434 | table[i] = table[i - 1]; |
435 | |
436 | table[index] = *entry; |
437 | (*num_entries)++; |
438 | } |
439 | } |
440 | |
441 | /** |
442 | * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream |
443 | * @dc: current dc state |
444 | * @context: new dc state |
445 | * @ref_pipe: Main pipe for the phantom stream |
446 | * @phantom_stream: target phantom stream state |
447 | * @pipes: DML pipe params |
448 | * @pipe_cnt: number of DML pipes |
449 | * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe) |
450 | * |
451 | * Set timing params of the phantom stream based on calculated output from DML. |
452 | * This function first gets the DML pipe index using the DC pipe index, then |
453 | * calls into DML (get_subviewport_lines_needed_in_mall) to get the number of |
454 | * lines required for SubVP MCLK switching and assigns to the phantom stream |
455 | * accordingly. |
456 | * |
457 | * - The number of SubVP lines calculated in DML does not take into account |
458 | * FW processing delays and required pstate allow width, so we must include |
459 | * that separately. |
460 | * |
461 | * - Set phantom backporch = vstartup of main pipe |
462 | */ |
463 | void dcn32_set_phantom_stream_timing(struct dc *dc, |
464 | struct dc_state *context, |
465 | struct pipe_ctx *ref_pipe, |
466 | struct dc_stream_state *phantom_stream, |
467 | display_e2e_pipe_params_st *pipes, |
468 | unsigned int pipe_cnt, |
469 | unsigned int dc_pipe_idx) |
470 | { |
471 | unsigned int i, pipe_idx; |
472 | struct pipe_ctx *pipe; |
473 | uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines; |
474 | unsigned int num_dpp; |
475 | unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel; |
476 | unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; |
477 | unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel]; |
478 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
479 | struct dc_stream_state *main_stream = ref_pipe->stream; |
480 | |
481 | dc_assert_fp_enabled(); |
482 | |
483 | // Find DML pipe index (pipe_idx) using dc_pipe_idx |
484 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
485 | pipe = &context->res_ctx.pipe_ctx[i]; |
486 | |
487 | if (!pipe->stream) |
488 | continue; |
489 | |
490 | if (i == dc_pipe_idx) |
491 | break; |
492 | |
493 | pipe_idx++; |
494 | } |
495 | |
496 | // Calculate lines required for pstate allow width and FW processing delays |
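// Units: (delay in us / 1e6) [s] * pixel rate [pixels/s] / h_total [pixels/line] = lines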
497 | pstate_width_fw_delay_lines = ((double)(dc->caps.subvp_fw_processing_delay_us + |
498 | dc->caps.subvp_pstate_allow_width_us) / 1000000) * |
499 | (ref_pipe->stream->timing.pix_clk_100hz * 100) / |
500 | (double)ref_pipe->stream->timing.h_total; |
501 | |
502 | // Update clks_cfg for calling into recalculate |
503 | pipes[0].clks_cfg.voltage = vlevel; |
504 | pipes[0].clks_cfg.dcfclk_mhz = dcfclk; |
505 | pipes[0].clks_cfg.socclk_mhz = socclk; |
506 | |
507 | // DML calculation for MALL region doesn't take into account FW delay |
508 | // and required pstate allow width for multi-display cases |
509 | /* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned |
510 | * to 2 swaths (i.e. 16 lines) |
511 | */ |
512 | phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) + |
513 | pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines; |
514 | |
515 | // W/A for DCC corruption with certain high resolution timings. |
516 | // Determining if pipe split is used. If so, add meta_row_height to the phantom vactive. |
517 | num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]]; |
518 | phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0; |
519 | |
520 | /* dc->debug.subvp_extra_lines is 0 by default */ |
521 | phantom_vactive += dc->debug.subvp_extra_lines; |
522 | |
523 | // For backporch of phantom pipe, use vstartup of the main pipe |
524 | phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); |
525 | |
526 | phantom_stream->dst.y = 0; |
527 | phantom_stream->dst.height = phantom_vactive; |
528 | /* When scaling, DML provides the end to end required number of lines for MALL. |
529 | * dst.height is always correct for this case, but src.height is not which causes a |
530 | * delta between main and phantom pipe scaling outputs. Need to adjust src.height on |
531 | * phantom for this case. |
532 | */ |
533 | phantom_stream->src.y = 0; |
534 | phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height; |
535 | |
536 | phantom_stream->timing.v_addressable = phantom_vactive; |
537 | phantom_stream->timing.v_front_porch = 1; |
538 | phantom_stream->timing.v_total = phantom_stream->timing.v_addressable + |
539 | phantom_stream->timing.v_front_porch + |
540 | phantom_stream->timing.v_sync_width + |
541 | phantom_bp; |
542 | phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing |
543 | } |
544 | |
545 | /** |
546 | * dcn32_get_num_free_pipes - Calculate number of free pipes |
547 | * @dc: current dc state |
548 | * @context: new dc state |
549 | * |
550 | * This function assumes that a "used" pipe is a pipe that has |
551 | * both a stream and a plane assigned to it. |
552 | * |
553 | * Return: Number of free pipes available in the context |
554 | */ |
555 | static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context) |
556 | { |
557 | unsigned int i; |
558 | unsigned int free_pipes = 0; |
559 | unsigned int num_pipes = 0; |
560 | |
561 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
562 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
563 | |
564 | if (pipe->stream && !pipe->top_pipe) { |
565 | while (pipe) { |
566 | num_pipes++; |
567 | pipe = pipe->bottom_pipe; |
568 | } |
569 | } |
570 | } |
571 | |
572 | free_pipes = dc->res_pool->pipe_count - num_pipes; |
573 | return free_pipes; |
574 | } |
575 | |
576 | /** |
577 | * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP. |
578 | * @dc: current dc state |
579 | * @context: new dc state |
580 | * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned |
581 | * |
582 | * We enter this function if we are Sub-VP capable (i.e. enough pipes available) |
583 | * and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if |
584 | * we are forcing SubVP P-State switching on the current config. |
585 | * |
586 | * The number of pipes used for the chosen surface must be less than or equal to the |
587 | * number of free pipes available. |
588 | * |
589 | * In general we choose surfaces with the longest frame time first (better for SubVP + VBLANK). |
590 | * For multi-display cases the ActiveDRAMClockChangeMargin doesn't provide enough info on its own |
591 | * for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't |
592 | * support MCLK switching naturally [i.e. ACTIVE or VBLANK]). |
593 | * |
594 | * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false. |
595 | */ |
596 | static bool dcn32_assign_subvp_pipe(struct dc *dc, |
597 | struct dc_state *context, |
598 | unsigned int *index) |
599 | { |
600 | unsigned int i, pipe_idx; |
601 | unsigned int max_frame_time = 0; |
602 | bool valid_assignment_found = false; |
603 | unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context); |
604 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
605 | |
606 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
607 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
608 | unsigned int num_pipes = 0; |
609 | unsigned int refresh_rate = 0; |
610 | |
611 | if (!pipe->stream) |
612 | continue; |
613 | |
614 | // Round up |
615 | refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + |
616 | pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) |
617 | / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); |
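// e.g. 3840x2160@60 (pix_clk 594MHz, h_total 4400, v_total 2250): 594000000 / (2250 * 4400) = 60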
618 | /* SubVP pipe candidate requirements: |
619 | * - Refresh rate < 120hz |
620 | * - Not able to switch in vactive naturally (switching in active means the |
621 | * DET provides enough buffer to hide the P-State switch latency -- trying |
622 | * to combine this with SubVP can cause issues with the scheduling). |
623 | * - Not TMZ surface |
624 | */ |
625 | if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && |
626 | !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && |
627 | (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && |
628 | dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && |
629 | (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && |
630 | !pipe->plane_state->address.tmz_surface && |
631 | (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || |
632 | (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && |
633 | dcn32_allow_subvp_with_active_margin(pipe)))) { |
634 | while (pipe) { |
635 | num_pipes++; |
636 | pipe = pipe->bottom_pipe; |
637 | } |
638 | |
639 | pipe = &context->res_ctx.pipe_ctx[i]; |
640 | if (num_pipes <= free_pipes) { |
641 | struct dc_stream_state *stream = pipe->stream; |
642 | unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total / |
643 | (double)(stream->timing.pix_clk_100hz * 100)) * 1000000; |
644 | if (frame_us > max_frame_time) { |
645 | *index = i; |
646 | max_frame_time = frame_us; |
647 | valid_assignment_found = true; |
648 | } |
649 | } |
650 | } |
651 | pipe_idx++; |
652 | } |
653 | return valid_assignment_found; |
654 | } |
655 | |
656 | /** |
657 | * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP. |
658 | * @dc: current dc state |
659 | * @context: new dc state |
660 | * |
661 | * This function returns true if there are enough free pipes |
662 | * to create the required phantom pipes for any given stream |
663 | * (that does not already have phantom pipe assigned). |
664 | * |
665 | * e.g. For a 2 stream config where the first stream uses one |
666 | * pipe and the second stream uses 2 pipes (i.e. pipe split), |
667 | * this function will return true because there is 1 remaining |
668 | * pipe which can be used as the phantom pipe for the non pipe |
669 | * split pipe. |
670 | * |
671 | * Return: |
672 | * True if there are enough free pipes to assign phantom pipes to at least one |
673 | * stream that does not already have phantom pipes assigned. Otherwise false. |
674 | */ |
675 | static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context) |
676 | { |
677 | unsigned int i, split_cnt, free_pipes; |
678 | unsigned int min_pipe_split = dc->res_pool->pipe_count + 1; // init as max number of pipes + 1 |
679 | bool subvp_possible = false; |
680 | |
681 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
682 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
683 | |
684 | // Find the minimum pipe split count for non SubVP pipes |
685 | if (resource_is_pipe_type(pipe, OPP_HEAD) && |
686 | dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) { |
687 | split_cnt = 0; |
688 | while (pipe) { |
689 | split_cnt++; |
690 | pipe = pipe->bottom_pipe; |
691 | } |
692 | |
693 | if (split_cnt < min_pipe_split) |
694 | min_pipe_split = split_cnt; |
695 | } |
696 | } |
697 | |
698 | free_pipes = dcn32_get_num_free_pipes(dc, context); |
699 | |
700 | // SubVP only possible if at least one pipe is being used (i.e. free_pipes |
701 | // should not equal the pipe_count) |
702 | if (free_pipes >= min_pipe_split && free_pipes < dc->res_pool->pipe_count) |
703 | subvp_possible = true; |
704 | |
705 | return subvp_possible; |
706 | } |
707 | |
708 | /** |
709 | * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable |
710 | * @dc: current dc state |
711 | * @context: new dc state |
712 | * |
713 | * High level algorithm: |
714 | * 1. Find longest microschedule length (in us) between the two SubVP pipes |
715 | * 2. Check if the worst case overlap (VBLANK in middle of ACTIVE) for both |
716 | * pipes still allows for the maximum microschedule to fit in the active |
717 | * region for both pipes. |
718 | * |
719 | * Return: True if the SubVP + SubVP config is schedulable, false otherwise |
720 | */ |
721 | static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) |
722 | { |
723 | struct pipe_ctx *subvp_pipes[2]; |
724 | struct dc_stream_state *phantom = NULL; |
725 | uint32_t microschedule_lines = 0; |
726 | uint32_t index = 0; |
727 | uint32_t i; |
728 | uint32_t max_microschedule_us = 0; |
729 | int32_t vactive1_us, vactive2_us, vblank1_us, vblank2_us; |
730 | |
731 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
732 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
733 | uint32_t time_us = 0; |
734 | |
735 | /* Loop to calculate the maximum microschedule time between the two SubVP pipes, |
736 | * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. |
737 | */ |
738 | if (pipe->stream && pipe->plane_state && !pipe->top_pipe && |
739 | dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { |
740 | phantom = dc_state_get_paired_subvp_stream(context, pipe->stream); |
741 | microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + |
742 | phantom->timing.v_addressable; |
743 | |
744 | // Round up when calculating microschedule time (+ 1 at the end) |
745 | time_us = (microschedule_lines * phantom->timing.h_total) / |
746 | (double)(phantom->timing.pix_clk_100hz * 100) * 1000000 + |
747 | dc->caps.subvp_prefetch_end_to_mall_start_us + |
748 | dc->caps.subvp_fw_processing_delay_us + 1; |
749 | if (time_us > max_microschedule_us) |
750 | max_microschedule_us = time_us; |
751 | |
752 | subvp_pipes[index] = pipe; |
753 | index++; |
754 | |
755 | // Maximum 2 SubVP pipes |
756 | if (index == 2) |
757 | break; |
758 | } |
759 | } |
760 | vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) / |
761 | (double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000; |
762 | vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) / |
763 | (double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000; |
764 | vblank1_us = (((subvp_pipes[0]->stream->timing.v_total - subvp_pipes[0]->stream->timing.v_addressable) * |
765 | subvp_pipes[0]->stream->timing.h_total) / |
766 | (double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000; |
767 | vblank2_us = (((subvp_pipes[1]->stream->timing.v_total - subvp_pipes[1]->stream->timing.v_addressable) * |
768 | subvp_pipes[1]->stream->timing.h_total) / |
769 | (double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000; |
770 | |
771 | if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us && |
772 | (vactive2_us - vblank1_us) / 2 > max_microschedule_us) |
773 | return true; |
774 | |
775 | return false; |
776 | } |
777 | |
778 | /** |
779 | * subvp_drr_schedulable() - Determine if SubVP + DRR config is schedulable |
780 | * @dc: current dc state |
781 | * @context: new dc state |
782 | * |
783 | * High level algorithm: |
784 | * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe |
785 | * 2. Determine the frame time for the DRR display when adding required margin for MCLK switching |
786 | * (the margin is equal to the MALL region + DRR margin (500us)) |
787 | * 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame)) |
788 | * then report the configuration as supported |
789 | * |
790 | * Return: True if the SubVP + DRR config is schedulable, false otherwise |
791 | */ |
792 | static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) |
793 | { |
794 | bool schedulable = false; |
795 | uint32_t i; |
796 | struct pipe_ctx *pipe = NULL; |
797 | struct pipe_ctx *drr_pipe = NULL; |
798 | struct dc_crtc_timing *main_timing = NULL; |
799 | struct dc_crtc_timing *phantom_timing = NULL; |
800 | struct dc_crtc_timing *drr_timing = NULL; |
801 | int16_t prefetch_us = 0; |
802 | int16_t mall_region_us = 0; |
803 | int16_t drr_frame_us = 0; // nominal frame time |
804 | int16_t subvp_active_us = 0; |
805 | int16_t stretched_drr_us = 0; |
806 | int16_t drr_stretched_vblank_us = 0; |
807 | int16_t max_vblank_mallregion = 0; |
808 | struct dc_stream_state *phantom_stream; |
809 | bool subvp_found = false; |
810 | bool drr_found = false; |
811 | |
812 | // Find SubVP pipe |
813 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
814 | pipe = &context->res_ctx.pipe_ctx[i]; |
815 | |
816 | // We check for master pipe, but it shouldn't matter since we only need |
817 | // the pipe for timing info (stream should be same for any pipe splits) |
818 | if (!resource_is_pipe_type(pipe, OTG_MASTER) || |
819 | !resource_is_pipe_type(pipe, DPP_PIPE)) |
820 | continue; |
821 | |
822 | // Find the SubVP pipe |
823 | if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { |
824 | subvp_found = true; |
825 | break; |
826 | } |
827 | } |
828 | |
829 | // Find the DRR pipe |
830 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
831 | drr_pipe = &context->res_ctx.pipe_ctx[i]; |
832 | |
833 | // We check for master pipe only |
834 | if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) || |
835 | !resource_is_pipe_type(drr_pipe, DPP_PIPE)) |
836 | continue; |
837 | |
838 | if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && |
839 | (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) { |
840 | drr_found = true; |
841 | break; |
842 | } |
843 | } |
844 | |
845 | if (subvp_found && drr_found) { |
846 | phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream); |
847 | main_timing = &pipe->stream->timing; |
848 | phantom_timing = &phantom_stream->timing; |
849 | drr_timing = &drr_pipe->stream->timing; |
850 | prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / |
851 | (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + |
852 | dc->caps.subvp_prefetch_end_to_mall_start_us; |
853 | subvp_active_us = main_timing->v_addressable * main_timing->h_total / |
854 | (double)(main_timing->pix_clk_100hz * 100) * 1000000; |
855 | drr_frame_us = drr_timing->v_total * drr_timing->h_total / |
856 | (double)(drr_timing->pix_clk_100hz * 100) * 1000000; |
857 | // P-State allow width and FW delays are already included in phantom_timing->v_addressable |
858 | mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / |
859 | (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; |
860 | stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; |
861 | drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / |
862 | (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); |
863 | max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us; |
864 | } |
865 | |
866 | /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the |
867 | * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis |
868 | * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time, |
869 | * and the max of (VBLANK blanking time, MALL region)). |
870 | */ |
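/* Note: min_refresh_in_uhz is in micro-Hz, so (1 / min_refresh_in_uhz) * 1000000 * 1000000
 * below is the longest allowed DRR frame time in us.
 */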
871 | if (drr_timing && stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 && |
872 | subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0) |
873 | schedulable = true; |
874 | |
875 | return schedulable; |
876 | } |
877 | |
878 | |
879 | /** |
880 | * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable |
881 | * @dc: current dc state |
882 | * @context: new dc state |
883 | * |
884 | * High level algorithm: |
885 | * 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe |
886 | * 2. If (SubVP Active - Prefetch > Vblank Frame Time + max(MALL region, Vblank blanking time)) |
887 | * then report the configuration as supported |
888 | * 3. If the VBLANK display is DRR, then take the DRR static schedulability path |
889 | * |
890 | * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise |
891 | */ |
892 | static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) |
893 | { |
894 | struct pipe_ctx *pipe = NULL; |
895 | struct pipe_ctx *subvp_pipe = NULL; |
896 | bool found = false; |
897 | bool schedulable = false; |
898 | uint32_t i = 0; |
899 | uint8_t vblank_index = 0; |
900 | uint16_t prefetch_us = 0; |
901 | uint16_t mall_region_us = 0; |
902 | uint16_t vblank_frame_us = 0; |
903 | uint16_t subvp_active_us = 0; |
904 | uint16_t vblank_blank_us = 0; |
905 | uint16_t max_vblank_mallregion = 0; |
906 | struct dc_crtc_timing *main_timing = NULL; |
907 | struct dc_crtc_timing *phantom_timing = NULL; |
908 | struct dc_crtc_timing *vblank_timing = NULL; |
909 | struct dc_stream_state *phantom_stream; |
910 | enum mall_stream_type pipe_mall_type; |
911 | |
912 | /* For SubVP + VBLANK/DRR cases, we assume there can only be |
913 | * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK |
914 | * is supported, it is either a single VBLANK case or two VBLANK |
915 | * displays which are synchronized (in which case they have identical |
916 | * timings). |
917 | */ |
918 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
919 | pipe = &context->res_ctx.pipe_ctx[i]; |
920 | pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); |
921 | |
922 | // We check for master pipe, but it shouldn't matter since we only need |
923 | // the pipe for timing info (stream should be same for any pipe splits) |
924 | if (!resource_is_pipe_type(pipe, OTG_MASTER) || |
925 | !resource_is_pipe_type(pipe, DPP_PIPE)) |
926 | continue; |
927 | |
928 | if (!found && pipe_mall_type == SUBVP_NONE) { |
929 | // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). |
930 | vblank_index = i; |
931 | found = true; |
932 | } |
933 | |
934 | if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) |
935 | subvp_pipe = pipe; |
936 | } |
937 | if (found) { |
938 | phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); |
939 | main_timing = &subvp_pipe->stream->timing; |
940 | phantom_timing = &phantom_stream->timing; |
941 | vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; |
942 | // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe |
943 | // Also include the prefetch end to mallstart delay time |
944 | prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / |
945 | (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + |
946 | dc->caps.subvp_prefetch_end_to_mall_start_us; |
947 | // P-State allow width and FW delays are already included in phantom_timing->v_addressable |
948 | mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / |
949 | (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; |
950 | vblank_frame_us = vblank_timing->v_total * vblank_timing->h_total / |
951 | (double)(vblank_timing->pix_clk_100hz * 100) * 1000000; |
952 | vblank_blank_us = (vblank_timing->v_total - vblank_timing->v_addressable) * vblank_timing->h_total / |
953 | (double)(vblank_timing->pix_clk_100hz * 100) * 1000000; |
954 | subvp_active_us = main_timing->v_addressable * main_timing->h_total / |
955 | (double)(main_timing->pix_clk_100hz * 100) * 1000000; |
956 | max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us; |
957 | |
958 | // Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time, |
959 | // and the max of (VBLANK blanking time, MALL region) |
960 | // TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0) |
961 | if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0) |
962 | schedulable = true; |
963 | } |
964 | return schedulable; |
965 | } |
966 | |
967 | /** |
968 | * subvp_subvp_admissable() - Determine if subvp + subvp config is admissible |
969 | * |
970 | * @dc: Current DC state |
971 | * @context: New DC state to be programmed |
972 | * |
973 | * SubVP + SubVP is admissible under the following conditions: |
974 | * - All SubVP pipes are < 120Hz OR |
975 | * - All SubVP pipes are >= 120Hz |
976 | * |
977 | * Return: True if admissible, false otherwise |
978 | */ |
979 | static bool subvp_subvp_admissable(struct dc *dc, |
980 | struct dc_state *context) |
981 | { |
982 | bool result = false; |
983 | uint32_t i; |
984 | uint8_t subvp_count = 0; |
985 | uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0; |
986 | uint64_t refresh_rate = 0; |
987 | |
988 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
989 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
990 | |
991 | if (!pipe->stream) |
992 | continue; |
993 | |
994 | if (pipe->plane_state && !pipe->top_pipe && |
995 | dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { |
996 | refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + |
997 | pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); |
998 | refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); |
999 | refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); |
1000 | |
1001 | if ((uint32_t)refresh_rate < min_refresh) |
1002 | min_refresh = (uint32_t)refresh_rate; |
1003 | if ((uint32_t)refresh_rate > max_refresh) |
1004 | max_refresh = (uint32_t)refresh_rate; |
1005 | subvp_count++; |
1006 | } |
1007 | } |
1008 | |
1009 | if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) || |
1010 | (min_refresh >= subvp_high_refresh_list.min_refresh && |
1011 | max_refresh <= subvp_high_refresh_list.max_refresh))) |
1012 | result = true; |
1013 | |
1014 | return result; |
1015 | } |
1016 | |
1017 | /** |
1018 | * subvp_validate_static_schedulability - Check which SubVP case is calculated |
1019 | * and handle static analysis based on the case. |
1020 | * @dc: current dc state |
1021 | * @context: new dc state |
1022 | * @vlevel: Voltage level calculated by DML |
1023 | * |
1024 | * Three cases: |
1025 | * 1. SubVP + SubVP |
1026 | * 2. SubVP + VBLANK (DRR checked internally) |
1027 | * 3. SubVP + VACTIVE (currently unsupported) |
1028 | * |
1029 | * Return: True if statically schedulable, false otherwise |
1030 | */ |
1031 | static bool subvp_validate_static_schedulability(struct dc *dc, |
1032 | struct dc_state *context, |
1033 | int vlevel) |
1034 | { |
1035 | bool schedulable = false; |
1036 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
1037 | uint32_t i, pipe_idx; |
1038 | uint8_t subvp_count = 0; |
1039 | uint8_t vactive_count = 0; |
1040 | uint8_t non_subvp_pipes = 0; |
1041 | |
1042 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
1043 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
1044 | enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); |
1045 | |
1046 | if (!pipe->stream) |
1047 | continue; |
1048 | |
1049 | if (pipe->plane_state && !pipe->top_pipe) { |
1050 | if (pipe_mall_type == SUBVP_MAIN) |
1051 | subvp_count++; |
1052 | if (pipe_mall_type == SUBVP_NONE) |
1053 | non_subvp_pipes++; |
1054 | } |
1055 | |
1056 | // Count how many planes that aren't SubVP/phantom are capable of VACTIVE |
1057 | // switching (SubVP + VACTIVE unsupported). In situations where we force |
1058 | // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. |
1059 | if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && |
1060 | pipe_mall_type == SUBVP_NONE) { |
1061 | vactive_count++; |
1062 | } |
1063 | pipe_idx++; |
1064 | } |
1065 | |
1066 | if (subvp_count == 2) { |
1067 | // Static schedulability check for SubVP + SubVP case |
1068 | schedulable = subvp_subvp_admissable(dc, context) && subvp_subvp_schedulable(dc, context); |
1069 | } else if (subvp_count == 1 && non_subvp_pipes == 0) { |
1070 | // Single SubVP configs will be supported by default as long as they're supported by DML |
1071 | schedulable = true; |
1072 | } else if (subvp_count == 1 && non_subvp_pipes == 1) { |
1073 | if (dcn32_subvp_drr_admissable(dc, context)) |
1074 | schedulable = subvp_drr_schedulable(dc, context); |
1075 | else if (dcn32_subvp_vblank_admissable(dc, context, vlevel)) |
1076 | schedulable = subvp_vblank_schedulable(dc, context); |
1077 | } else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp && |
1078 | vactive_count > 0) { |
1079 | // For single display SubVP cases, DML will output dm_dram_clock_change_vactive_w_mall_sub_vp by default. |
1080 | // We tell the difference between SubVP vs. SubVP + VACTIVE by checking the vactive_count. |
1081 | // SubVP + VACTIVE currently unsupported |
1082 | schedulable = false; |
1083 | } |
1084 | return schedulable; |
1085 | } |
1086 | |
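/* Assign an incrementing subvp_index to each SubVP main OTG master pipe; all
 * other pipes keep subvp_index 0.
 */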
1087 | static void assign_subvp_index(struct dc *dc, struct dc_state *context) |
1088 | { |
1089 | int i; |
1090 | int index = 0; |
1091 | |
1092 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1093 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
1094 | |
1095 | if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && |
1096 | dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { |
1097 | pipe_ctx->subvp_index = index++; |
1098 | } else { |
1099 | pipe_ctx->subvp_index = 0; |
1100 | } |
1101 | } |
1102 | } |
1103 | |
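/* Tracks the desired ODM slice count per stream and MPC slice count per plane
 * so pipe resources can be updated to match.
 */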
1104 | struct pipe_slice_table { |
1105 | struct { |
1106 | struct dc_stream_state *stream; |
1107 | int slice_count; |
1108 | } odm_combines[MAX_STREAMS]; |
1109 | int odm_combine_count; |
1110 | |
1111 | struct { |
1112 | struct pipe_ctx *pri_pipe; |
1113 | struct dc_plane_state *plane; |
1114 | int slice_count; |
1115 | } mpc_combines[MAX_PLANES]; |
1116 | int mpc_combine_count; |
1117 | }; |
1118 | |
1119 | |
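/* Adjust the ODM slice count tracked for @stream by @diff, adding a new table
 * entry if the stream is not tracked yet.
 */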
1120 | static void update_slice_table_for_stream(struct pipe_slice_table *table, |
1121 | struct dc_stream_state *stream, int diff) |
1122 | { |
1123 | int i; |
1124 | |
1125 | for (i = 0; i < table->odm_combine_count; i++) { |
1126 | if (table->odm_combines[i].stream == stream) { |
1127 | table->odm_combines[i].slice_count += diff; |
1128 | break; |
1129 | } |
1130 | } |
1131 | |
1132 | if (i == table->odm_combine_count) { |
1133 | table->odm_combine_count++; |
1134 | table->odm_combines[i].stream = stream; |
1135 | table->odm_combines[i].slice_count = diff; |
1136 | } |
1137 | } |
1138 | |
1139 | static void update_slice_table_for_plane(struct pipe_slice_table *table, |
1140 | struct pipe_ctx *dpp_pipe, struct dc_plane_state *plane, int diff) |
1141 | { |
1142 | int i; |
1143 | struct pipe_ctx *pri_dpp_pipe = resource_get_primary_dpp_pipe(dpp_pipe); |
1144 | |
1145 | for (i = 0; i < table->mpc_combine_count; i++) { |
1146 | if (table->mpc_combines[i].plane == plane && |
1147 | table->mpc_combines[i].pri_pipe == pri_dpp_pipe) { |
1148 | table->mpc_combines[i].slice_count += diff; |
1149 | break; |
1150 | } |
1151 | } |
1152 | |
1153 | if (i == table->mpc_combine_count) { |
1154 | table->mpc_combine_count++; |
1155 | table->mpc_combines[i].plane = plane; |
1156 | table->mpc_combines[i].pri_pipe = pri_dpp_pipe; |
1157 | table->mpc_combines[i].slice_count = diff; |
1158 | } |
1159 | } |
1160 | |
1161 | static void init_pipe_slice_table_from_context( |
1162 | struct pipe_slice_table *table, |
1163 | struct dc_state *context) |
1164 | { |
1165 | int i, j; |
1166 | struct pipe_ctx *otg_master; |
1167 | struct pipe_ctx *dpp_pipes[MAX_PIPES]; |
1168 | struct dc_stream_state *stream; |
1169 | int count; |
1170 | |
1171 | memset(table, 0, sizeof(*table)); |
1172 | |
1173 | for (i = 0; i < context->stream_count; i++) { |
1174 | stream = context->streams[i]; |
1175 | otg_master = resource_get_otg_master_for_stream( |
1176 | &context->res_ctx, stream); |
1177 | count = resource_get_odm_slice_count(otg_master); |
1178 | update_slice_table_for_stream(table, stream, count); |
1179 | |
1180 | count = resource_get_dpp_pipes_for_opp_head(otg_master, |
1181 | &context->res_ctx, dpp_pipes); |
1182 | for (j = 0; j < count; j++) |
1183 | if (dpp_pipes[j]->plane_state) |
1184 | update_slice_table_for_plane(table, dpp_pipes[j], |
1185 | dpp_pipes[j]->plane_state, 1); |
1186 | } |
1187 | } |
1188 | |
1189 | static bool update_pipe_slice_table_with_split_flags( |
1190 | struct pipe_slice_table *table, |
1191 | struct dc *dc, |
1192 | struct dc_state *context, |
1193 | struct vba_vars_st *vba, |
1194 | int split[MAX_PIPES], |
1195 | bool merge[MAX_PIPES]) |
1196 | { |
1197 | /* NOTE: we are deprecating the support for the concept of pipe splitting |
1198 | * or pipe merging. Instead we append slices to the end and remove |
1199 | * slices from the end. The following code converts a pipe split or |
1200 | * merge to an append or remove operation. |
1201 | * |
1202 | * For example: |
1203 | * When split flags describe the following pipe connection transition |
1204 | * |
1205 | * from: |
1206 | * pipe 0 (split=2) -> pipe 1 (split=2) |
1207 | * to: (old behavior) |
1208 | * pipe 0 -> pipe 2 -> pipe 1 -> pipe 3 |
1209 | * |
1210 | * the code below actually does: |
1211 | * pipe 0 -> pipe 1 -> pipe 2 -> pipe 3 |
1212 | * |
1213 | * This is the new intended behavior and for future DCNs we will retire |
1214 | * the old concept completely. |
1215 | */ |
1216 | struct pipe_ctx *pipe; |
1217 | bool odm; |
1218 | int dc_pipe_idx, dml_pipe_idx = 0; |
1219 | bool updated = false; |
1220 | |
1221 | for (dc_pipe_idx = 0; |
1222 | dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) { |
1223 | pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; |
		if (resource_is_pipe_type(pipe, FREE_PIPE))
1225 | continue; |
1226 | |
1227 | if (merge[dc_pipe_idx]) { |
			if (resource_is_pipe_type(pipe, OPP_HEAD))
				/* merging OPP head means reducing ODM slice
				 * count by 1
				 */
				update_slice_table_for_stream(table, pipe->stream, -1);
			else if (resource_is_pipe_type(pipe, DPP_PIPE) &&
					resource_get_odm_slice_index(resource_get_opp_head(pipe)) == 0)
				/* merging DPP pipe of the first ODM slice means
				 * reducing MPC slice count by 1
				 */
				update_slice_table_for_plane(table, pipe, pipe->plane_state, -1);
1239 | updated = true; |
1240 | } |
1241 | |
1242 | if (split[dc_pipe_idx]) { |
1243 | odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] != |
1244 | dm_odm_combine_mode_disabled; |
			if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
				update_slice_table_for_stream(
						table, pipe->stream, split[dc_pipe_idx] - 1);
			else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
				update_slice_table_for_plane(table, pipe,
						pipe->plane_state, split[dc_pipe_idx] - 1);
1251 | updated = true; |
1252 | } |
1253 | dml_pipe_idx++; |
1254 | } |
1255 | return updated; |
1256 | } |
1257 | |
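/* Apply the accumulated ODM and MPC slice counts to the new context, using
 * the current state and the resource pool as the reference topology.
 */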
1258 | static void update_pipes_with_slice_table(struct dc *dc, struct dc_state *context, |
1259 | struct pipe_slice_table *table) |
1260 | { |
1261 | int i; |
1262 | |
1263 | for (i = 0; i < table->odm_combine_count; i++) |
		resource_update_pipes_for_stream_with_slice_count(context,
				dc->current_state, dc->res_pool,
				table->odm_combines[i].stream,
				table->odm_combines[i].slice_count);

	for (i = 0; i < table->mpc_combine_count; i++)
		resource_update_pipes_for_plane_with_slice_count(context,
				dc->current_state, dc->res_pool,
				table->mpc_combines[i].plane,
				table->mpc_combines[i].slice_count);
1274 | } |
1275 | |
1276 | static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *context, |
1277 | struct vba_vars_st *vba, int split[MAX_PIPES], |
1278 | bool merge[MAX_PIPES]) |
1279 | { |
1280 | struct pipe_slice_table slice_table; |
1281 | bool updated; |
1282 | |
	init_pipe_slice_table_from_context(&slice_table, context);
	updated = update_pipe_slice_table_with_split_flags(
			&slice_table, dc, context, vba,
			split, merge);
	update_pipes_with_slice_table(dc, context, &slice_table);
1288 | return updated; |
1289 | } |
1290 | |
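/* Decide whether the single-display 2:1 ODM policy should be applied to this
 * context purely as a power optimization; the checks below restrict the
 * configuration so the feature can later be toggled off seamlessly.
 */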
1291 | static bool should_apply_odm_power_optimization(struct dc *dc, |
1292 | struct dc_state *context, struct vba_vars_st *v, int *split, |
1293 | bool *merge) |
1294 | { |
1295 | struct dc_stream_state *stream = context->streams[0]; |
1296 | struct pipe_slice_table slice_table; |
1297 | int i; |
1298 | |
1299 | /* |
	 * This debug flag allows us to disable the ODM power optimization feature
	 * unconditionally. We force the feature off if it is set to false.
1302 | */ |
1303 | if (!dc->debug.enable_single_display_2to1_odm_policy) |
1304 | return false; |
1305 | |
	/* The current design and test coverage only allow ODM power
	 * optimization for a single stream. Supporting the multi-stream use
	 * case would require an additional algorithm to decide how to
	 * optimize power consumption when there are not enough free pipes to
	 * allocate for all the streams. That level of optimization would
	 * require multiple revalidation attempts to make an optimized
	 * decision. Unfortunately, the current version of DML does not
	 * support a revalidation flow.
	 */
1315 | if (context->stream_count != 1) |
1316 | return false; |
1317 | |
1318 | /* |
1319 | * Our hardware doesn't support ODM for HDMI TMDS |
1320 | */ |
	if (dc_is_hdmi_signal(stream->signal))
1322 | return false; |
1323 | |
1324 | /* |
1325 | * ODM Combine 2:1 requires horizontal timing divisible by 2 so each |
1326 | * ODM segment has the same size. |
1327 | */ |
1328 | if (!is_h_timing_divisible_by_2(stream)) |
1329 | return false; |
1330 | |
1331 | /* |
1332 | * No power benefits if the timing's pixel clock is not high enough to |
1333 | * raise display clock from minimum power state. |
1334 | */ |
1335 | if (stream->timing.pix_clk_100hz * 100 <= DCN3_2_VMIN_DISPCLK_HZ) |
1336 | return false; |
1337 | |
1338 | if (dc->config.enable_windowed_mpo_odm) { |
1339 | /* |
1340 | * ODM power optimization should only be allowed if the feature |
1341 | * can be seamlessly toggled off within an update. This would |
1342 | * require that the feature is applied on top of a minimal |
1343 | * state. A minimal state is defined as a state validated |
1344 | * without the need of pipe split. Therefore, when transition to |
1345 | * toggle the feature off, the same stream and plane |
1346 | * configuration can be supported by the pipe resource in the |
1347 | * first ODM slice alone without the need to acquire extra |
1348 | * resources. |
1349 | */ |
		init_pipe_slice_table_from_context(&slice_table, context);
		update_pipe_slice_table_with_split_flags(
				&slice_table, dc, context, v,
				split, merge);
1354 | for (i = 0; i < slice_table.mpc_combine_count; i++) |
1355 | if (slice_table.mpc_combines[i].slice_count > 1) |
1356 | return false; |
1357 | |
1358 | for (i = 0; i < slice_table.odm_combine_count; i++) |
1359 | if (slice_table.odm_combines[i].slice_count > 1) |
1360 | return false; |
1361 | } else { |
1362 | /* |
1363 | * the new ODM power optimization feature reduces software |
1364 | * design limitation and allows ODM power optimization to be |
1365 | * supported even with presence of overlay planes. The new |
1366 | * feature is enabled based on enable_windowed_mpo_odm flag. If |
1367 | * the flag is not set, we limit our feature scope due to |
1368 | * previous software design limitation |
1369 | */ |
1370 | if (context->stream_status[0].plane_count != 1) |
1371 | return false; |
1372 | |
		if (memcmp(&context->stream_status[0].plane_states[0]->clip_rect,
				&stream->src, sizeof(struct rect)) != 0)
1375 | return false; |
1376 | |
1377 | if (stream->src.width >= 5120 && |
1378 | stream->src.width > stream->dst.width) |
1379 | return false; |
1380 | } |
1381 | return true; |
1382 | } |
1383 | |
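/* Force a 2:1 ODM combine policy on every DML pipe and revalidate. If the
 * resulting voltage level is supported, keep it and recompute the pipe
 * split/merge flags; otherwise restore the original per-pipe policies.
 */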
1384 | static void try_odm_power_optimization_and_revalidate( |
1385 | struct dc *dc, |
1386 | struct dc_state *context, |
1387 | display_e2e_pipe_params_st *pipes, |
1388 | int *split, |
1389 | bool *merge, |
1390 | unsigned int *vlevel, |
1391 | int pipe_cnt) |
1392 | { |
1393 | int i; |
1394 | unsigned int new_vlevel; |
1395 | unsigned int cur_policy[MAX_PIPES]; |
1396 | |
1397 | for (i = 0; i < pipe_cnt; i++) { |
1398 | cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy; |
1399 | pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; |
1400 | } |
1401 | |
	new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1403 | |
1404 | if (new_vlevel < context->bw_ctx.dml.soc.num_states) { |
1405 | memset(split, 0, MAX_PIPES * sizeof(int)); |
1406 | memset(merge, 0, MAX_PIPES * sizeof(bool)); |
		*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
1408 | context->bw_ctx.dml.vba.VoltageLevel = *vlevel; |
1409 | } else { |
1410 | for (i = 0; i < pipe_cnt; i++) |
1411 | pipes[i].pipe.dest.odm_combine_policy = cur_policy[i]; |
1412 | } |
1413 | } |
1414 | |
1415 | static bool is_test_pattern_enabled( |
1416 | struct dc_state *context) |
1417 | { |
1418 | int i; |
1419 | |
1420 | for (i = 0; i < context->stream_count; i++) { |
1421 | if (context->streams[i]->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) |
1422 | return true; |
1423 | } |
1424 | |
1425 | return false; |
1426 | } |
1427 | |
1428 | static void dcn32_full_validate_bw_helper(struct dc *dc, |
1429 | struct dc_state *context, |
1430 | display_e2e_pipe_params_st *pipes, |
1431 | int *vlevel, |
1432 | int *split, |
1433 | bool *merge, |
1434 | int *pipe_cnt) |
1435 | { |
1436 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
1437 | unsigned int dc_pipe_idx = 0; |
1438 | int i = 0; |
1439 | bool found_supported_config = false; |
1440 | int vlevel_temp = 0; |
1441 | |
1442 | dc_assert_fp_enabled(); |
1443 | |
1444 | /* |
1445 | * DML favors voltage over p-state, but we're more interested in |
1446 | * supporting p-state over voltage. We can't support p-state in |
1447 | * prefetch mode > 0 so try capping the prefetch mode to start. |
1448 | * Override present for testing. |
1449 | */ |
1450 | if (dc->debug.dml_disallow_alternate_prefetch_modes) |
1451 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = |
1452 | dm_prefetch_support_uclk_fclk_and_stutter; |
1453 | else |
1454 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = |
1455 | dm_prefetch_support_uclk_fclk_and_stutter_if_possible; |
1456 | |
	*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1458 | /* This may adjust vlevel and maxMpcComb */ |
1459 | if (*vlevel < context->bw_ctx.dml.soc.num_states) { |
		*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1461 | vba->VoltageLevel = *vlevel; |
1462 | } |
1463 | |
1464 | /* Conditions for setting up phantom pipes for SubVP: |
1465 | * 1. Not force disable SubVP |
1466 | * 2. Full update (i.e. !fast_validate) |
1467 | * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) |
1468 | * 4. Display configuration passes validation |
1469 | * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) |
1470 | */ |
1471 | if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) && |
1472 | !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) && |
1473 | (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] && |
1474 | vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) || |
1475 | vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || |
1476 | dc->debug.force_subvp_mclk_switch)) { |
1477 | |
1478 | dcn32_merge_pipes_for_subvp(dc, context); |
1479 | memset(merge, 0, MAX_PIPES * sizeof(bool)); |
1480 | |
1481 | vlevel_temp = *vlevel; |
1482 | /* to re-initialize viewport after the pipe merge */ |
1483 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1484 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
1485 | |
1486 | if (!pipe_ctx->plane_state || !pipe_ctx->stream) |
1487 | continue; |
1488 | |
1489 | resource_build_scaling_params(pipe_ctx); |
1490 | } |
1491 | |
1492 | while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) && |
			dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
1494 | /* For the case where *vlevel = num_states, bandwidth validation has failed for this config. |
1495 | * Adding phantom pipes won't change the validation result, so change the DML input param |
1496 | * for P-State support before adding phantom pipes and recalculating the DML result. |
1497 | * However, this case is only applicable for SubVP + DRR cases because the prefetch mode |
			 * will not allow for a switch in VBLANK. The DRR display must have its VBLANK stretched
1499 | * enough to support MCLK switching. |
1500 | */ |
1501 | if (*vlevel == context->bw_ctx.dml.soc.num_states && |
1502 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final == |
1503 | dm_prefetch_support_uclk_fclk_and_stutter) { |
1504 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = |
1505 | dm_prefetch_support_fclk_and_stutter; |
1506 | /* There are params (such as FabricClock) that need to be recalculated |
1507 | * after validation fails (otherwise it will be 0). Calculation for |
1508 | * phantom vactive requires call into DML, so we must ensure all the |
1509 | * vba params are valid otherwise we'll get incorrect phantom vactive. |
1510 | */ |
				*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1512 | } |
1513 | |
1514 | dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx); |
1515 | |
1516 | *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); |
1517 | // Populate dppclk to trigger a recalculate in dml_get_voltage_level |
1518 | // so the phantom pipe DLG params can be assigned correctly. |
			pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1521 | |
1522 | /* Check that vlevel requested supports pstate or not |
1523 | * if not, select the lowest vlevel that supports it |
1524 | */ |
1525 | for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) { |
1526 | if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) { |
1527 | *vlevel = i; |
1528 | break; |
1529 | } |
1530 | } |
1531 | |
1532 | if (*vlevel < context->bw_ctx.dml.soc.num_states |
					&& subvp_validate_static_schedulability(dc, context, *vlevel))
1534 | found_supported_config = true; |
1535 | if (found_supported_config) { |
1536 | // For SubVP + DRR cases, we can force the lowest vlevel that supports the mode |
1537 | if (dcn32_subvp_drr_admissable(dc, context) && subvp_drr_schedulable(dc, context)) { |
1538 | /* find lowest vlevel that supports the config */ |
1539 | for (i = *vlevel; i >= 0; i--) { |
1540 | if (vba->ModeSupport[i][vba->maxMpcComb]) { |
1541 | *vlevel = i; |
1542 | } else { |
1543 | break; |
1544 | } |
1545 | } |
1546 | } |
1547 | } |
1548 | } |
1549 | |
1550 | if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp]) |
1551 | found_supported_config = false; |
1552 | |
1553 | // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) |
1554 | // remove phantom pipes and repopulate dml pipes |
1555 | if (!found_supported_config) { |
			dc_state_remove_phantom_streams_and_planes(dc, context);
			dc_state_release_phantom_streams_and_planes(dc, context);
1558 | vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; |
1559 | *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); |
1560 | |
			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1562 | /* This may adjust vlevel and maxMpcComb */ |
1563 | if (*vlevel < context->bw_ctx.dml.soc.num_states) { |
				*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1565 | vba->VoltageLevel = *vlevel; |
1566 | } |
1567 | } else { |
			// Must populate phantom DLG params before programming hardware / timing for phantom pipe
			dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
1570 | |
1571 | /* Call validate_apply_pipe_split flags after calling DML getters for |
1572 | * phantom dlg params, or some of the VBA params indicating pipe split |
1573 | * can be overwritten by the getters. |
1574 | * |
1575 | * When setting up SubVP config, all pipes are merged before attempting to |
1576 | * add phantom pipes. If pipe split (ODM / MPC) is required, both the main |
1577 | * and phantom pipes will be split in the regular pipe splitting sequence. |
1578 | */ |
1579 | memset(split, 0, MAX_PIPES * sizeof(int)); |
1580 | memset(merge, 0, MAX_PIPES * sizeof(bool)); |
			*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1582 | vba->VoltageLevel = *vlevel; |
1583 | // Note: We can't apply the phantom pipes to hardware at this time. We have to wait |
1584 | // until driver has acquired the DMCUB lock to do it safely. |
1585 | assign_subvp_index(dc, context); |
1586 | } |
1587 | } |
1588 | |
	if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
		try_odm_power_optimization_and_revalidate(
				dc, context, pipes, split, merge, vlevel, *pipe_cnt);
1592 | |
1593 | } |
1594 | |
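/* DTBCLK is required whenever any active pipe drives a DP 128b/132b (UHBR)
 * signal.
 */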
1595 | static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) |
1596 | { |
1597 | int i; |
1598 | |
1599 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1600 | if (!context->res_ctx.pipe_ctx[i].stream) |
1601 | continue; |
1602 | if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) |
1603 | return true; |
1604 | } |
1605 | return false; |
1606 | } |
1607 | |
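/* For adaptive-sync streams, raise vstartup_start so that it covers at least
 * the full vertical blank (after clamping the front porch to the hardware
 * minimum):
 *
 *   asic_blank_start = v_total - v_front_porch
 *   asic_blank_end   = asic_blank_start - borders - v_addressable
 *   newVstartup      = asic_blank_end + v_front_porch
 *
 * Purely illustrative numbers (not taken from a real mode table): with
 * v_total = 1125, v_addressable = 1080, no borders and v_front_porch = 4,
 * asic_blank_start = 1121, asic_blank_end = 41, and vstartup_start is raised
 * to at least 45.
 */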
1608 | static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) |
1609 | { |
1610 | struct dc_crtc_timing patched_crtc_timing; |
1611 | uint32_t asic_blank_end = 0; |
1612 | uint32_t asic_blank_start = 0; |
1613 | uint32_t newVstartup = 0; |
1614 | |
1615 | patched_crtc_timing = *dc_crtc_timing; |
1616 | |
1617 | if (patched_crtc_timing.flags.INTERLACE == 1) { |
1618 | if (patched_crtc_timing.v_front_porch < 2) |
1619 | patched_crtc_timing.v_front_porch = 2; |
1620 | } else { |
1621 | if (patched_crtc_timing.v_front_porch < 1) |
1622 | patched_crtc_timing.v_front_porch = 1; |
1623 | } |
1624 | |
1625 | /* blank_start = frame end - front porch */ |
1626 | asic_blank_start = patched_crtc_timing.v_total - |
1627 | patched_crtc_timing.v_front_porch; |
1628 | |
1629 | /* blank_end = blank_start - active */ |
1630 | asic_blank_end = asic_blank_start - |
1631 | patched_crtc_timing.v_border_bottom - |
1632 | patched_crtc_timing.v_addressable - |
1633 | patched_crtc_timing.v_border_top; |
1634 | |
1635 | newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); |
1636 | |
1637 | *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); |
1638 | } |
1639 | |
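/* Translate the DML/VBA validation results into the context: global clocks
 * and p-state support flags, per-pipe vstartup/vupdate/vready and DET
 * allocation, MALL allocation sizes, and finally the per-pipe DLG, TTU and
 * RQ register sets.
 */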
1640 | static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, |
1641 | display_e2e_pipe_params_st *pipes, |
1642 | int pipe_cnt, int vlevel) |
1643 | { |
1644 | int i, pipe_idx, active_hubp_count = 0; |
1645 | bool usr_retraining_support = false; |
1646 | bool unbounded_req_enabled = false; |
1647 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
1648 | |
1649 | dc_assert_fp_enabled(); |
1650 | |
1651 | /* Writeback MCIF_WB arbitration parameters */ |
1652 | dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); |
1653 | |
1654 | context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; |
1655 | context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; |
1656 | context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; |
1657 | context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; |
1658 | context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; |
1659 | context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; |
1660 | context->bw_ctx.bw.dcn.clk.p_state_change_support = |
1661 | context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] |
1662 | != dm_dram_clock_change_unsupported; |
1663 | |
1664 | /* Pstate change might not be supported by hardware, but it might be |
1665 | * possible with firmware driven vertical blank stretching. |
1666 | */ |
1667 | context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; |
1668 | |
1669 | context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; |
1670 | context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); |
1671 | context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = context->bw_ctx.dml.vba.DTBCLKPerState[vlevel] * 1000; |
1672 | if (context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_fclock_change_unsupported) |
1673 | context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false; |
1674 | else |
1675 | context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; |
1676 | |
1677 | usr_retraining_support = context->bw_ctx.dml.vba.USRRetrainingSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; |
1678 | ASSERT(usr_retraining_support); |
1679 | |
1680 | if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) |
1681 | context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; |
1682 | |
	unbounded_req_enabled = get_unbounded_request_enabled(&context->bw_ctx.dml, pipes, pipe_cnt);
1684 | |
1685 | if (unbounded_req_enabled && pipe_cnt > 1) { |
1686 | // Unbounded requesting should not ever be used when more than 1 pipe is enabled. |
1687 | ASSERT(false); |
1688 | unbounded_req_enabled = false; |
1689 | } |
1690 | |
1691 | context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; |
1692 | context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; |
1693 | context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; |
1694 | |
1695 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
1696 | if (!context->res_ctx.pipe_ctx[i].stream) |
1697 | continue; |
1698 | if (context->res_ctx.pipe_ctx[i].plane_state) |
1699 | active_hubp_count++; |
		pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
				pipe_idx);
		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
				pipe_idx);
		pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt,
				pipe_idx);
		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
				pipe_idx);
1708 | |
		if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
1710 | // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests |
1711 | context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; |
1712 | context->res_ctx.pipe_ctx[i].unbounded_req = false; |
1713 | } else { |
			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt,
					pipe_idx);
1716 | context->res_ctx.pipe_ctx[i].unbounded_req = unbounded_req_enabled; |
1717 | } |
1718 | |
1719 | if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) |
1720 | context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; |
1721 | if (context->res_ctx.pipe_ctx[i].plane_state) |
1722 | context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; |
1723 | else |
1724 | context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; |
1725 | context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; |
1726 | |
		context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1728 | |
1729 | if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0) |
1730 | context->res_ctx.pipe_ctx[i].has_vactive_margin = true; |
1731 | else |
1732 | context->res_ctx.pipe_ctx[i].has_vactive_margin = false; |
1733 | |
1734 | /* MALL Allocation Sizes */ |
1735 | /* count from active, top pipes per plane only */ |
1736 | if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && |
1737 | (context->res_ctx.pipe_ctx[i].top_pipe == NULL || |
1738 | context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && |
1739 | context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { |
1740 | /* SS: all active surfaces stored in MALL */ |
			if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
1742 | context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; |
1743 | |
1744 | if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { |
1745 | /* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */ |
1746 | context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; |
1747 | } |
1748 | } else { |
1749 | /* SUBVP: phantom surfaces only stored in MALL */ |
1750 | context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; |
1751 | } |
1752 | } |
1753 | |
1754 | if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) |
			dcn20_adjust_freesync_v_startup(
					&context->res_ctx.pipe_ctx[i].stream->timing,
					&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
1758 | |
1759 | pipe_idx++; |
1760 | } |
1761 | /* If DCN isn't making memory requests we can allow pstate change and lower clocks */ |
1762 | if (!active_hubp_count) { |
1763 | context->bw_ctx.bw.dcn.clk.socclk_khz = 0; |
1764 | context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; |
1765 | context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0; |
1766 | context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0; |
1767 | context->bw_ctx.bw.dcn.clk.dramclk_khz = 0; |
1768 | context->bw_ctx.bw.dcn.clk.fclk_khz = 0; |
1769 | context->bw_ctx.bw.dcn.clk.p_state_change_support = true; |
1770 | context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; |
1771 | } |
	/* save an original dppclk copy */
1773 | context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; |
1774 | context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; |
1775 | context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz |
1776 | * 1000; |
1777 | context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz |
1778 | * 1000; |
1779 | |
1780 | context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context); |
1781 | |
1782 | context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes; |
1783 | |
1784 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1785 | if (context->res_ctx.pipe_ctx[i].stream) |
1786 | context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[i].det_buffer_size_kb; |
1787 | } |
1788 | |
1789 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
1790 | |
1791 | if (!context->res_ctx.pipe_ctx[i].stream) |
1792 | continue; |
1793 | |
1794 | context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg_v2(&context->bw_ctx.dml, |
1795 | &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes, |
1796 | pipe_cnt, pipe_idx); |
1797 | |
1798 | context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg_v2(&context->res_ctx.pipe_ctx[i].rq_regs, |
1799 | &context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); |
1800 | pipe_idx++; |
1801 | } |
1802 | } |
1803 | |
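/* Find a free pipe to host a new MPC/ODM slice. Preference order:
 * 1. the caller's old_index, if that pipe is unused in the new context;
 * 2. scanning from the highest index, pipes that were heads (no top_pipe,
 *    no prev_odm_pipe) in the current state and are unused in the new one;
 * 3. any unused pipe.
 */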
1804 | static struct pipe_ctx *dcn32_find_split_pipe( |
1805 | struct dc *dc, |
1806 | struct dc_state *context, |
1807 | int old_index) |
1808 | { |
1809 | struct pipe_ctx *pipe = NULL; |
1810 | int i; |
1811 | |
1812 | if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) { |
1813 | pipe = &context->res_ctx.pipe_ctx[old_index]; |
1814 | pipe->pipe_idx = old_index; |
1815 | } |
1816 | |
1817 | if (!pipe) |
1818 | for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { |
1819 | if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL |
1820 | && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { |
1821 | if (context->res_ctx.pipe_ctx[i].stream == NULL) { |
1822 | pipe = &context->res_ctx.pipe_ctx[i]; |
1823 | pipe->pipe_idx = i; |
1824 | break; |
1825 | } |
1826 | } |
1827 | } |
1828 | |
1829 | /* |
1830 | * May need to fix pipes getting tossed from 1 opp to another on flip |
1831 | * Add for debugging transient underflow during topology updates: |
1832 | * ASSERT(pipe); |
1833 | */ |
1834 | if (!pipe) |
1835 | for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { |
1836 | if (context->res_ctx.pipe_ctx[i].stream == NULL) { |
1837 | pipe = &context->res_ctx.pipe_ctx[i]; |
1838 | pipe->pipe_idx = i; |
1839 | break; |
1840 | } |
1841 | } |
1842 | |
1843 | return pipe; |
1844 | } |
1845 | |
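/* Copy the primary pipe's state into sec_pipe and link it as either an ODM
 * slice (own OPP, DSC acquired when the stream uses DSC) or an MPC slice
 * (shared stream resources). For windowed MPO where the plane's clip rect
 * fits entirely in one ODM half, no secondary DPP pipe is needed and the
 * function returns early.
 */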
1846 | static bool dcn32_split_stream_for_mpc_or_odm( |
1847 | const struct dc *dc, |
1848 | struct resource_context *res_ctx, |
1849 | struct pipe_ctx *pri_pipe, |
1850 | struct pipe_ctx *sec_pipe, |
1851 | bool odm) |
1852 | { |
1853 | int pipe_idx = sec_pipe->pipe_idx; |
1854 | const struct resource_pool *pool = dc->res_pool; |
1855 | |
1856 | DC_LOGGER_INIT(dc->ctx->logger); |
1857 | |
1858 | if (odm && pri_pipe->plane_state) { |
1859 | /* ODM + window MPO, where MPO window is on left half only */ |
1860 | if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <= |
1861 | pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) { |
1862 | |
			DC_LOG_SCALER("%s - ODM + window MPO(left). pri_pipe:%d\n",
1864 | __func__, |
1865 | pri_pipe->pipe_idx); |
1866 | return true; |
1867 | } |
1868 | |
1869 | /* ODM + window MPO, where MPO window is on right half only */ |
1870 | if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) { |
1871 | |
			DC_LOG_SCALER("%s - ODM + window MPO(right). pri_pipe:%d\n",
1873 | __func__, |
1874 | pri_pipe->pipe_idx); |
1875 | return true; |
1876 | } |
1877 | } |
1878 | |
1879 | *sec_pipe = *pri_pipe; |
1880 | |
1881 | sec_pipe->pipe_idx = pipe_idx; |
1882 | sec_pipe->plane_res.mi = pool->mis[pipe_idx]; |
1883 | sec_pipe->plane_res.hubp = pool->hubps[pipe_idx]; |
1884 | sec_pipe->plane_res.ipp = pool->ipps[pipe_idx]; |
1885 | sec_pipe->plane_res.xfm = pool->transforms[pipe_idx]; |
1886 | sec_pipe->plane_res.dpp = pool->dpps[pipe_idx]; |
1887 | sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst; |
1888 | sec_pipe->stream_res.dsc = NULL; |
1889 | if (odm) { |
1890 | if (pri_pipe->next_odm_pipe) { |
1891 | ASSERT(pri_pipe->next_odm_pipe != sec_pipe); |
1892 | sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe; |
1893 | sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe; |
1894 | } |
1895 | if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) { |
1896 | pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe; |
1897 | sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe; |
1898 | } |
1899 | if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) { |
1900 | pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe; |
1901 | sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe; |
1902 | } |
1903 | pri_pipe->next_odm_pipe = sec_pipe; |
1904 | sec_pipe->prev_odm_pipe = pri_pipe; |
1905 | ASSERT(sec_pipe->top_pipe == NULL); |
1906 | |
1907 | if (!sec_pipe->top_pipe) |
1908 | sec_pipe->stream_res.opp = pool->opps[pipe_idx]; |
1909 | else |
1910 | sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp; |
1911 | if (sec_pipe->stream->timing.flags.DSC == 1) { |
			dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
1913 | ASSERT(sec_pipe->stream_res.dsc); |
1914 | if (sec_pipe->stream_res.dsc == NULL) |
1915 | return false; |
1916 | } |
1917 | } else { |
1918 | if (pri_pipe->bottom_pipe) { |
1919 | ASSERT(pri_pipe->bottom_pipe != sec_pipe); |
1920 | sec_pipe->bottom_pipe = pri_pipe->bottom_pipe; |
1921 | sec_pipe->bottom_pipe->top_pipe = sec_pipe; |
1922 | } |
1923 | pri_pipe->bottom_pipe = sec_pipe; |
1924 | sec_pipe->top_pipe = pri_pipe; |
1925 | |
1926 | ASSERT(pri_pipe->plane_state); |
1927 | } |
1928 | |
1929 | return true; |
1930 | } |
1931 | |
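/* Core DML validation for DCN32: strip any phantom pipes left from previous
 * validations, populate the DML pipe array, run full validation (including
 * SubVP and ODM power optimization) unless fast_validate is set, fall back
 * to less aggressive prefetch modes when needed, apply the resulting pipe
 * split/merge decisions to the topology, validate DSC, and revalidate once
 * more if the topology changed.
 */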
1932 | bool dcn32_internal_validate_bw(struct dc *dc, |
1933 | struct dc_state *context, |
1934 | display_e2e_pipe_params_st *pipes, |
1935 | int *pipe_cnt_out, |
1936 | int *vlevel_out, |
1937 | bool fast_validate) |
1938 | { |
1939 | bool out = false; |
1940 | bool repopulate_pipes = false; |
1941 | int split[MAX_PIPES] = { 0 }; |
1942 | bool merge[MAX_PIPES] = { false }; |
1943 | bool newly_split[MAX_PIPES] = { false }; |
1944 | int pipe_cnt, i, pipe_idx; |
1945 | int vlevel = context->bw_ctx.dml.soc.num_states; |
1946 | struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
1947 | |
1948 | dc_assert_fp_enabled(); |
1949 | |
1950 | ASSERT(pipes); |
1951 | if (!pipes) |
1952 | return false; |
1953 | |
1954 | // For each full update, remove all existing phantom pipes first |
	dc_state_remove_phantom_streams_and_planes(dc, context);
	dc_state_release_phantom_streams_and_planes(dc, context);
1957 | |
1958 | dc->res_pool->funcs->update_soc_for_wm_a(dc, context); |
1959 | |
1960 | pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); |
1961 | |
1962 | if (!pipe_cnt) { |
1963 | out = true; |
1964 | goto validate_out; |
1965 | } |
1966 | |
	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
1968 | context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); |
1969 | |
1970 | if (!fast_validate) |
		dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
1972 | |
1973 | if (fast_validate || |
1974 | (dc->debug.dml_disallow_alternate_prefetch_modes && |
1975 | (vlevel == context->bw_ctx.dml.soc.num_states || |
1976 | vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { |
1977 | /* |
1978 | * If dml_disallow_alternate_prefetch_modes is false, then we have already |
1979 | * tried alternate prefetch modes during full validation. |
1980 | * |
1981 | * If mode is unsupported or there is no p-state support, then |
1982 | * fall back to favouring voltage. |
1983 | * |
1984 | * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try |
1985 | * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) |
1986 | */ |
1987 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = |
1988 | dm_prefetch_support_none; |
1989 | |
1990 | context->bw_ctx.dml.validate_max_state = fast_validate; |
		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1992 | |
1993 | context->bw_ctx.dml.validate_max_state = false; |
1994 | |
1995 | if (vlevel < context->bw_ctx.dml.soc.num_states) { |
1996 | memset(split, 0, sizeof(split)); |
1997 | memset(merge, 0, sizeof(merge)); |
1998 | vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); |
1999 | // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML |
2000 | vba->VoltageLevel = vlevel; |
2001 | } |
2002 | } |
2003 | |
	dml_log_mode_support_params(&context->bw_ctx.dml);
2005 | |
2006 | if (vlevel == context->bw_ctx.dml.soc.num_states) |
2007 | goto validate_fail; |
2008 | |
2009 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
2010 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
2011 | struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; |
2012 | |
2013 | if (!pipe->stream) |
2014 | continue; |
2015 | |
2016 | if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled |
2017 | && !dc->config.enable_windowed_mpo_odm |
2018 | && pipe->plane_state && mpo_pipe |
				&& memcmp(&mpo_pipe->plane_state->clip_rect,
						&pipe->stream->src,
						sizeof(struct rect)) != 0) {
2022 | ASSERT(mpo_pipe->plane_state != pipe->plane_state); |
2023 | goto validate_fail; |
2024 | } |
2025 | pipe_idx++; |
2026 | } |
2027 | |
2028 | if (dc->config.enable_windowed_mpo_odm) { |
2029 | repopulate_pipes = update_pipes_with_split_flags( |
2030 | dc, context, vba, split, merge); |
2031 | } else { |
		/* The code below will be removed once windowed MPO ODM is fully
		 * enabled.
		 */
2035 | /* merge pipes if necessary */ |
2036 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2037 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
2038 | |
2039 | /*skip pipes that don't need merging*/ |
2040 | if (!merge[i]) |
2041 | continue; |
2042 | |
2043 | /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */ |
2044 | if (pipe->prev_odm_pipe) { |
2045 | /*split off odm pipe*/ |
2046 | pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe; |
2047 | if (pipe->next_odm_pipe) |
2048 | pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe; |
2049 | |
				/* 2:1 ODM + MPC Split MPO to Single Pipe + MPC Split MPO */
2051 | if (pipe->bottom_pipe) { |
2052 | if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) { |
2053 | /*MPC split rules will handle this case*/ |
2054 | pipe->bottom_pipe->top_pipe = NULL; |
2055 | } else { |
2056 | /* when merging an ODM pipes, the bottom MPC pipe must now point to |
2057 | * the previous ODM pipe and its associated stream assets |
2058 | */ |
2059 | if (pipe->prev_odm_pipe->bottom_pipe) { |
2060 | /* 3 plane MPO*/ |
2061 | pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe; |
2062 | pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe; |
2063 | } else { |
2064 | /* 2 plane MPO*/ |
2065 | pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe; |
2066 | pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe; |
2067 | } |
2068 | |
2069 | memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource)); |
2070 | } |
2071 | } |
2072 | |
2073 | if (pipe->top_pipe) { |
2074 | pipe->top_pipe->bottom_pipe = NULL; |
2075 | } |
2076 | |
2077 | pipe->bottom_pipe = NULL; |
2078 | pipe->next_odm_pipe = NULL; |
2079 | pipe->plane_state = NULL; |
2080 | pipe->stream = NULL; |
2081 | pipe->top_pipe = NULL; |
2082 | pipe->prev_odm_pipe = NULL; |
2083 | if (pipe->stream_res.dsc) |
					dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
2085 | memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); |
2086 | memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); |
2087 | memset(&pipe->link_res, 0, sizeof(pipe->link_res)); |
2088 | repopulate_pipes = true; |
2089 | } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { |
2090 | struct pipe_ctx *top_pipe = pipe->top_pipe; |
2091 | struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; |
2092 | |
2093 | top_pipe->bottom_pipe = bottom_pipe; |
2094 | if (bottom_pipe) |
2095 | bottom_pipe->top_pipe = top_pipe; |
2096 | |
2097 | pipe->top_pipe = NULL; |
2098 | pipe->bottom_pipe = NULL; |
2099 | pipe->plane_state = NULL; |
2100 | pipe->stream = NULL; |
2101 | memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); |
2102 | memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); |
2103 | memset(&pipe->link_res, 0, sizeof(pipe->link_res)); |
2104 | repopulate_pipes = true; |
2105 | } else |
2106 | ASSERT(0); /* Should never try to merge master pipe */ |
2107 | |
2108 | } |
2109 | |
2110 | for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { |
2111 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
2112 | struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
2113 | struct pipe_ctx *hsplit_pipe = NULL; |
2114 | bool odm; |
2115 | int old_index = -1; |
2116 | |
2117 | if (!pipe->stream || newly_split[i]) |
2118 | continue; |
2119 | |
2120 | pipe_idx++; |
2121 | odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; |
2122 | |
2123 | if (!pipe->plane_state && !odm) |
2124 | continue; |
2125 | |
2126 | if (split[i]) { |
2127 | if (odm) { |
2128 | if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) |
2129 | old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; |
2130 | else if (old_pipe->next_odm_pipe) |
2131 | old_index = old_pipe->next_odm_pipe->pipe_idx; |
2132 | } else { |
2133 | if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && |
2134 | old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) |
2135 | old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; |
2136 | else if (old_pipe->bottom_pipe && |
2137 | old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) |
2138 | old_index = old_pipe->bottom_pipe->pipe_idx; |
2139 | } |
2140 | hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index); |
2141 | ASSERT(hsplit_pipe); |
2142 | if (!hsplit_pipe) |
2143 | goto validate_fail; |
2144 | |
2145 | if (!dcn32_split_stream_for_mpc_or_odm( |
					dc, &context->res_ctx,
					pipe, hsplit_pipe, odm))
2148 | goto validate_fail; |
2149 | |
2150 | newly_split[hsplit_pipe->pipe_idx] = true; |
2151 | repopulate_pipes = true; |
2152 | } |
2153 | if (split[i] == 4) { |
2154 | struct pipe_ctx *pipe_4to1; |
2155 | |
2156 | if (odm && old_pipe->next_odm_pipe) |
2157 | old_index = old_pipe->next_odm_pipe->pipe_idx; |
2158 | else if (!odm && old_pipe->bottom_pipe && |
2159 | old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) |
2160 | old_index = old_pipe->bottom_pipe->pipe_idx; |
2161 | else |
2162 | old_index = -1; |
2163 | pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); |
2164 | ASSERT(pipe_4to1); |
2165 | if (!pipe_4to1) |
2166 | goto validate_fail; |
2167 | if (!dcn32_split_stream_for_mpc_or_odm( |
					dc, &context->res_ctx,
					pipe, pipe_4to1, odm))
2170 | goto validate_fail; |
2171 | newly_split[pipe_4to1->pipe_idx] = true; |
2172 | |
2173 | if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe |
2174 | && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) |
2175 | old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; |
2176 | else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && |
2177 | old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && |
2178 | old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) |
2179 | old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx; |
2180 | else |
2181 | old_index = -1; |
2182 | pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); |
2183 | ASSERT(pipe_4to1); |
2184 | if (!pipe_4to1) |
2185 | goto validate_fail; |
2186 | if (!dcn32_split_stream_for_mpc_or_odm( |
					dc, &context->res_ctx,
					hsplit_pipe, pipe_4to1, odm))
2189 | goto validate_fail; |
2190 | newly_split[pipe_4to1->pipe_idx] = true; |
2191 | } |
2192 | if (odm) |
			dcn20_build_mapped_resource(dc, context, pipe->stream);
2194 | } |
2195 | |
2196 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2197 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
2198 | |
2199 | if (pipe->plane_state) { |
				if (!resource_build_scaling_params(pipe))
2201 | goto validate_fail; |
2202 | } |
2203 | } |
2204 | } |
2205 | |
2206 | /* Actual dsc count per stream dsc validation*/ |
	if (!dcn20_validate_dsc(dc, context)) {
2208 | vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; |
2209 | goto validate_fail; |
2210 | } |
2211 | |
2212 | if (repopulate_pipes) { |
2213 | int flag_max_mpc_comb = vba->maxMpcComb; |
2214 | int flag_vlevel = vlevel; |
2215 | int i; |
2216 | |
2217 | pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); |
2218 | if (!dc->config.enable_windowed_mpo_odm) |
2219 | dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); |
2220 | |
2221 | /* repopulate_pipes = 1 means the pipes were either split or merged. In this case |
2222 | * we have to re-calculate the DET allocation and run through DML once more to |
2223 | * ensure all the params are calculated correctly. We do not need to run the |
2224 | * pipe split check again after this call (pipes are already split / merged). |
2225 | * */ |
2226 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = |
2227 | dm_prefetch_support_uclk_fclk_and_stutter_if_possible; |
2228 | |
		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2230 | |
2231 | if (vlevel == context->bw_ctx.dml.soc.num_states) { |
2232 | /* failed after DET size changes */ |
2233 | goto validate_fail; |
2234 | } else if (flag_max_mpc_comb == 0 && |
2235 | flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) { |
2236 | /* check the context constructed with pipe split flags is still valid*/ |
2237 | bool flags_valid = false; |
2238 | for (i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) { |
2239 | if (vba->ModeSupport[i][flag_max_mpc_comb]) { |
2240 | vba->maxMpcComb = flag_max_mpc_comb; |
2241 | vba->VoltageLevel = i; |
2242 | vlevel = i; |
2243 | flags_valid = true; |
2244 | break; |
2245 | } |
2246 | } |
2247 | |
2248 | /* this should never happen */ |
2249 | if (!flags_valid) |
2250 | goto validate_fail; |
2251 | } |
2252 | } |
2253 | *vlevel_out = vlevel; |
2254 | *pipe_cnt_out = pipe_cnt; |
2255 | |
2256 | out = true; |
2257 | goto validate_out; |
2258 | |
2259 | validate_fail: |
2260 | out = false; |
2261 | |
2262 | validate_out: |
2263 | return out; |
2264 | } |
2265 | |
2266 | |
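/* Compute the watermark sets and final clocks for a validated context. SubVP
 * and FPO (firmware-assisted mclk switching) may require revalidation with a
 * dummy p-state latency first; Set B uses clock_limits[2] (or a low bootup
 * DCFCLK when fewer states are available), while Set A and the dummy p-state
 * Set C use the clocks from validation.
 */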
2267 | void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, |
2268 | display_e2e_pipe_params_st *pipes, |
2269 | int pipe_cnt, |
2270 | int vlevel) |
2271 | { |
2272 | int i, pipe_idx, vlevel_temp = 0; |
2273 | double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz; |
2274 | double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; |
2275 | double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed; |
2276 | double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation; |
2277 | bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != |
2278 | dm_dram_clock_change_unsupported; |
2279 | unsigned int dummy_latency_index = 0; |
2280 | int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; |
2281 | unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; |
2282 | bool subvp_in_use = dcn32_subvp_in_use(dc, context); |
2283 | unsigned int min_dram_speed_mts_margin; |
2284 | bool need_fclk_lat_as_dummy = false; |
2285 | bool is_subvp_p_drr = false; |
2286 | struct dc_stream_state *fpo_candidate_stream = NULL; |
2287 | |
2288 | dc_assert_fp_enabled(); |
2289 | |
2290 | /* need to find dummy latency index for subvp */ |
2291 | if (subvp_in_use) { |
2292 | /* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */ |
2293 | if (!pstate_en) { |
2294 | context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; |
2295 | context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter; |
2296 | pstate_en = true; |
2297 | is_subvp_p_drr = true; |
2298 | } |
2299 | dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc, |
2300 | context, pipes, pipe_cnt, vlevel); |
2301 | |
2302 | /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so prefetch is |
2303 | * scheduled correctly to account for dummy pstate. |
2304 | */ |
2305 | if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) { |
2306 | need_fclk_lat_as_dummy = true; |
2307 | context->bw_ctx.dml.soc.fclk_change_latency_us = |
2308 | dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; |
2309 | } |
2310 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = |
2311 | dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; |
		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
2313 | maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; |
2314 | if (is_subvp_p_drr) { |
2315 | context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; |
2316 | } |
2317 | } |
2318 | |
2319 | context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; |
2320 | for (i = 0; i < context->stream_count; i++) { |
2321 | if (context->streams[i]) |
2322 | context->streams[i]->fpo_in_use = false; |
2323 | } |
2324 | |
2325 | if (!pstate_en || (!dc->debug.disable_fpo_optimizations && |
2326 | pstate_en && vlevel != 0)) { |
2327 | /* only when the mclk switch can not be natural, is the fw based vblank stretch attempted */ |
2328 | fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context); |
2329 | if (fpo_candidate_stream) { |
2330 | fpo_candidate_stream->fpo_in_use = true; |
2331 | context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true; |
2332 | } |
2333 | |
2334 | if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { |
2335 | dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc, |
2336 | context, pipes, pipe_cnt, vlevel); |
2337 | |
2338 | /* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch |
2339 | * we reinstate the original dram_clock_change_latency_us on the context |
2340 | * and all variables that may have changed up to this point, except the |
2341 | * newly found dummy_latency_index |
2342 | */ |
2343 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = |
2344 | dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; |
2345 | /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so |
2346 | * prefetch is scheduled correctly to account for dummy pstate. |
2347 | */ |
2348 | if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) { |
2349 | need_fclk_lat_as_dummy = true; |
2350 | context->bw_ctx.dml.soc.fclk_change_latency_us = |
2351 | dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; |
2352 | } |
			dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
2354 | if (vlevel_temp < vlevel) { |
2355 | vlevel = vlevel_temp; |
2356 | maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; |
2357 | dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; |
2358 | pstate_en = true; |
2359 | context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank; |
2360 | } else { |
2361 | /* Restore FCLK latency and re-run validation to go back to original validation |
2362 | * output if we find that enabling FPO does not give us any benefit (i.e. lower |
2363 | * voltage level) |
2364 | */ |
2365 | context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; |
2366 | for (i = 0; i < context->stream_count; i++) { |
2367 | if (context->streams[i]) |
2368 | context->streams[i]->fpo_in_use = false; |
2369 | } |
2370 | context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; |
				dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
2372 | } |
2373 | } |
2374 | } |
2375 | |
2376 | /* Set B: |
2377 | * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present, |
2378 | * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark |
2379 | * calculations to cover bootup clocks. |
2380 | * DCFCLK: soc.clock_limits[2] when available |
2381 | * UCLK: soc.clock_limits[2] when available |
2382 | */ |
2383 | if (dcn3_2_soc.num_states > 2) { |
2384 | vlevel_temp = 2; |
2385 | dcfclk = dcn3_2_soc.clock_limits[2].dcfclk_mhz; |
2386 | } else |
2387 | dcfclk = 615; //DCFCLK Vmin_lv |
2388 | |
2389 | pipes[0].clks_cfg.voltage = vlevel_temp; |
2390 | pipes[0].clks_cfg.dcfclk_mhz = dcfclk; |
2391 | pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; |
2392 | |
2393 | if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) { |
2394 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us; |
2395 | context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us; |
2396 | context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us; |
2397 | context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us; |
2398 | } |
	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2409 | |
2410 | /* Set D: |
2411 | * All clocks min. |
2412 | * DCFCLK: Min, as reported by PM FW when available |
2413 | * UCLK : Min, as reported by PM FW when available |
2414 | * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) |
2415 | */ |
2416 | |
2417 | /* |
2418 | if (dcn3_2_soc.num_states > 2) { |
2419 | vlevel_temp = 0; |
2420 | dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; |
2421 | } else |
2422 | dcfclk = 615; //DCFCLK Vmin_lv |
2423 | |
2424 | pipes[0].clks_cfg.voltage = vlevel_temp; |
2425 | pipes[0].clks_cfg.dcfclk_mhz = dcfclk; |
2426 | pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; |
2427 | |
2428 | if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) { |
2429 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us; |
2430 | context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us; |
2431 | context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us; |
2432 | context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us; |
2433 | } |
2434 | context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2435 | context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2436 | context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2437 | context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2438 | context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2439 | context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2440 | context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2441 | context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2442 | context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2443 | context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; |
2444 | */ |
2445 | |
2446 | /* Set C, for Dummy P-State: |
2447 | * All clocks min. |
2448 | * DCFCLK: Min, as reported by PM FW, when available |
2449 | * UCLK : Min, as reported by PM FW, when available |
2450 | * pstate latency as per UCLK state dummy pstate latency |
2451 | */ |
2452 | |
2453 | // For Set A and Set C use values from validation |
2454 | pipes[0].clks_cfg.voltage = vlevel; |
2455 | pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation; |
2456 | pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; |
2457 | |
2458 | if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { |
2459 | pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching; |
2460 | } |
2461 | |
2462 | if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { |
2463 | min_dram_speed_mts = dram_speed_from_validation; |
2464 | min_dram_speed_mts_margin = 160; |
2465 | |
2466 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = |
2467 | dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us; |
2468 | |
2469 | if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] == |
2470 | dm_dram_clock_change_unsupported) { |
2471 | int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1; |
2472 | |
2473 | min_dram_speed_mts = |
2474 | dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16; |
2475 | } |
2476 | |
2477 | if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) { |
			/* Find the highest dummy_pstate_table entry whose DRAM
			 * speed is below min_dram_speed_mts + margin; anything
			 * below DPM0 still uses DPM0's dummy latency.
			 */
2481 | for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--) |
2482 | if (min_dram_speed_mts + min_dram_speed_mts_margin > |
2483 | dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts) |
2484 | break; |
2485 | } |
2486 | |
2487 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = |
2488 | dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; |
2489 | |
2490 | context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us; |
2491 | context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; |
2492 | context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; |
2493 | } |
2494 | |
	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2503 | /* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state. |
2504 | * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM |
2505 | * value. |
2506 | */ |
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2509 | |
2510 | if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) { |
2511 | /* The only difference between A and C is p-state latency, if p-state is not supported |
2512 | * with full p-state latency we want to calculate DLG based on dummy p-state latency, |
2513 | * Set A p-state watermark set to 0 on DCN30, when p-state unsupported, for now keep as DCN30. |
2514 | */ |
2515 | context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; |
2516 | context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; |
2517 | /* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case |
2518 | * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported |
2519 | */ |
		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2521 | } else { |
2522 | /* Set A: |
2523 | * All clocks min. |
2524 | * DCFCLK: Min, as reported by PM FW, when available |
2525 | * UCLK: Min, as reported by PM FW, when available |
2526 | */ |
2527 | |
2528 | /* For set A set the correct latency values (i.e. non-dummy values) unconditionally |
2529 | */ |
2530 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; |
2531 | context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; |
2532 | context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; |
2533 | |
		context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
		context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2544 | } |
2545 | |
	/* Make set D = set A since we do not optimize watermarks for MALL */
2547 | context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; |
2548 | |
2549 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
2550 | if (!context->res_ctx.pipe_ctx[i].stream) |
2551 | continue; |
2552 | |
		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
2555 | |
2556 | if (dc->config.forced_clocks) { |
2557 | pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; |
2558 | pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; |
2559 | } |
2560 | if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) |
2561 | pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; |
2562 | if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) |
2563 | pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; |
2564 | |
2565 | pipe_idx++; |
2566 | } |
2567 | |
2568 | context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; |
2569 | |
2570 | /* for proper prefetch calculations, if dummy lat > fclk lat, use fclk lat = dummy lat */ |
2571 | if (need_fclk_lat_as_dummy) |
2572 | context->bw_ctx.dml.soc.fclk_change_latency_us = |
2573 | dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; |
2574 | |
2575 | dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); |
2576 | |
2577 | if (!pstate_en) |
2578 | /* Restore full p-state latency */ |
2579 | context->bw_ctx.dml.soc.dram_clock_change_latency_us = |
2580 | dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; |
2581 | |
2582 | /* revert fclk lat changes if required */ |
2583 | if (need_fclk_lat_as_dummy) |
2584 | context->bw_ctx.dml.soc.fclk_change_latency_us = |
2585 | dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; |
2586 | } |
2587 | |
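/*
 * dcn32_get_optimal_dcfclk_fclk_for_uclk - Derive the lowest DCFCLK and FCLK
 * that can still consume the DRAM bandwidth available at a given UCLK: the
 * achievable DRAM bandwidth (UCLK rate * channel count * channel width,
 * derated) is divided by the derated fabric/return-bus width.
 *
 * Illustrative numbers only (not taken from dcn3_2_soc): with uclk_mts = 1000,
 * 8 channels of 2 bytes and a 60% DRAM derate, bw_from_dram = 1000 * 8 * 2 *
 * 0.6 = 9600 MB/s; with a 64-byte return bus and an 80% SDP derate, the
 * optimal DCFCLK is 9600 / (64 * 0.8) = 187.5 MHz.
 */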
2588 | static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, |
2589 | unsigned int *optimal_dcfclk, |
2590 | unsigned int *optimal_fclk) |
2591 | { |
2592 | double bw_from_dram, bw_from_dram1, bw_from_dram2; |
2593 | |
2594 | bw_from_dram1 = uclk_mts * dcn3_2_soc.num_chans * |
2595 | dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_dram_bw_use_normal_percent / 100); |
2596 | bw_from_dram2 = uclk_mts * dcn3_2_soc.num_chans * |
2597 | dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100); |
2598 | |
2599 | bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2; |
2600 | |
2601 | if (optimal_fclk) |
2602 | *optimal_fclk = bw_from_dram / |
2603 | (dcn3_2_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100)); |
2604 | |
2605 | if (optimal_dcfclk) |
2606 | *optimal_dcfclk = bw_from_dram / |
2607 | (dcn3_2_soc.return_bus_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100)); |
2608 | } |
2609 | |
2610 | static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries, |
2611 | unsigned int index) |
2612 | { |
2613 | int i; |
2614 | |
2615 | if (*num_entries == 0) |
2616 | return; |
2617 | |
2618 | for (i = index; i < *num_entries - 1; i++) { |
2619 | table[i] = table[i + 1]; |
2620 | } |
2621 | memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st)); |
2622 | } |
2623 | |
2624 | void dcn32_patch_dpm_table(struct clk_bw_params *bw_params) |
2625 | { |
2626 | int i; |
2627 | unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, |
2628 | max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; |
2629 | |
2630 | for (i = 0; i < MAX_NUM_DPM_LVL; i++) { |
2631 | if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) |
2632 | max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; |
2633 | if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) |
2634 | max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; |
2635 | if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) |
2636 | max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; |
2637 | if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) |
2638 | max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; |
2639 | if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) |
2640 | max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; |
2641 | if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) |
2642 | max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; |
2643 | if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) |
2644 | max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; |
2645 | } |
2646 | |
	/* Scan through the clock values we currently have and, if they are 0,
	 * populate them with the dcn3_2_soc.clock_limits[] values.
	 *
	 * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being 0
	 * will cause the clock table build to be skipped.
	 */
2653 | if (max_dcfclk_mhz == 0) |
2654 | bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz; |
2655 | if (max_dispclk_mhz == 0) |
2656 | bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz; |
2657 | if (max_dtbclk_mhz == 0) |
2658 | bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz; |
2659 | if (max_uclk_mhz == 0) |
2660 | bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; |
2661 | } |
2662 | |
2663 | static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry, |
2664 | struct _vcs_dpi_voltage_scaling_st *second_entry) |
2665 | { |
2666 | struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry; |
2667 | *first_entry = *second_entry; |
2668 | *second_entry = temp_entry; |
2669 | } |
2670 | |
2671 | /* |
 * sort_entries_with_same_bw - Sort entries sharing the same net bandwidth by
 * ascending DCFCLK (a bubble sort within each equal-bandwidth run)
2673 | */ |
2674 | static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) |
2675 | { |
2676 | unsigned int start_index = 0; |
2677 | unsigned int end_index = 0; |
2678 | unsigned int current_bw = 0; |
2679 | |
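	/* Illustrative only (assumed values): a run of equal-bandwidth entries
	 * with dcfclk_mhz = {800, 600, 700} is reordered in place to
	 * {600, 700, 800}; its position in the bandwidth-sorted table is
	 * unchanged.
	 */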
2680 | for (int i = 0; i < (*num_entries - 1); i++) { |
2681 | if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { |
2682 | current_bw = table[i].net_bw_in_kbytes_sec; |
2683 | start_index = i; |
2684 | end_index = ++i; |
2685 | |
2686 | while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw)) |
2687 | end_index = ++i; |
2688 | } |
2689 | |
2690 | if (start_index != end_index) { |
2691 | for (int j = start_index; j < end_index; j++) { |
2692 | for (int k = start_index; k < end_index; k++) { |
2693 | if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz) |
					swap_table_entries(&table[k], &table[k+1]);
2695 | } |
2696 | } |
2697 | } |
2698 | |
2699 | start_index = 0; |
2700 | end_index = 0; |
2701 | |
2702 | } |
2703 | } |
2704 | |
2705 | /* |
2706 | * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing |
2707 | * and remove entries that do not |
2708 | */ |
2709 | static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) |
2710 | { |
2711 | for (int i = 0; i < (*num_entries - 1); i++) { |
2712 | if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) { |
2713 | if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) || |
2714 | (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz)) |
				remove_entry_from_table_at_index(table, num_entries, i);
2716 | } |
2717 | } |
2718 | } |
2719 | |
2720 | /* |
2721 | * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings |
2722 | * Input: |
2723 | * max_clk_limit - struct containing the desired clock timings |
2724 | * Output: |
2725 | * curr_clk_limit - struct containing the timings that need to be overwritten |
2726 | * Return: 0 upon success, non-zero for failure |
2727 | */ |
2728 | static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, |
2729 | struct clk_limit_table_entry *curr_clk_limit) |
2730 | { |
	if (!max_clk_limit || !curr_clk_limit)
		return -1; //invalid parameters
2733 | |
2734 | //only overwrite if desired max clock frequency is initialized |
2735 | if (max_clk_limit->dcfclk_mhz != 0) |
2736 | curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; |
2737 | |
2738 | if (max_clk_limit->fclk_mhz != 0) |
2739 | curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; |
2740 | |
2741 | if (max_clk_limit->memclk_mhz != 0) |
2742 | curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; |
2743 | |
2744 | if (max_clk_limit->socclk_mhz != 0) |
2745 | curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; |
2746 | |
2747 | if (max_clk_limit->dtbclk_mhz != 0) |
2748 | curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; |
2749 | |
2750 | if (max_clk_limit->dispclk_mhz != 0) |
2751 | curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; |
2752 | |
2753 | return 0; |
2754 | } |
2755 | |
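/*
 * build_synthetic_soc_states - Build the DML clock table from the PMFW DPM
 * levels and the fixed DCFCLK STA targets: insert all points of interest,
 * prune unsupported and redundant states, then round coarse-grained clocks
 * up to real DPM levels.
 */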
2756 | static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, |
2757 | struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) |
2758 | { |
2759 | int i, j; |
2760 | struct _vcs_dpi_voltage_scaling_st entry = {0}; |
2761 | struct clk_limit_table_entry max_clk_data = {0}; |
2762 | |
2763 | unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; |
2764 | |
2765 | static const unsigned int num_dcfclk_stas = 5; |
2766 | unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; |
2767 | |
2768 | unsigned int num_uclk_dpms = 0; |
2769 | unsigned int num_fclk_dpms = 0; |
2770 | unsigned int num_dcfclk_dpms = 0; |
2771 | |
2772 | unsigned int num_dc_uclk_dpms = 0; |
2773 | unsigned int num_dc_fclk_dpms = 0; |
2774 | unsigned int num_dc_dcfclk_dpms = 0; |
2775 | |
2776 | for (i = 0; i < MAX_NUM_DPM_LVL; i++) { |
2777 | if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) |
2778 | max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; |
2779 | if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) |
2780 | max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; |
2781 | if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) |
2782 | max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; |
2783 | if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) |
2784 | max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; |
2785 | if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) |
2786 | max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; |
2787 | if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz) |
2788 | max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; |
2789 | if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) |
2790 | max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; |
2791 | |
2792 | if (bw_params->clk_table.entries[i].memclk_mhz > 0) { |
2793 | num_uclk_dpms++; |
2794 | if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) |
2795 | num_dc_uclk_dpms++; |
2796 | } |
2797 | if (bw_params->clk_table.entries[i].fclk_mhz > 0) { |
2798 | num_fclk_dpms++; |
2799 | if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) |
2800 | num_dc_fclk_dpms++; |
2801 | } |
2802 | if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { |
2803 | num_dcfclk_dpms++; |
2804 | if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) |
2805 | num_dc_dcfclk_dpms++; |
2806 | } |
2807 | } |
2808 | |
2809 | if (!disable_dc_mode_overwrite) { |
2810 | //Overwrite max frequencies with max DC mode frequencies for DC mode systems |
		override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
2812 | num_uclk_dpms = num_dc_uclk_dpms; |
2813 | num_fclk_dpms = num_dc_fclk_dpms; |
2814 | num_dcfclk_dpms = num_dc_dcfclk_dpms; |
2815 | bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; |
2816 | bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; |
2817 | } |
2818 | |
2819 | if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) |
2820 | min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; |
2821 | |
2822 | if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) |
2823 | return -1; |
2824 | |
2825 | if (max_clk_data.dppclk_mhz == 0) |
2826 | max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; |
2827 | |
2828 | if (max_clk_data.fclk_mhz == 0) |
2829 | max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * |
2830 | dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / |
2831 | dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; |
2832 | |
2833 | if (max_clk_data.phyclk_mhz == 0) |
2834 | max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; |
2835 | |
2836 | *num_entries = 0; |
2837 | entry.dispclk_mhz = max_clk_data.dispclk_mhz; |
2838 | entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; |
2839 | entry.dppclk_mhz = max_clk_data.dppclk_mhz; |
2840 | entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; |
2841 | entry.phyclk_mhz = max_clk_data.phyclk_mhz; |
2842 | entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; |
2843 | entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; |
2844 | |
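	/* For each entry inserted below, the clock fields left at 0 are filled
	 * in by get_optimal_ntuple() so the DCFCLK/FCLK/DRAM tuple is
	 * bandwidth-matched before being inserted in sorted order.
	 */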
2845 | // Insert all the DCFCLK STAs |
2846 | for (i = 0; i < num_dcfclk_stas; i++) { |
2847 | entry.dcfclk_mhz = dcfclk_sta_targets[i]; |
2848 | entry.fabricclk_mhz = 0; |
2849 | entry.dram_speed_mts = 0; |
2850 | |
		get_optimal_ntuple(&entry);
		entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
		insert_entry_into_table_sorted(table, num_entries, &entry);
2854 | } |
2855 | |
2856 | // Insert the max DCFCLK |
2857 | entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; |
2858 | entry.fabricclk_mhz = 0; |
2859 | entry.dram_speed_mts = 0; |
2860 | |
	get_optimal_ntuple(&entry);
	entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
	insert_entry_into_table_sorted(table, num_entries, &entry);
2864 | |
2865 | // Insert the UCLK DPMS |
2866 | for (i = 0; i < num_uclk_dpms; i++) { |
2867 | entry.dcfclk_mhz = 0; |
2868 | entry.fabricclk_mhz = 0; |
2869 | entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16; |
2870 | |
		get_optimal_ntuple(&entry);
		entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
		insert_entry_into_table_sorted(table, num_entries, &entry);
2874 | } |
2875 | |
2876 | // If FCLK is coarse grained, insert individual DPMs. |
2877 | if (num_fclk_dpms > 2) { |
2878 | for (i = 0; i < num_fclk_dpms; i++) { |
2879 | entry.dcfclk_mhz = 0; |
2880 | entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; |
2881 | entry.dram_speed_mts = 0; |
2882 | |
			get_optimal_ntuple(&entry);
			entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
			insert_entry_into_table_sorted(table, num_entries, &entry);
2886 | } |
2887 | } |
2888 | // If FCLK fine grained, only insert max |
2889 | else { |
2890 | entry.dcfclk_mhz = 0; |
2891 | entry.fabricclk_mhz = max_clk_data.fclk_mhz; |
2892 | entry.dram_speed_mts = 0; |
2893 | |
		get_optimal_ntuple(&entry);
		entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
		insert_entry_into_table_sorted(table, num_entries, &entry);
2897 | } |
2898 | |
2899 | // At this point, the table contains all "points of interest" based on |
2900 | // DPMs from PMFW, and STAs. Table is sorted by BW, and all clock |
// ratios (by derate) are exact.
2902 | |
2903 | // Remove states that require higher clocks than are supported |
2904 | for (i = *num_entries - 1; i >= 0 ; i--) { |
2905 | if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || |
2906 | table[i].fabricclk_mhz > max_clk_data.fclk_mhz || |
2907 | table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) |
			remove_entry_from_table_at_index(table, num_entries, i);
2909 | } |
2910 | |
2911 | // Insert entry with all max dc limits without bandwidth matching |
2912 | if (!disable_dc_mode_overwrite) { |
2913 | struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry; |
2914 | |
2915 | max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; |
2916 | max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz; |
2917 | max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16; |
2918 | |
		max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
		insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
2921 | |
2922 | sort_entries_with_same_bw(table, num_entries); |
2923 | remove_inconsistent_entries(table, num_entries); |
2924 | } |
2925 | |
2926 | // At this point, the table only contains supported points of interest |
2927 | // it could be used as is, but some states may be redundant due to |
2928 | // coarse grained nature of some clocks, so we want to round up to |
2929 | // coarse grained DPMs and remove duplicates. |
2930 | |
2931 | // Round up UCLKs |
2932 | for (i = *num_entries - 1; i >= 0 ; i--) { |
2933 | for (j = 0; j < num_uclk_dpms; j++) { |
2934 | if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) { |
2935 | table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16; |
2936 | break; |
2937 | } |
2938 | } |
2939 | } |
2940 | |
2941 | // If FCLK is coarse grained, round up to next DPMs |
2942 | if (num_fclk_dpms > 2) { |
2943 | for (i = *num_entries - 1; i >= 0 ; i--) { |
2944 | for (j = 0; j < num_fclk_dpms; j++) { |
2945 | if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) { |
2946 | table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz; |
2947 | break; |
2948 | } |
2949 | } |
2950 | } |
2951 | } |
2952 | // Otherwise, round up to minimum. |
2953 | else { |
2954 | for (i = *num_entries - 1; i >= 0 ; i--) { |
2955 | if (table[i].fabricclk_mhz < min_fclk_mhz) { |
2956 | table[i].fabricclk_mhz = min_fclk_mhz; |
2957 | } |
2958 | } |
2959 | } |
2960 | |
2961 | // Round DCFCLKs up to minimum |
2962 | for (i = *num_entries - 1; i >= 0 ; i--) { |
2963 | if (table[i].dcfclk_mhz < min_dcfclk_mhz) { |
2964 | table[i].dcfclk_mhz = min_dcfclk_mhz; |
2965 | } |
2966 | } |
2967 | |
2968 | // Remove duplicate states, note duplicate states are always neighbouring since table is sorted. |
2969 | i = 0; |
2970 | while (i < *num_entries - 1) { |
2971 | if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz && |
2972 | table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz && |
2973 | table[i].dram_speed_mts == table[i + 1].dram_speed_mts) |
			remove_entry_from_table_at_index(table, num_entries, i + 1);
2975 | else |
2976 | i++; |
2977 | } |
2978 | |
	// Fix up the state indices
2980 | for (i = *num_entries - 1; i >= 0 ; i--) { |
2981 | table[i].state = i; |
2982 | } |
2983 | |
2984 | return 0; |
2985 | } |
2986 | |
2987 | /* |
2988 | * dcn32_update_bw_bounding_box |
2989 | * |
2990 | * This would override some dcn3_2 ip_or_soc initial parameters hardcoded from |
2991 | * spreadsheet with actual values as per dGPU SKU: |
2992 | * - with passed few options from dc->config |
2993 | * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might |
2994 | * need to get it from PM FW) |
2995 | * - with passed latency values (passed in ns units) in dc-> bb override for |
2996 | * debugging purposes |
2997 | * - with passed latencies from VBIOS (in 100_ns units) if available for |
2998 | * certain dGPU SKU |
2999 | * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU |
3000 | * of the same ASIC) |
3001 | * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM |
3002 | * FW for different clocks (which might differ for certain dGPU SKU of the |
3003 | * same ASIC) |
3004 | */ |
3005 | void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) |
3006 | { |
3007 | dc_assert_fp_enabled(); |
3008 | |
3009 | /* Overrides from dc->config options */ |
3010 | dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; |
3011 | |
3012 | /* Override from passed dc->bb_overrides if available*/ |
3013 | if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns |
3014 | && dc->bb_overrides.sr_exit_time_ns) { |
3015 | dc->dml2_options.bbox_overrides.sr_exit_latency_us = |
3016 | dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; |
3017 | } |
3018 | |
3019 | if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) |
3020 | != dc->bb_overrides.sr_enter_plus_exit_time_ns |
3021 | && dc->bb_overrides.sr_enter_plus_exit_time_ns) { |
3022 | dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = |
3023 | dcn3_2_soc.sr_enter_plus_exit_time_us = |
3024 | dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; |
3025 | } |
3026 | |
3027 | if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns |
3028 | && dc->bb_overrides.urgent_latency_ns) { |
3029 | dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; |
3030 | dc->dml2_options.bbox_overrides.urgent_latency_us = |
3031 | dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; |
3032 | } |
3033 | |
3034 | if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) |
3035 | != dc->bb_overrides.dram_clock_change_latency_ns |
3036 | && dc->bb_overrides.dram_clock_change_latency_ns) { |
3037 | dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = |
3038 | dcn3_2_soc.dram_clock_change_latency_us = |
3039 | dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; |
3040 | } |
3041 | |
3042 | if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) |
3043 | != dc->bb_overrides.fclk_clock_change_latency_ns |
3044 | && dc->bb_overrides.fclk_clock_change_latency_ns) { |
3045 | dc->dml2_options.bbox_overrides.fclk_change_latency_us = |
3046 | dcn3_2_soc.fclk_change_latency_us = |
3047 | dc->bb_overrides.fclk_clock_change_latency_ns / 1000; |
3048 | } |
3049 | |
3050 | if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) |
3051 | != dc->bb_overrides.dummy_clock_change_latency_ns |
3052 | && dc->bb_overrides.dummy_clock_change_latency_ns) { |
3053 | dcn3_2_soc.dummy_pstate_latency_us = |
3054 | dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; |
3055 | } |
3056 | |
3057 | /* Override from VBIOS if VBIOS bb_info available */ |
3058 | if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { |
3059 | struct bp_soc_bb_info bb_info = {0}; |
3060 | |
3061 | if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { |
3062 | if (bb_info.dram_clock_change_latency_100ns > 0) |
3063 | dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = |
3064 | dcn3_2_soc.dram_clock_change_latency_us = |
3065 | bb_info.dram_clock_change_latency_100ns * 10; |
3066 | |
3067 | if (bb_info.dram_sr_enter_exit_latency_100ns > 0) |
3068 | dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = |
3069 | dcn3_2_soc.sr_enter_plus_exit_time_us = |
3070 | bb_info.dram_sr_enter_exit_latency_100ns * 10; |
3071 | |
3072 | if (bb_info.dram_sr_exit_latency_100ns > 0) |
3073 | dc->dml2_options.bbox_overrides.sr_exit_latency_us = |
3074 | dcn3_2_soc.sr_exit_time_us = |
3075 | bb_info.dram_sr_exit_latency_100ns * 10; |
3076 | } |
3077 | } |
3078 | |
3079 | /* Override from VBIOS for num_chan */ |
3080 | if (dc->ctx->dc_bios->vram_info.num_chans) { |
3081 | dc->dml2_options.bbox_overrides.dram_num_chan = |
3082 | dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; |
		dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
			dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
3085 | } |
3086 | |
3087 | if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) |
3088 | dc->dml2_options.bbox_overrides.dram_chanel_width_bytes = |
3089 | dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; |
3090 | |
3091 | /* DML DSC delay factor workaround */ |
3092 | dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; |
3093 | |
3094 | dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0; |
3095 | |
3096 | /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ |
3097 | dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; |
3098 | dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; |
3099 | dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; |
3100 | dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0; |
3101 | dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; |
3102 | dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0; |
3103 | |
	/* Override clock levels from Clk Mgr table entries as reported by PM FW */
3105 | if (bw_params->clk_table.entries[0].memclk_mhz) { |
3106 | if (dc->debug.use_legacy_soc_bb_mechanism) { |
3107 | unsigned int i = 0, j = 0, num_states = 0; |
3108 | |
3109 | unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; |
3110 | unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; |
3111 | unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; |
3112 | unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; |
3113 | unsigned int min_dcfclk = UINT_MAX; |
			/* Set 199 as the first value in the STA target array to have a
			 * minimum DCFCLK value. For DCN32 we set the min to 199 so the
			 * minimum FCLK DPM0 (300 MHz) can be achieved.
			 */
3116 | unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; |
3117 | unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0; |
3118 | unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; |
3119 | |
3120 | for (i = 0; i < MAX_NUM_DPM_LVL; i++) { |
3121 | if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) |
3122 | max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; |
3123 | if (bw_params->clk_table.entries[i].dcfclk_mhz != 0 && |
3124 | bw_params->clk_table.entries[i].dcfclk_mhz < min_dcfclk) |
3125 | min_dcfclk = bw_params->clk_table.entries[i].dcfclk_mhz; |
3126 | if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) |
3127 | max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; |
3128 | if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) |
3129 | max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; |
3130 | if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) |
3131 | max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; |
3132 | } |
3133 | if (min_dcfclk > dcfclk_sta_targets[0]) |
3134 | dcfclk_sta_targets[0] = min_dcfclk; |
3135 | if (!max_dcfclk_mhz) |
3136 | max_dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz; |
3137 | if (!max_dispclk_mhz) |
3138 | max_dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz; |
3139 | if (!max_dppclk_mhz) |
3140 | max_dppclk_mhz = dcn3_2_soc.clock_limits[0].dppclk_mhz; |
3141 | if (!max_phyclk_mhz) |
3142 | max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; |
3143 | |
3144 | if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { |
3145 | // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array |
3146 | dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; |
3147 | num_dcfclk_sta_targets++; |
3148 | } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { |
3149 | // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates |
3150 | for (i = 0; i < num_dcfclk_sta_targets; i++) { |
3151 | if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { |
3152 | dcfclk_sta_targets[i] = max_dcfclk_mhz; |
3153 | break; |
3154 | } |
3155 | } |
3156 | // Update size of array since we "removed" duplicates |
3157 | num_dcfclk_sta_targets = i + 1; |
3158 | } |
3159 | |
3160 | num_uclk_states = bw_params->clk_table.num_entries; |
3161 | |
3162 | // Calculate optimal dcfclk for each uclk |
3163 | for (i = 0; i < num_uclk_states; i++) { |
				dcn32_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
						&optimal_dcfclk_for_uclk[i], NULL);
3166 | if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { |
3167 | optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; |
3168 | } |
3169 | } |
3170 | |
3171 | // Calculate optimal uclk for each dcfclk sta target |
3172 | for (i = 0; i < num_dcfclk_sta_targets; i++) { |
3173 | for (j = 0; j < num_uclk_states; j++) { |
3174 | if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { |
3175 | optimal_uclk_for_dcfclk_sta_targets[i] = |
3176 | bw_params->clk_table.entries[j].memclk_mhz * 16; |
3177 | break; |
3178 | } |
3179 | } |
3180 | } |
3181 | |
3182 | i = 0; |
3183 | j = 0; |
			// Create the final dcfclk and uclk table by merging the two
			// sorted lists (STA targets and UCLK DPMs) in order of
			// increasing DCFCLK
3185 | while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { |
3186 | if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { |
3187 | dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; |
3188 | dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; |
3189 | } else { |
3190 | if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { |
3191 | dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; |
3192 | dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; |
3193 | } else { |
3194 | j = num_uclk_states; |
3195 | } |
3196 | } |
3197 | } |
3198 | |
3199 | while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { |
3200 | dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; |
3201 | dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; |
3202 | } |
3203 | |
3204 | while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && |
3205 | optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { |
3206 | dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; |
3207 | dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; |
3208 | } |
3209 | |
3210 | dcn3_2_soc.num_states = num_states; |
3211 | for (i = 0; i < dcn3_2_soc.num_states; i++) { |
3212 | dcn3_2_soc.clock_limits[i].state = i; |
3213 | dcn3_2_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; |
3214 | dcn3_2_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; |
3215 | |
3216 | /* Fill all states with max values of all these clocks */ |
3217 | dcn3_2_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; |
3218 | dcn3_2_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; |
3219 | dcn3_2_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; |
3220 | dcn3_2_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3; |
3221 | |
3222 | /* Populate from bw_params for DTBCLK, SOCCLK */ |
3223 | if (i > 0) { |
3224 | if (!bw_params->clk_table.entries[i].dtbclk_mhz) { |
3225 | dcn3_2_soc.clock_limits[i].dtbclk_mhz = dcn3_2_soc.clock_limits[i-1].dtbclk_mhz; |
3226 | } else { |
3227 | dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; |
3228 | } |
3229 | } else if (bw_params->clk_table.entries[i].dtbclk_mhz) { |
3230 | dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; |
3231 | } |
3232 | |
3233 | if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) |
3234 | dcn3_2_soc.clock_limits[i].socclk_mhz = dcn3_2_soc.clock_limits[i-1].socclk_mhz; |
3235 | else |
3236 | dcn3_2_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; |
3237 | |
3238 | if (!dram_speed_mts[i] && i > 0) |
3239 | dcn3_2_soc.clock_limits[i].dram_speed_mts = dcn3_2_soc.clock_limits[i-1].dram_speed_mts; |
3240 | else |
3241 | dcn3_2_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; |
3242 | |
3243 | /* These clocks cannot come from bw_params, always fill from dcn3_2_soc[0] */ |
3244 | /* PHYCLK_D18, PHYCLK_D32 */ |
3245 | dcn3_2_soc.clock_limits[i].phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; |
3246 | dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; |
3247 | } |
3248 | } else { |
			build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
					dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
3251 | } |
3252 | |
3253 | /* Re-init DML with updated bb */ |
		dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
3255 | if (dc->current_state) |
			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
3257 | } |
3258 | |
3259 | if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) { |
3260 | unsigned int i = 0; |
3261 | |
3262 | dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries; |
3263 | |
3264 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = |
3265 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; |
3266 | |
3267 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = |
3268 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; |
3269 | |
3270 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = |
3271 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; |
3272 | |
3273 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = |
3274 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; |
3275 | |
3276 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = |
3277 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; |
3278 | |
3279 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = |
3280 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; |
3281 | |
3282 | dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = |
3283 | dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; |
3284 | |
3285 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) { |
3286 | if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz) |
3287 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = |
3288 | dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz; |
3289 | } |
3290 | |
3291 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) { |
3292 | if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz) |
3293 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = |
3294 | dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz; |
3295 | } |
3296 | |
3297 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { |
3298 | if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) |
3299 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = |
3300 | dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; |
3301 | } |
3302 | |
3303 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) { |
3304 | if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz) |
3305 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = |
3306 | dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz; |
3307 | } |
3308 | |
3309 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) { |
3310 | if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz) |
3311 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = |
3312 | dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz; |
3313 | } |
3314 | |
3315 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) { |
3316 | if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) { |
3317 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = |
3318 | dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; |
3319 | dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = |
3320 | dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; |
3321 | } |
3322 | } |
3323 | } |
3324 | } |
3325 | |
3326 | void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, |
3327 | int pipe_cnt) |
3328 | { |
3329 | dc_assert_fp_enabled(); |
3330 | |
3331 | pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; |
3332 | pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; |
3333 | } |
3334 | |
3335 | bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) |
3336 | { |
3337 | bool allow = false; |
3338 | uint32_t refresh_rate = 0; |
3339 | uint32_t min_refresh = subvp_active_margin_list.min_refresh; |
3340 | uint32_t max_refresh = subvp_active_margin_list.max_refresh; |
3341 | uint32_t i; |
3342 | |
3343 | for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) { |
3344 | uint32_t width = subvp_active_margin_list.res[i].width; |
3345 | uint32_t height = subvp_active_margin_list.res[i].height; |
3346 | |
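		/* Rounded-up refresh rate in Hz:
		 * ceil(pix_clk_hz / (h_total * v_total)), computed as
		 * (pix_clk + pixels_per_frame - 1) / pixels_per_frame.
		 */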
3347 | refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + |
3348 | pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); |
		refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
		refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
3351 | |
3352 | if (refresh_rate >= min_refresh && refresh_rate <= max_refresh && |
3353 | dcn32_check_native_scaling_for_res(pipe, width, height)) { |
3354 | allow = true; |
3355 | break; |
3356 | } |
3357 | } |
3358 | return allow; |
3359 | } |
3360 | |
3361 | /** |
3362 | * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp |
3363 | * |
3364 | * @dc: Current DC state |
3365 | * @context: New DC state to be programmed |
3366 | * @pipe: Pipe to be considered for use in subvp |
3367 | * |
3368 | * On high refresh rate display configs, we will allow subvp under the following conditions: |
3369 | * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440 |
 * 2. Refresh rate is between 120hz - 175hz
3371 | * 3. No scaling |
3372 | * 4. Freesync is inactive |
3373 | * 5. For single display cases, freesync must be disabled |
3374 | * |
3375 | * Return: True if pipe can be used for subvp, false otherwise |
3376 | */ |
3377 | bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe) |
3378 | { |
3379 | bool allow = false; |
3380 | uint32_t refresh_rate = 0; |
3381 | uint32_t subvp_min_refresh = subvp_high_refresh_list.min_refresh; |
3382 | uint32_t subvp_max_refresh = subvp_high_refresh_list.max_refresh; |
3383 | uint32_t min_refresh = subvp_max_refresh; |
3384 | uint32_t i; |
3385 | |
3386 | /* Only allow SubVP on high refresh displays if all connected displays |
3387 | * are considered "high refresh" (i.e. >= 120hz). We do not want to |
3388 | * allow combinations such as 120hz (SubVP) + 60hz (SubVP). |
3389 | */ |
3390 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
3391 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
3392 | |
3393 | if (!pipe_ctx->stream) |
3394 | continue; |
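		/* Same rounded-up refresh rate calculation as in
		 * dcn32_allow_subvp_with_active_margin():
		 * ceil(pix_clk_hz / (h_total * v_total)).
		 */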
3395 | refresh_rate = (pipe_ctx->stream->timing.pix_clk_100hz * 100 + |
3396 | pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total - 1) |
3397 | / (double)(pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total); |
3398 | |
3399 | if (refresh_rate < min_refresh) |
3400 | min_refresh = refresh_rate; |
3401 | } |
3402 | |
3403 | if (!dc->debug.disable_subvp_high_refresh && min_refresh >= subvp_min_refresh && pipe->stream && |
3404 | pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) { |
3405 | refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + |
3406 | pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) |
3407 | / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); |
3408 | if (refresh_rate >= subvp_min_refresh && refresh_rate <= subvp_max_refresh) { |
3409 | for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) { |
3410 | uint32_t width = subvp_high_refresh_list.res[i].width; |
3411 | uint32_t height = subvp_high_refresh_list.res[i].height; |
3412 | |
3413 | if (dcn32_check_native_scaling_for_res(pipe, width, height)) { |
3414 | if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) { |
3415 | allow = true; |
3416 | break; |
3417 | } |
3418 | } |
3419 | } |
3420 | } |
3421 | } |
3422 | return allow; |
3423 | } |
3424 | |
3425 | /** |
3426 | * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy |
3427 | * |
3428 | * @dc: Current DC state |
3429 | * @context: New DC state to be programmed |
3430 | * |
3431 | * Return: Max vratio for prefetch |
3432 | */ |
3433 | double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context) |
3434 | { |
3435 | double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4 |
3436 | int i; |
3437 | |
3438 | /* For single display MPO configs, allow the max vratio to be 8 |
3439 | * if any plane is YUV420 format |
3440 | */ |
3441 | if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) { |
3442 | for (i = 0; i < context->stream_status[0].plane_count; i++) { |
3443 | if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr || |
3444 | context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) { |
3445 | max_vratio_pre = __DML_MAX_VRATIO_PRE__; |
3446 | } |
3447 | } |
3448 | } |
3449 | return max_vratio_pre; |
3450 | } |
3451 | |
3452 | /** |
3453 | * dcn32_assign_fpo_vactive_candidate - Assign the FPO stream candidate for FPO + VActive case |
3454 | * |
3455 | * This function chooses the FPO candidate stream for FPO + VActive cases (2 stream config). |
 * For FPO + VActive cases, the assumption is that one display has ActiveMargin > 0, and the
3457 | * other display has ActiveMargin <= 0. This function will choose the pipe/stream that has |
3458 | * ActiveMargin <= 0 to be the FPO stream candidate if found. |
3459 | * |
3460 | * |
3461 | * @dc: current dc state |
3462 | * @context: new dc state |
3463 | * @fpo_candidate_stream: pointer to FPO stream candidate if one is found |
3464 | * |
3465 | * Return: void |
3466 | */ |
3467 | void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream) |
3468 | { |
3469 | unsigned int i, pipe_idx; |
3470 | const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
3471 | |
3472 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
3473 | const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
3474 | |
		/* In DCN32/321, FPO uses per-pipe P-State force.
		 * If there are no planes, HUBP is power gated and
		 * therefore programming UCLK_PSTATE_FORCE does
		 * nothing (P-State will always be asserted naturally
		 * on a pipe that has HUBP power gated). Therefore we
		 * only want to enable FPO if the FPO pipe has both
		 * a stream and a plane.
		 */
3483 | if (!pipe->stream || !pipe->plane_state) |
3484 | continue; |
3485 | |
3486 | if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { |
3487 | *fpo_candidate_stream = pipe->stream; |
3488 | break; |
3489 | } |
3490 | pipe_idx++; |
3491 | } |
3492 | } |
3493 | |
3494 | /** |
3495 | * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE |
3496 | * |
3497 | * @dc: current dc state |
3498 | * @context: new dc state |
 * @vactive_margin_req_us: The vactive margin required for a vactive pipe to be considered "found"
3500 | * |
3501 | * Return: True if VACTIVE display is found, false otherwise |
3502 | */ |
3503 | bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req_us) |
3504 | { |
3505 | unsigned int i, pipe_idx; |
3506 | const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; |
3507 | bool vactive_found = false; |
3508 | unsigned int blank_us = 0; |
3509 | |
3510 | for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { |
3511 | const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
3512 | |
3513 | if (!pipe->stream) |
3514 | continue; |
3515 | |
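		/* Vertical blanking time in microseconds:
		 * (vblank lines * h_total) / pix_clk_hz, scaled by 1e6.
		 */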
3516 | blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total / |
3517 | (double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000; |
3518 | if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us && |
3519 | !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < dc->debug.fpo_vactive_max_blank_us) { |
3520 | vactive_found = true; |
3521 | break; |
3522 | } |
3523 | pipe_idx++; |
3524 | } |
3525 | return vactive_found; |
3526 | } |
3527 | |
3528 | void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb) |
3529 | { |
3530 | dc_assert_fp_enabled(); |
3531 | dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0; |
3532 | } |
3533 | |
3534 | void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context) |
3535 | { |
3536 | // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue) |
3537 | if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) && |
3538 | dc->dml.soc.num_chans <= 8) { |
3539 | int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; |
3540 | |
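		/* Per the WA above, MEMCLK DPM0 is treated as the strobe mode
		 * to avoid; if DML chose it and a higher level exists, bump
		 * the request to at least DPM1.
		 */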
3541 | if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 && |
3542 | num_mclk_levels > 1) { |
3543 | context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16; |
3544 | context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; |
3545 | } |
3546 | } |
3547 | } |
3548 | |