1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright 2022 Advanced Micro Devices, Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: AMD |
24 | * |
25 | */ |
26 | |
27 | #include "dm_services.h" |
28 | #include "dc.h" |
29 | |
30 | #include "dcn32_init.h" |
31 | |
32 | #include "resource.h" |
33 | #include "include/irq_service_interface.h" |
34 | #include "dcn32_resource.h" |
35 | |
36 | #include "dcn20/dcn20_resource.h" |
37 | #include "dcn30/dcn30_resource.h" |
38 | |
39 | #include "dcn10/dcn10_ipp.h" |
40 | #include "dcn30/dcn30_hubbub.h" |
41 | #include "dcn31/dcn31_hubbub.h" |
42 | #include "dcn32/dcn32_hubbub.h" |
43 | #include "dcn32/dcn32_mpc.h" |
44 | #include "dcn32_hubp.h" |
45 | #include "irq/dcn32/irq_service_dcn32.h" |
46 | #include "dcn32/dcn32_dpp.h" |
47 | #include "dcn32/dcn32_optc.h" |
48 | #include "dcn20/dcn20_hwseq.h" |
49 | #include "dcn30/dcn30_hwseq.h" |
50 | #include "dce110/dce110_hwseq.h" |
51 | #include "dcn30/dcn30_opp.h" |
52 | #include "dcn20/dcn20_dsc.h" |
53 | #include "dcn30/dcn30_vpg.h" |
54 | #include "dcn30/dcn30_afmt.h" |
55 | #include "dcn30/dcn30_dio_stream_encoder.h" |
56 | #include "dcn32/dcn32_dio_stream_encoder.h" |
57 | #include "dcn31/dcn31_hpo_dp_stream_encoder.h" |
58 | #include "dcn31/dcn31_hpo_dp_link_encoder.h" |
59 | #include "dcn32/dcn32_hpo_dp_link_encoder.h" |
60 | #include "dcn31/dcn31_apg.h" |
61 | #include "dcn31/dcn31_dio_link_encoder.h" |
62 | #include "dcn32/dcn32_dio_link_encoder.h" |
63 | #include "dce/dce_clock_source.h" |
64 | #include "dce/dce_audio.h" |
65 | #include "dce/dce_hwseq.h" |
66 | #include "clk_mgr.h" |
67 | #include "virtual/virtual_stream_encoder.h" |
68 | #include "dml/display_mode_vba.h" |
69 | #include "dcn32/dcn32_dccg.h" |
70 | #include "dcn10/dcn10_resource.h" |
71 | #include "link.h" |
72 | #include "dcn31/dcn31_panel_cntl.h" |
73 | |
74 | #include "dcn30/dcn30_dwb.h" |
75 | #include "dcn32/dcn32_mmhubbub.h" |
76 | |
77 | #include "dcn/dcn_3_2_0_offset.h" |
78 | #include "dcn/dcn_3_2_0_sh_mask.h" |
79 | #include "nbio/nbio_4_3_0_offset.h" |
80 | |
81 | #include "reg_helper.h" |
82 | #include "dce/dmub_abm.h" |
83 | #include "dce/dmub_psr.h" |
84 | #include "dce/dce_aux.h" |
85 | #include "dce/dce_i2c.h" |
86 | |
87 | #include "dml/dcn30/display_mode_vba_30.h" |
88 | #include "vm_helper.h" |
89 | #include "dcn20/dcn20_vmid.h" |
90 | #include "dml/dcn32/dcn32_fpu.h" |
91 | |
92 | #include "dml2/dml2_wrapper.h" |
93 | |
/* Expands to nothing: no per-file logger context is set up here. */
#define DC_LOGGER_INIT(logger)

/* Indices into the clock-source/PLL register tables; one entry per PLL
 * plus a terminating count (DCN32_CLK_SRC_TOTAL).
 */
enum dcn32_clk_src_array_id {
	DCN32_CLK_SRC_PLL0,
	DCN32_CLK_SRC_PLL1,
	DCN32_CLK_SRC_PLL2,
	DCN32_CLK_SRC_PLL3,
	DCN32_CLK_SRC_PLL4,
	DCN32_CLK_SRC_TOTAL
};
104 | |
/* begin *********************
 * Macros to expand the register list macros defined in the HW object
 * header files.  The "_RI" (runtime-init) users below assign into
 * REG_STRUCT, which each caller redefines (via #undef/#define) to name
 * the table being filled; addresses are computed at runtime from
 * ctx->dcn_reg_offsets / ctx->nbio_reg_offsets.
 */

/* DCN register base: per-segment offset table supplied by the context. */
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]

#define BASE(seg) BASE_INNER(seg)

/* Single register, single-instance REG_STRUCT. */
#define SR(reg_name)\
	REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
		reg ## reg_name
/* Single register, array-of-instances REG_STRUCT, indexed by id. */
#define SR_ARR(reg_name, id) \
	REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name

/* Assign an explicit value instead of a computed register offset. */
#define SR_ARR_INIT(reg_name, id, value) \
	REG_STRUCT[id].reg_name = value

/* Per-block-instance register (e.g. OTG0_..., OTG1_...). */
#define SRI(reg_name, block, id)\
	REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

#define SRI_ARR(reg_name, block, id)\
	REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

/* I2C variants: hardware instances are numbered from 1, tables from 0. */
#define SR_ARR_I2C(reg_name, id) \
	REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name

#define SRI_ARR_I2C(reg_name, block, id)\
	REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

/* Table index and hardware instance id differ (lettered instances). */
#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
	REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

/* Designated-initializer forms (used inside static initializers). */
#define SRI2(reg_name, block, id)\
	.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
		reg ## reg_name
#define SRI2_ARR(reg_name, block, id)\
	REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
		reg ## reg_name

#define SRIR(var_name, reg_name, block, id)\
	.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

/* Register array member indexed by block instance id. */
#define SRII(reg_name, block, id)\
	REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

#define SRII_ARR_2(reg_name, block, id, inst)\
	REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

#define SRII_MPC_RMU(reg_name, block, id)\
	.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

/* DWB registers whose struct member name differs from the HW name. */
#define SRII_DWB(reg_name, temp_name, block, id)\
	REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## temp_name

#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
	.field_name = reg_name ## __ ## field_name ## post_fix

#define DCCG_SRII(reg_name, block, id)\
	REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

#define VUPDATE_SRII(reg_name, block, id)\
	REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
		reg ## reg_name ## _ ## block ## id

/* NBIO: same scheme, separate offset table. */
#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
	REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
		regBIF_BX0_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\
	REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
		regBIF_BX0_ ## reg_name

#undef CTX
#define CTX ctx
#define REG(reg_name) \
	(ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
197 | |
/* BIOS scratch registers, resolved through the NBIO offset table. */
static struct bios_registers bios_regs;

#define bios_regs_init() \
	( \
	NBIO_SR(BIOS_SCRATCH_3),\
	NBIO_SR(BIOS_SCRATCH_6)\
	)

#define clk_src_regs_init(index, pllid)\
	CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)

/* One entry per PLL; see enum dcn32_clk_src_array_id. */
static struct dce110_clk_src_regs clk_src_regs[5];

static const struct dce110_clk_src_shift cs_shift = {
	CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
	CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};

/* ABM (adaptive backlight) per-instance register tables. */
#define abm_regs_init(id)\
	ABM_DCN32_REG_LIST_RI(id)

static struct dce_abm_registers abm_regs[4];

static const struct dce_abm_shift abm_shift = {
	ABM_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
	ABM_MASK_SH_LIST_DCN32(_MASK)
};

/* Azalia audio endpoint register tables. */
#define audio_regs_init(id)\
	AUD_COMMON_REG_LIST_RI(id)

static struct dce_audio_registers audio_regs[5];

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
	AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
	DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_audio_mask audio_mask = {
	DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

/* VPG (video packet generator) register tables. */
#define vpg_regs_init(id)\
	VPG_DCN3_REG_LIST_RI(id)

static struct dcn30_vpg_registers vpg_regs[10];

static const struct dcn30_vpg_shift vpg_shift = {
	DCN3_VPG_MASK_SH_LIST(__SHIFT)
};

static const struct dcn30_vpg_mask vpg_mask = {
	DCN3_VPG_MASK_SH_LIST(_MASK)
};

/* AFMT (audio format) register tables. */
#define afmt_regs_init(id)\
	AFMT_DCN3_REG_LIST_RI(id)

static struct dcn30_afmt_registers afmt_regs[6];

static const struct dcn30_afmt_shift afmt_shift = {
	DCN3_AFMT_MASK_SH_LIST(__SHIFT)
};

static const struct dcn30_afmt_mask afmt_mask = {
	DCN3_AFMT_MASK_SH_LIST(_MASK)
};

/* APG (audio packet generator, DP HPO path) register tables. */
#define apg_regs_init(id)\
	APG_DCN31_REG_LIST_RI(id)

static struct dcn31_apg_registers apg_regs[4];

static const struct dcn31_apg_shift apg_shift = {
	DCN31_APG_MASK_SH_LIST(__SHIFT)
};

static const struct dcn31_apg_mask apg_mask = {
	DCN31_APG_MASK_SH_LIST(_MASK)
};

/* DIO stream encoder register tables. */
#define stream_enc_regs_init(id)\
	SE_DCN32_REG_LIST_RI(id)

static struct dcn10_stream_enc_registers stream_enc_regs[5];

static const struct dcn10_stream_encoder_shift se_shift = {
	SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn10_stream_encoder_mask se_mask = {
	SE_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
301 | |
302 | |
/* Link-encoder AUX channel register tables. */
#define aux_regs_init(id)\
	DCN2_AUX_REG_LIST_RI(id)

static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];

/* Link-encoder HPD register tables. */
#define hpd_regs_init(id)\
	HPD_REG_LIST_RI(id)

static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];

#define link_regs_init(id, phyid)\
	( \
	LE_DCN31_REG_LIST_RI(id), \
	UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
	)
	/*DPCS_DCN31_REG_LIST(id),*/ \

static struct dcn10_link_enc_registers link_enc_regs[5];

static const struct dcn10_link_enc_shift le_shift = {
	LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
	//DPCS_DCN31_MASK_SH_LIST(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
	LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
	//DPCS_DCN31_MASK_SH_LIST(_MASK)
};

/* HPO (hybrid protocol, DP2.x 128b/132b) stream encoder tables. */
#define hpo_dp_stream_encoder_reg_init(id)\
	DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)

static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];

static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
	DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
};

static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
	DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
};


#define hpo_dp_link_encoder_reg_init(id)\
	DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
	/*DCN3_1_RDPCSTX_REG_LIST(0),*/
	/*DCN3_1_RDPCSTX_REG_LIST(1),*/
	/*DCN3_1_RDPCSTX_REG_LIST(2),*/
	/*DCN3_1_RDPCSTX_REG_LIST(3),*/

static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];

/* NOTE(review): the register list above uses the DCN3_1 macro while the
 * shift/mask lists below use DCN3_2 — presumably intentional reuse, but
 * worth confirming against the encoder headers. */
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
	DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
};

static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
	DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
362 | |
/* DPP (display pipe and plane) register tables. */
#define dpp_regs_init(id)\
	DPP_REG_LIST_DCN30_COMMON_RI(id)

static struct dcn3_dpp_registers dpp_regs[4];

static const struct dcn3_dpp_shift tf_shift = {
	DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
};

static const struct dcn3_dpp_mask tf_mask = {
	DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK)
};


/* OPP (output pixel processor) register tables. */
#define opp_regs_init(id)\
	OPP_REG_LIST_DCN30_RI(id)

static struct dcn20_opp_registers opp_regs[4];

static const struct dcn20_opp_shift opp_shift = {
	OPP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn20_opp_mask opp_mask = {
	OPP_MASK_SH_LIST_DCN20(_MASK)
};

/* NOTE(review): AUX_RESET_MASK is assigned twice with the same value;
 * the duplicate is harmless but looks like a copy/paste leftover. */
#define aux_engine_regs_init(id)\
	( \
	AUX_COMMON_REG_LIST0_RI(id), \
	SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
	SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
	SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
	SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
	)

static struct dce110_aux_registers aux_engine_regs[5];

static const struct dce110_aux_registers_shift aux_shift = {
	DCN_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
	DCN_AUX_MASK_SH_LIST(_MASK)
};
408 | |
/* DWB (display writeback) controller register tables. */
#define dwbc_regs_dcn3_init(id)\
	DWBC_COMMON_REG_LIST_DCN30_RI(id)

static struct dcn30_dwbc_registers dwbc30_regs[1];

static const struct dcn30_dwbc_shift dwbc30_shift = {
	DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn30_dwbc_mask dwbc30_mask = {
	DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};

/* MCIF writeback (memory interface for DWB) register tables. */
#define mcif_wb_regs_dcn3_init(id)\
	MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)

static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];

static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};

/* DSC (display stream compression) register tables. */
#define dsc_regsDCN20_init(id)\
	DSC_REG_LIST_DCN20_RI(id)

static struct dcn20_dsc_registers dsc_regs[4];

static const struct dcn20_dsc_shift dsc_shift = {
	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};

static const struct dcn20_dsc_mask dsc_mask = {
	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};

static struct dcn30_mpc_registers mpc_regs;

/* Fills mpc_regs for four MPCC instances plus output and DWB muxes. */
#define dcn_mpc_regs_init() \
	MPC_REG_LIST_DCN3_2_RI(0),\
	MPC_REG_LIST_DCN3_2_RI(1),\
	MPC_REG_LIST_DCN3_2_RI(2),\
	MPC_REG_LIST_DCN3_2_RI(3),\
	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
	MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
	MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)

static const struct dcn30_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn30_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};

/* OPTC (timing generator) register tables. */
#define optc_regs_init(id)\
	OPTC_COMMON_REG_LIST_DCN3_2_RI(id)

static struct dcn_optc_registers optc_regs[4];

static const struct dcn_optc_shift optc_shift = {
	OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
};

static const struct dcn_optc_mask optc_mask = {
	OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};

/* HUBP (display pipe front end) register tables. */
#define hubp_regs_init(id)\
	HUBP_REG_LIST_DCN32_RI(id)

static struct dcn_hubp2_registers hubp_regs[4];


static const struct dcn_hubp2_shift hubp_shift = {
	HUBP_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn_hubp2_mask hubp_mask = {
	HUBP_MASK_SH_LIST_DCN32(_MASK)
};

/* HUBBUB (single instance) register table. */
static struct dcn_hubbub_registers hubbub_reg;
#define hubbub_reg_init()\
	HUBBUB_REG_LIST_DCN32_RI(0)

static const struct dcn_hubbub_shift hubbub_shift = {
	HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
	HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};

/* DCCG (display clock generator, single instance) register table. */
static struct dccg_registers dccg_regs;

#define dccg_regs_init()\
	DCCG_REG_LIST_DCN32_RI()

static const struct dccg_shift dccg_shift = {
	DCCG_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct dccg_mask dccg_mask = {
	DCCG_MASK_SH_LIST_DCN32(_MASK)
};
520 | |
521 | |
#define SRII2(reg_name_pre, reg_name_post, id)\
	.reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
		## id ## _ ## reg_name_post ## _BASE_IDX) + \
		reg ## reg_name_pre ## id ## _ ## reg_name_post


/* Registers accessed by the DCN32 hardware sequencer (power gating,
 * clock gating, CRC, VGA, Azalia DTO, per-OTG pixel rate controls). */
#define HWSEQ_DCN32_REG_LIST()\
	SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
	SR(DIO_MEM_PWR_CTRL), \
	SR(ODM_MEM_PWR_CTRL3), \
	SR(MMHUBBUB_MEM_PWR_CNTL), \
	SR(DCCG_GATE_DISABLE_CNTL), \
	SR(DCCG_GATE_DISABLE_CNTL2), \
	SR(DCFCLK_CNTL),\
	SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
	SRII(PIXEL_RATE_CNTL, OTG, 0), \
	SRII(PIXEL_RATE_CNTL, OTG, 1),\
	SRII(PIXEL_RATE_CNTL, OTG, 2),\
	SRII(PIXEL_RATE_CNTL, OTG, 3),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
	SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
	SR(MICROSECOND_TIME_BASE_DIV), \
	SR(MILLISECOND_TIME_BASE_DIV), \
	SR(DISPCLK_FREQ_CHANGE_CNTL), \
	SR(RBBMIF_TIMEOUT_DIS), \
	SR(RBBMIF_TIMEOUT_DIS_2), \
	SR(DCHUBBUB_CRC_CTRL), \
	SR(DPP_TOP0_DPP_CRC_CTRL), \
	SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
	SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
	SR(MPC_CRC_CTRL), \
	SR(MPC_CRC_RESULT_GB), \
	SR(MPC_CRC_RESULT_C), \
	SR(MPC_CRC_RESULT_AR), \
	SR(DOMAIN0_PG_CONFIG), \
	SR(DOMAIN1_PG_CONFIG), \
	SR(DOMAIN2_PG_CONFIG), \
	SR(DOMAIN3_PG_CONFIG), \
	SR(DOMAIN16_PG_CONFIG), \
	SR(DOMAIN17_PG_CONFIG), \
	SR(DOMAIN18_PG_CONFIG), \
	SR(DOMAIN19_PG_CONFIG), \
	SR(DOMAIN0_PG_STATUS), \
	SR(DOMAIN1_PG_STATUS), \
	SR(DOMAIN2_PG_STATUS), \
	SR(DOMAIN3_PG_STATUS), \
	SR(DOMAIN16_PG_STATUS), \
	SR(DOMAIN17_PG_STATUS), \
	SR(DOMAIN18_PG_STATUS), \
	SR(DOMAIN19_PG_STATUS), \
	SR(D1VGA_CONTROL), \
	SR(D2VGA_CONTROL), \
	SR(D3VGA_CONTROL), \
	SR(D4VGA_CONTROL), \
	SR(D5VGA_CONTROL), \
	SR(D6VGA_CONTROL), \
	SR(DC_IP_REQUEST_CNTL), \
	SR(AZALIA_AUDIO_DTO), \
	SR(AZALIA_CONTROLLER_CLOCK_GATING)
583 | |
static struct dce_hwseq_registers hwseq_reg;

#define hwseq_reg_init()\
	HWSEQ_DCN32_REG_LIST()

/* Shift/mask fields for the hardware-sequencer register list above. */
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
	HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
	HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
	HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
	HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
	HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
	HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
	HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh)

static const struct dce_hwseq_shift hwseq_shift = {
	HWSEQ_DCN32_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
	HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
/* GPUVM VMID register tables (16 VMIDs, see res_cap_dcn32.num_vmid). */
#define vmid_regs_init(id)\
	DCN20_VMID_REG_LIST_RI(id)

static struct dcn_vmid_registers vmid_regs[16];

static const struct dcn20_vmid_shift vmid_shifts = {
	DCN20_VMID_MASK_SH_LIST(__SHIFT)
};

static const struct dcn20_vmid_mask vmid_masks = {
	DCN20_VMID_MASK_SH_LIST(_MASK)
};
642 | |
/* Per-ASIC hardware resource counts for DCN 3.2; these bound the
 * instance-creation loops below (e.g. the VMID loop in
 * dcn32_hubbub_create) and must match the static table sizes above. */
static const struct resource_caps res_cap_dcn32 = {
	.num_timing_generator = 4,
	.num_opp = 4,
	.num_video_plane = 4,
	.num_audio = 5,
	.num_stream_encoder = 5,
	.num_hpo_dp_stream_encoder = 4,
	.num_hpo_dp_link_encoder = 2,
	.num_pll = 5,
	.num_dwb = 1,
	.num_ddc = 5,
	.num_vmid = 16,
	.num_mpc_3dlut = 4,
	.num_dsc = 4,
};
658 | |
/* Capabilities advertised for each plane: supported pixel formats and
 * scaling limits (factors are in units of 1/1000). */
static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.per_pixel_alpha = true,

	.pixel_format_support = {
			.argb8888 = true,
			.nv12 = true,
			.fp16 = true,
			.p010 = true,
			.ayuv = false,
	},

	/* 16x max upscale (16000/1000). */
	.max_upscale_factor = {
			.argb8888 = 16000,
			.nv12 = 16000,
			.fp16 = 16000
	},

	// 6:1 downscaling ratio: 1000/6 = 166.666
	.max_downscale_factor = {
			.argb8888 = 167,
			.nv12 = 167,
			.fp16 = 167
	},
	/* NOTE(review): positional initializers for the next two struct
	 * members — presumably minimum width/height; confirm against
	 * struct dc_plane_cap. */
	64,
	64
};
686 | |
/* Default debug/tuning options applied when the driver constructs the
 * DCN 3.2 resource pool; individual flags may be overridden elsewhere. */
static const struct dc_debug_options debug_defaults_drv = {
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = false,
	.pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,
	.performance_trace = false,
	.max_downscale_src_width = 7680,/*upto 8K*/
	.disable_pplib_wm_range = false,
	.scl_reset_length10 = true,
	.sanity_checks = false,
	.underflow_assert_delay_us = 0xFFFFFFFF,
	.dwb_fi_phase = -1, // -1 = disable,
	.dmub_command_table = true,
	.enable_mem_low_power = {
		.bits = {
			.vga = false,
			.i2c = false,
			.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
			.dscl = false,
			.cm = false,
			.mpc = false,
			.optc = true,
		}
	},
	.use_max_lb = true,
	.force_disable_subvp = false,
	.exit_idle_opt_for_cursor_updates = true,
	.using_dml2 = false,
	.enable_single_display_2to1_odm_policy = true,

	/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
	.enable_double_buffered_dsc_pg_support = true,
	.enable_dp_dig_pixel_rate_div_policy = 1,
	.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
	.alloc_extra_way_for_cursor = true,
	.min_prefetch_in_strobe_ns = 60000, // 60us
	.disable_unbounded_requesting = false,
	.override_dispclk_programming = true,
	.disable_fpo_optimizations = false,
	.fpo_vactive_margin_us = 2000, // 2000us
	.disable_fpo_vactive = false,
	.disable_boot_optimizations = false,
	.disable_subvp_high_refresh = false,
	.disable_dp_plus_plus_wa = true,
	.fpo_vactive_min_active_margin_us = 200,
	.fpo_vactive_max_blank_us = 1000,
	.enable_legacy_fast_update = false,
};
740 | |
741 | static struct dce_aux *dcn32_aux_engine_create( |
742 | struct dc_context *ctx, |
743 | uint32_t inst) |
744 | { |
745 | struct aux_engine_dce110 *aux_engine = |
746 | kzalloc(size: sizeof(struct aux_engine_dce110), GFP_KERNEL); |
747 | |
748 | if (!aux_engine) |
749 | return NULL; |
750 | |
751 | #undef REG_STRUCT |
752 | #define REG_STRUCT aux_engine_regs |
753 | aux_engine_regs_init(0), |
754 | aux_engine_regs_init(1), |
755 | aux_engine_regs_init(2), |
756 | aux_engine_regs_init(3), |
757 | aux_engine_regs_init(4); |
758 | |
759 | dce110_aux_engine_construct(aux_engine110: aux_engine, ctx, inst, |
760 | timeout_period: SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, |
761 | regs: &aux_engine_regs[inst], |
762 | mask: &aux_mask, |
763 | shift: &aux_shift, |
764 | is_ext_aux_timeout_configurable: ctx->dc->caps.extended_aux_timeout_support); |
765 | |
766 | return &aux_engine->base; |
767 | } |
/* I2C HW engine register tables (instances numbered from 1 in HW;
 * SR_ARR_I2C/SRI_ARR_I2C map them to 0-based table slots). */
#define i2c_inst_regs_init(id)\
	I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)

static struct dce_i2c_registers i2c_hw_regs[5];

static const struct dce_i2c_shift i2c_shifts = {
	I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dce_i2c_mask i2c_masks = {
	I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
780 | |
781 | static struct dce_i2c_hw *dcn32_i2c_hw_create( |
782 | struct dc_context *ctx, |
783 | uint32_t inst) |
784 | { |
785 | struct dce_i2c_hw *dce_i2c_hw = |
786 | kzalloc(size: sizeof(struct dce_i2c_hw), GFP_KERNEL); |
787 | |
788 | if (!dce_i2c_hw) |
789 | return NULL; |
790 | |
791 | #undef REG_STRUCT |
792 | #define REG_STRUCT i2c_hw_regs |
793 | i2c_inst_regs_init(1), |
794 | i2c_inst_regs_init(2), |
795 | i2c_inst_regs_init(3), |
796 | i2c_inst_regs_init(4), |
797 | i2c_inst_regs_init(5); |
798 | |
799 | dcn2_i2c_hw_construct(dce_i2c_hw, ctx, engine_id: inst, |
800 | regs: &i2c_hw_regs[inst], shifts: &i2c_shifts, masks: &i2c_masks); |
801 | |
802 | return dce_i2c_hw; |
803 | } |
804 | |
805 | static struct clock_source *dcn32_clock_source_create( |
806 | struct dc_context *ctx, |
807 | struct dc_bios *bios, |
808 | enum clock_source_id id, |
809 | const struct dce110_clk_src_regs *regs, |
810 | bool dp_clk_src) |
811 | { |
812 | struct dce110_clk_src *clk_src = |
813 | kzalloc(size: sizeof(struct dce110_clk_src), GFP_KERNEL); |
814 | |
815 | if (!clk_src) |
816 | return NULL; |
817 | |
818 | if (dcn31_clk_src_construct(clk_src, ctx, bios, id, |
819 | regs, cs_shift: &cs_shift, cs_mask: &cs_mask)) { |
820 | clk_src->base.dp_clk_src = dp_clk_src; |
821 | return &clk_src->base; |
822 | } |
823 | |
824 | kfree(objp: clk_src); |
825 | BREAK_TO_DEBUGGER(); |
826 | return NULL; |
827 | } |
828 | |
829 | static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx) |
830 | { |
831 | int i; |
832 | |
833 | struct dcn20_hubbub *hubbub2 = kzalloc(size: sizeof(struct dcn20_hubbub), |
834 | GFP_KERNEL); |
835 | |
836 | if (!hubbub2) |
837 | return NULL; |
838 | |
839 | #undef REG_STRUCT |
840 | #define REG_STRUCT hubbub_reg |
841 | hubbub_reg_init(); |
842 | |
843 | #undef REG_STRUCT |
844 | #define REG_STRUCT vmid_regs |
845 | vmid_regs_init(0), |
846 | vmid_regs_init(1), |
847 | vmid_regs_init(2), |
848 | vmid_regs_init(3), |
849 | vmid_regs_init(4), |
850 | vmid_regs_init(5), |
851 | vmid_regs_init(6), |
852 | vmid_regs_init(7), |
853 | vmid_regs_init(8), |
854 | vmid_regs_init(9), |
855 | vmid_regs_init(10), |
856 | vmid_regs_init(11), |
857 | vmid_regs_init(12), |
858 | vmid_regs_init(13), |
859 | vmid_regs_init(14), |
860 | vmid_regs_init(15); |
861 | |
862 | hubbub32_construct(hubbub2, ctx, |
863 | hubbub_regs: &hubbub_reg, |
864 | hubbub_shift: &hubbub_shift, |
865 | hubbub_mask: &hubbub_mask, |
866 | det_size_kb: ctx->dc->dml.ip.det_buffer_size_kbytes, |
867 | pixel_chunk_size_kb: ctx->dc->dml.ip.pixel_chunk_size_kbytes, |
868 | config_return_buffer_size_kb: ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); |
869 | |
870 | |
871 | for (i = 0; i < res_cap_dcn32.num_vmid; i++) { |
872 | struct dcn20_vmid *vmid = &hubbub2->vmid[i]; |
873 | |
874 | vmid->ctx = ctx; |
875 | |
876 | vmid->regs = &vmid_regs[i]; |
877 | vmid->shifts = &vmid_shifts; |
878 | vmid->masks = &vmid_masks; |
879 | } |
880 | |
881 | return &hubbub2->base; |
882 | } |
883 | |
884 | static struct hubp *dcn32_hubp_create( |
885 | struct dc_context *ctx, |
886 | uint32_t inst) |
887 | { |
888 | struct dcn20_hubp *hubp2 = |
889 | kzalloc(size: sizeof(struct dcn20_hubp), GFP_KERNEL); |
890 | |
891 | if (!hubp2) |
892 | return NULL; |
893 | |
894 | #undef REG_STRUCT |
895 | #define REG_STRUCT hubp_regs |
896 | hubp_regs_init(0), |
897 | hubp_regs_init(1), |
898 | hubp_regs_init(2), |
899 | hubp_regs_init(3); |
900 | |
901 | if (hubp32_construct(hubp2, ctx, inst, |
902 | hubp_regs: &hubp_regs[inst], hubp_shift: &hubp_shift, hubp_mask: &hubp_mask)) |
903 | return &hubp2->base; |
904 | |
905 | BREAK_TO_DEBUGGER(); |
906 | kfree(objp: hubp2); |
907 | return NULL; |
908 | } |
909 | |
910 | static void dcn32_dpp_destroy(struct dpp **dpp) |
911 | { |
912 | kfree(TO_DCN30_DPP(*dpp)); |
913 | *dpp = NULL; |
914 | } |
915 | |
/* Allocate and initialize one DPP (per-pipe scaler / color block) instance.
 * Returns the base dpp on success, NULL on allocation or construct failure.
 */
static struct dpp *dcn32_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn3_dpp *dpp3 =
		kzalloc(size: sizeof(struct dcn3_dpp), GFP_KERNEL);

	if (!dpp3)
		return NULL;

	/* Route the *_regs_init() macros at dpp_regs so the per-instance
	 * register offsets get populated for this ASIC.
	 */
#undef REG_STRUCT
#define REG_STRUCT dpp_regs
	dpp_regs_init(0),
	dpp_regs_init(1),
	dpp_regs_init(2),
	dpp_regs_init(3);

	if (dpp32_construct(dpp3, ctx, inst,
			tf_regs: &dpp_regs[inst], tf_shift: &tf_shift, tf_mask: &tf_mask))
		return &dpp3->base;

	/* Construct failed: trap in debug builds, then free. */
	BREAK_TO_DEBUGGER();
	kfree(objp: dpp3);
	return NULL;
}
941 | |
/* Allocate and initialize the MPC (multi-pipe combiner / blend tree).
 * Returns the base mpc on success, NULL if allocation fails.
 */
static struct mpc *dcn32_mpc_create(
	struct dc_context *ctx,
	int num_mpcc,
	int num_rmu)
{
	struct dcn30_mpc *mpc30 = kzalloc(size: sizeof(struct dcn30_mpc),
					  GFP_KERNEL);

	if (!mpc30)
		return NULL;

	/* Populate mpc_regs with this ASIC's register offsets. */
#undef REG_STRUCT
#define REG_STRUCT mpc_regs
	dcn_mpc_regs_init();

	dcn32_mpc_construct(mpc30, ctx,
			mpc_regs: &mpc_regs,
			mpc_shift: &mpc_shift,
			mpc_mask: &mpc_mask,
			num_mpcc,
			num_rmu);

	return &mpc30->base;
}
966 | |
/* Allocate and initialize one OPP (output pixel processor) instance.
 * Returns the base opp on success, NULL if allocation fails.
 */
static struct output_pixel_processor *dcn32_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_opp *opp2 =
		kzalloc(size: sizeof(struct dcn20_opp), GFP_KERNEL);

	if (!opp2) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	/* Fill in per-instance OPP register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT opp_regs
	opp_regs_init(0),
	opp_regs_init(1),
	opp_regs_init(2),
	opp_regs_init(3);

	dcn20_opp_construct(oppn20: opp2, ctx, inst,
			regs: &opp_regs[inst], opp_shift: &opp_shift, opp_mask: &opp_mask);
	return &opp2->base;
}
989 | |
990 | |
/* Allocate and initialize one OPTC timing generator instance.
 * Returns the base timing generator on success, NULL if allocation fails.
 */
static struct timing_generator *dcn32_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct optc *tgn10 =
		kzalloc(size: sizeof(struct optc), GFP_KERNEL);

	if (!tgn10)
		return NULL;

	/* Fill in per-instance OPTC register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT optc_regs
	optc_regs_init(0),
	optc_regs_init(1),
	optc_regs_init(2),
	optc_regs_init(3);

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &optc_regs[instance];
	tgn10->tg_shift = &optc_shift;
	tgn10->tg_mask = &optc_mask;

	/* Hook up the DCN3.2 timing-generator function table. */
	dcn32_timing_generator_init(optc1: tgn10);

	return &tgn10->base;
}
1019 | |
/* Link-encoder capabilities advertised for DCN3.2 DIO encoders:
 * 12-bpc deep-color HDMI up to a 600 MHz pixel clock, YCbCr 4:2:0 on
 * both HDMI and DP, FEC, and HBR2/HBR3 rates with TPS3/TPS4 training.
 */
static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 600000,
		.hdmi_ycbcr420_supported = true,
		.dp_ycbcr420_supported = true,
		.fec_supported = true,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true
};
1031 | |
/* Allocate and initialize one DIO link encoder (one per physical link).
 * Register arrays are indexed by the transmitter, AUX channel and HPD
 * source taken from @enc_init_data. Returns the base encoder on success,
 * NULL if allocation fails.
 */
static struct link_encoder *dcn32_link_encoder_create(
	struct dc_context *ctx,
	const struct encoder_init_data *enc_init_data)
{
	struct dcn20_link_encoder *enc20 =
		kzalloc(size: sizeof(struct dcn20_link_encoder), GFP_KERNEL);

	if (!enc20)
		return NULL;

	/* AUX channel register offsets, instances 0-4. */
#undef REG_STRUCT
#define REG_STRUCT link_enc_aux_regs
	aux_regs_init(0),
	aux_regs_init(1),
	aux_regs_init(2),
	aux_regs_init(3),
	aux_regs_init(4);

	/* HPD register offsets, instances 0-4. */
#undef REG_STRUCT
#define REG_STRUCT link_enc_hpd_regs
	hpd_regs_init(0),
	hpd_regs_init(1),
	hpd_regs_init(2),
	hpd_regs_init(3),
	hpd_regs_init(4);

	/* Link encoder register offsets; second argument is the PHY letter. */
#undef REG_STRUCT
#define REG_STRUCT link_enc_regs
	link_regs_init(0, A),
	link_regs_init(1, B),
	link_regs_init(2, C),
	link_regs_init(3, D),
	link_regs_init(4, E);

	dcn32_link_encoder_construct(enc20,
			init_data: enc_init_data,
			enc_features: &link_enc_feature,
			link_regs: &link_enc_regs[enc_init_data->transmitter],
			aux_regs: &link_enc_aux_regs[enc_init_data->channel - 1],
			hpd_regs: &link_enc_hpd_regs[enc_init_data->hpd_source],
			link_shift: &le_shift,
			link_mask: &le_mask);

	return &enc20->enc10.base;
}
1077 | |
1078 | struct panel_cntl *dcn32_panel_cntl_create(const struct panel_cntl_init_data *init_data) |
1079 | { |
1080 | struct dcn31_panel_cntl *panel_cntl = |
1081 | kzalloc(size: sizeof(struct dcn31_panel_cntl), GFP_KERNEL); |
1082 | |
1083 | if (!panel_cntl) |
1084 | return NULL; |
1085 | |
1086 | dcn31_panel_cntl_construct(dcn31_panel_cntl: panel_cntl, init_data); |
1087 | |
1088 | return &panel_cntl->base; |
1089 | } |
1090 | |
/* Read the DC pin straps to discover the board-level audio configuration;
 * result is written into @straps->dc_pinstraps_audio.
 */
static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, addr: ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), field_value: &straps->dc_pinstraps_audio);

}
1099 | |
/* Allocate and initialize one audio endpoint. Propagates NULL if
 * dce_audio_create() fails internally.
 */
static struct audio *dcn32_create_audio(
		struct dc_context *ctx, unsigned int inst)
{

	/* Fill in per-instance audio register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT audio_regs
	audio_regs_init(0),
	audio_regs_init(1),
	audio_regs_init(2),
	audio_regs_init(3),
	audio_regs_init(4);

	return dce_audio_create(ctx, inst,
			reg: &audio_regs[inst], shifts: &audio_shift, masks: &audio_mask);
}
1115 | |
/* Allocate and initialize one VPG (video packet generator) instance.
 * Instances 0-5 back the DIO stream encoders; 6-9 back the HPO DP
 * stream encoders (see the mapping in dcn32_hpo_dp_stream_encoder_create).
 * Returns the base vpg on success, NULL if allocation fails.
 */
static struct vpg *dcn32_vpg_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn30_vpg *vpg3 = kzalloc(size: sizeof(struct dcn30_vpg), GFP_KERNEL);

	if (!vpg3)
		return NULL;

	/* Fill in per-instance VPG register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT vpg_regs
	vpg_regs_init(0),
	vpg_regs_init(1),
	vpg_regs_init(2),
	vpg_regs_init(3),
	vpg_regs_init(4),
	vpg_regs_init(5),
	vpg_regs_init(6),
	vpg_regs_init(7),
	vpg_regs_init(8),
	vpg_regs_init(9);

	vpg3_construct(vpg3, ctx, inst,
			vpg_regs: &vpg_regs[inst],
			vpg_shift: &vpg_shift,
			vpg_mask: &vpg_mask);

	return &vpg3->base;
}
1145 | |
/* Allocate and initialize one AFMT (audio formatter) instance for the DIO
 * stream encoders. Returns the base afmt on success, NULL if allocation
 * fails.
 */
static struct afmt *dcn32_afmt_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn30_afmt *afmt3 = kzalloc(size: sizeof(struct dcn30_afmt), GFP_KERNEL);

	if (!afmt3)
		return NULL;

	/* Fill in per-instance AFMT register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT afmt_regs
	afmt_regs_init(0),
	afmt_regs_init(1),
	afmt_regs_init(2),
	afmt_regs_init(3),
	afmt_regs_init(4),
	afmt_regs_init(5);

	afmt3_construct(afmt3, ctx, inst,
			afmt_regs: &afmt_regs[inst],
			afmt_shift: &afmt_shift,
			afmt_mask: &afmt_mask);

	return &afmt3->base;
}
1171 | |
/* Allocate and initialize one APG (audio packet generator) instance for
 * the HPO DP stream encoders. Returns the base apg on success, NULL if
 * allocation fails.
 */
static struct apg *dcn31_apg_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn31_apg *apg31 = kzalloc(size: sizeof(struct dcn31_apg), GFP_KERNEL);

	if (!apg31)
		return NULL;

	/* Fill in per-instance APG register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT apg_regs
	apg_regs_init(0),
	apg_regs_init(1),
	apg_regs_init(2),
	apg_regs_init(3);

	apg31_construct(apg3: apg31, ctx, inst,
			apg_regs: &apg_regs[inst],
			apg_shift: &apg_shift,
			apg_mask: &apg_mask);

	return &apg31->base;
}
1195 | |
/* Allocate and initialize one DIO stream encoder, together with its
 * dedicated VPG and AFMT sub-blocks (same instance number as the DIG).
 * Returns the base encoder on success; NULL for an out-of-range engine id
 * or if any of the three allocations fails (all are freed on failure).
 */
static struct stream_encoder *dcn32_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dcn10_stream_encoder *enc1;
	struct vpg *vpg;
	struct afmt *afmt;
	int vpg_inst;
	int afmt_inst;

	/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
	if (eng_id <= ENGINE_ID_DIGF) {
		vpg_inst = eng_id;
		afmt_inst = eng_id;
	} else
		return NULL;

	enc1 = kzalloc(size: sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
	vpg = dcn32_vpg_create(ctx, inst: vpg_inst);
	afmt = dcn32_afmt_create(ctx, inst: afmt_inst);

	/* kfree(NULL) is a no-op, so partial failure is cleaned up safely. */
	if (!enc1 || !vpg || !afmt) {
		kfree(objp: enc1);
		kfree(objp: vpg);
		kfree(objp: afmt);
		return NULL;
	}

	/* Fill in per-instance stream-encoder register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT stream_enc_regs
	stream_enc_regs_init(0),
	stream_enc_regs_init(1),
	stream_enc_regs_init(2),
	stream_enc_regs_init(3),
	stream_enc_regs_init(4);

	dcn32_dio_stream_encoder_construct(enc1, ctx, bp: ctx->dc_bios,
					eng_id, vpg, afmt,
					regs: &stream_enc_regs[eng_id],
					se_shift: &se_shift, se_mask: &se_mask);

	return &enc1->base;
}
1239 | |
/* Allocate and initialize one HPO (DP 128b/132b) stream encoder together
 * with its VPG and APG sub-blocks. VPG instances 6-9 and APG instances 0-3
 * are dedicated to HPO_DP 0-3 respectively (see mapping comments below).
 * Returns the base encoder on success, NULL if any allocation fails.
 */
static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
	struct vpg *vpg;
	struct apg *apg;
	uint32_t hpo_dp_inst;
	uint32_t vpg_inst;
	uint32_t apg_inst;

	ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
	hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;

	/* Mapping of VPG register blocks to HPO DP block instance:
	 * VPG[6] -> HPO_DP[0]
	 * VPG[7] -> HPO_DP[1]
	 * VPG[8] -> HPO_DP[2]
	 * VPG[9] -> HPO_DP[3]
	 */
	vpg_inst = hpo_dp_inst + 6;

	/* Mapping of APG register blocks to HPO DP block instance:
	 * APG[0] -> HPO_DP[0]
	 * APG[1] -> HPO_DP[1]
	 * APG[2] -> HPO_DP[2]
	 * APG[3] -> HPO_DP[3]
	 */
	apg_inst = hpo_dp_inst;

	/* allocate HPO stream encoder and create VPG sub-block */
	hpo_dp_enc31 = kzalloc(size: sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
	vpg = dcn32_vpg_create(ctx, inst: vpg_inst);
	apg = dcn31_apg_create(ctx, inst: apg_inst);

	/* kfree(NULL) is a no-op, so partial failure is cleaned up safely. */
	if (!hpo_dp_enc31 || !vpg || !apg) {
		kfree(objp: hpo_dp_enc31);
		kfree(objp: vpg);
		kfree(objp: apg);
		return NULL;
	}

	/* Fill in per-instance HPO stream-encoder register offsets. */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_stream_enc_regs
	hpo_dp_stream_encoder_reg_init(0),
	hpo_dp_stream_encoder_reg_init(1),
	hpo_dp_stream_encoder_reg_init(2),
	hpo_dp_stream_encoder_reg_init(3);

	dcn31_hpo_dp_stream_encoder_construct(enc3: hpo_dp_enc31, ctx, bp: ctx->dc_bios,
					inst: hpo_dp_inst, eng_id, vpg, apg,
					regs: &hpo_dp_stream_enc_regs[hpo_dp_inst],
					hpo_se_shift: &hpo_dp_se_shift, hpo_se_mask: &hpo_dp_se_mask);

	return &hpo_dp_enc31->base;
}
1296 | |
1297 | static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create( |
1298 | uint8_t inst, |
1299 | struct dc_context *ctx) |
1300 | { |
1301 | struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; |
1302 | |
1303 | /* allocate HPO link encoder */ |
1304 | hpo_dp_enc31 = kzalloc(size: sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); |
1305 | |
1306 | #undef REG_STRUCT |
1307 | #define REG_STRUCT hpo_dp_link_enc_regs |
1308 | hpo_dp_link_encoder_reg_init(0), |
1309 | hpo_dp_link_encoder_reg_init(1); |
1310 | |
1311 | hpo_dp_link_encoder32_construct(enc31: hpo_dp_enc31, ctx, inst, |
1312 | hpo_le_regs: &hpo_dp_link_enc_regs[inst], |
1313 | hpo_le_shift: &hpo_dp_le_shift, hpo_le_mask: &hpo_dp_le_mask); |
1314 | |
1315 | return &hpo_dp_enc31->base; |
1316 | } |
1317 | |
/* Allocate the HW sequencer context and attach the DCN3.2 register maps.
 * Returns NULL if allocation fails; the static register structs are
 * initialized either way.
 */
static struct dce_hwseq *dcn32_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(size: sizeof(struct dce_hwseq), GFP_KERNEL);

	/* Fill in HW-sequencer register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT hwseq_reg
	hwseq_reg_init();

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
	}
	return hws;
}
/* Factory callbacks handed to the shared resource-construction helpers. */
static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = dcn32_create_audio,
	.create_stream_encoder = dcn32_stream_encoder_create,
	.create_hpo_dp_stream_encoder = dcn32_hpo_dp_stream_encoder_create,
	.create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create,
	.create_hwseq = dcn32_hwseq_create,
};
1343 | |
/* Tear down everything dcn32_resource_construct() created, in reverse
 * dependency order. Each pointer is NULLed after release so the function
 * is safe to call on a partially constructed pool.
 */
static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
{
	unsigned int i;

	/* DIO stream encoders own their VPG/AFMT sub-blocks; free those first. */
	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			if (pool->base.stream_enc[i]->vpg != NULL) {
				kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
				pool->base.stream_enc[i]->vpg = NULL;
			}
			if (pool->base.stream_enc[i]->afmt != NULL) {
				kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
				pool->base.stream_enc[i]->afmt = NULL;
			}
			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
			pool->base.stream_enc[i] = NULL;
		}
	}

	/* HPO stream encoders own VPG/APG sub-blocks. */
	for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
		if (pool->base.hpo_dp_stream_enc[i] != NULL) {
			if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
				kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
				pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
			}
			if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
				kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
				pool->base.hpo_dp_stream_enc[i]->apg = NULL;
			}
			kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
			pool->base.hpo_dp_stream_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
		if (pool->base.hpo_dp_link_enc[i] != NULL) {
			kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
			pool->base.hpo_dp_link_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		if (pool->base.dscs[i] != NULL)
			dcn20_dsc_destroy(dsc: &pool->base.dscs[i]);
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN20_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}
	if (pool->base.hubbub != NULL) {
		kfree(TO_DCN20_HUBBUB(pool->base.hubbub));
		pool->base.hubbub = NULL;
	}
	/* Per-pipe front-end blocks: DPP, IPP, HUBP. */
	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.dpps[i] != NULL)
			dcn32_dpp_destroy(dpp: &pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}

		/* NOTE(review): the (single) IRQ service is destroyed inside the
		 * per-pipe loop; harmless because destroy NULLs the pointer, but
		 * it logically belongs outside the loop.
		 */
		if (pool->base.irqs != NULL) {
			dal_irq_service_destroy(irq_service: &pool->base.irqs);
		}
	}

	/* DDC engines and their HW/SW I2C helpers. */
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(engine: &pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
			kfree(objp: pool->base.hw_i2cs[i]);
			pool->base.hw_i2cs[i] = NULL;
		}
		if (pool->base.sw_i2cs[i] != NULL) {
			kfree(objp: pool->base.sw_i2cs[i]);
			pool->base.sw_i2cs[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	/* Writeback (DWB) and its MMHUBBUB interface. */
	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
		if (pool->base.dwbc[i] != NULL) {
			kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
			pool->base.dwbc[i] = NULL;
		}
		if (pool->base.mcif_wb[i] != NULL) {
			kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
			pool->base.mcif_wb[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(audio: &pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn20_clock_source_destroy(clk_src: &pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	/* 3D LUTs and shaper transfer functions are refcounted objects. */
	for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
		if (pool->base.mpc_lut[i] != NULL) {
			dc_3dlut_func_release(lut: pool->base.mpc_lut[i]);
			pool->base.mpc_lut[i] = NULL;
		}
		if (pool->base.mpc_shaper[i] != NULL) {
			dc_transfer_func_release(dc_tf: pool->base.mpc_shaper[i]);
			pool->base.mpc_shaper[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn20_clock_source_destroy(clk_src: &pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		if (pool->base.multiple_abms[i] != NULL)
			dce_abm_destroy(abm: &pool->base.multiple_abms[i]);
	}

	if (pool->base.psr != NULL)
		dmub_psr_destroy(dmub: &pool->base.psr);

	if (pool->base.dccg != NULL)
		dcn_dccg_destroy(dccg: &pool->base.dccg);

	if (pool->base.oem_device != NULL) {
		struct dc *dc = pool->base.oem_device->ctx->dc;

		dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
	}
}
1496 | |
1497 | |
/* Create all DWB (display writeback) controllers for the pool.
 * Returns false on the first allocation failure; controllers created so
 * far remain in the pool and are freed by dcn32_resource_destruct().
 */
static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
	int i;
	uint32_t dwb_count = pool->res_cap->num_dwb;

	for (i = 0; i < dwb_count; i++) {
		struct dcn30_dwbc *dwbc30 = kzalloc(size: sizeof(struct dcn30_dwbc),
						    GFP_KERNEL);

		if (!dwbc30) {
			dm_error("DC: failed to create dwbc30!\n");
			return false;
		}

	/* NOTE(review): only instance 0's registers are macro-initialized
	 * here while &dwbc30_regs[i] is used below — fine iff num_dwb == 1;
	 * confirm for this ASIC.
	 */
#undef REG_STRUCT
#define REG_STRUCT dwbc30_regs
		dwbc_regs_dcn3_init(0);

		dcn30_dwbc_construct(dwbc30, ctx,
				dwbc_regs: &dwbc30_regs[i],
				dwbc_shift: &dwbc30_shift,
				dwbc_mask: &dwbc30_mask,
				inst: i);

		pool->dwbc[i] = &dwbc30->base;
	}
	return true;
}
1526 | |
/* Create the MMHUBBUB (writeback memory interface) blocks, one per DWB.
 * Returns false on the first allocation failure; blocks created so far
 * remain in the pool and are freed by dcn32_resource_destruct().
 */
static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
	int i;
	uint32_t dwb_count = pool->res_cap->num_dwb;

	for (i = 0; i < dwb_count; i++) {
		struct dcn30_mmhubbub *mcif_wb30 = kzalloc(size: sizeof(struct dcn30_mmhubbub),
						    GFP_KERNEL);

		if (!mcif_wb30) {
			dm_error("DC: failed to create mcif_wb30!\n");
			return false;
		}

	/* NOTE(review): only instance 0's registers are macro-initialized
	 * here while &mcif_wb30_regs[i] is used below — fine iff num_dwb == 1;
	 * confirm for this ASIC.
	 */
#undef REG_STRUCT
#define REG_STRUCT mcif_wb30_regs
		mcif_wb_regs_dcn3_init(0);

		dcn32_mmhubbub_construct(mcif_wb30, ctx,
				mcif_wb_regs: &mcif_wb30_regs[i],
				mcif_wb_shift: &mcif_wb30_shift,
				mcif_wb_mask: &mcif_wb30_mask,
				inst: i);

		pool->mcif_wb[i] = &mcif_wb30->base;
	}
	return true;
}
1555 | |
/* Allocate and initialize one DSC (display stream compressor) instance.
 * Returns the base dsc on success, NULL if allocation fails.
 */
static struct display_stream_compressor *dcn32_dsc_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_dsc *dsc =
		kzalloc(size: sizeof(struct dcn20_dsc), GFP_KERNEL);

	if (!dsc) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	/* Fill in per-instance DSC register offsets for this ASIC. */
#undef REG_STRUCT
#define REG_STRUCT dsc_regs
	dsc_regsDCN20_init(0),
	dsc_regsDCN20_init(1),
	dsc_regsDCN20_init(2),
	dsc_regsDCN20_init(3);

	dsc2_construct(dsc, ctx, inst, dsc_regs: &dsc_regs[inst], dsc_shift: &dsc_shift, dsc_mask: &dsc_mask);

	/* Per-engine input width limit; wider sources must split across DSCs. */
	dsc->max_image_width = 6016;

	return &dsc->base;
}
1580 | |
1581 | static void dcn32_destroy_resource_pool(struct resource_pool **pool) |
1582 | { |
1583 | struct dcn32_resource_pool *dcn32_pool = TO_DCN32_RES_POOL(*pool); |
1584 | |
1585 | dcn32_resource_destruct(pool: dcn32_pool); |
1586 | kfree(objp: dcn32_pool); |
1587 | *pool = NULL; |
1588 | } |
1589 | |
1590 | bool dcn32_acquire_post_bldn_3dlut( |
1591 | struct resource_context *res_ctx, |
1592 | const struct resource_pool *pool, |
1593 | int mpcc_id, |
1594 | struct dc_3dlut **lut, |
1595 | struct dc_transfer_func **shaper) |
1596 | { |
1597 | bool ret = false; |
1598 | |
1599 | ASSERT(*lut == NULL && *shaper == NULL); |
1600 | *lut = NULL; |
1601 | *shaper = NULL; |
1602 | |
1603 | if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) { |
1604 | *lut = pool->mpc_lut[mpcc_id]; |
1605 | *shaper = pool->mpc_shaper[mpcc_id]; |
1606 | res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true; |
1607 | ret = true; |
1608 | } |
1609 | return ret; |
1610 | } |
1611 | |
1612 | bool dcn32_release_post_bldn_3dlut( |
1613 | struct resource_context *res_ctx, |
1614 | const struct resource_pool *pool, |
1615 | struct dc_3dlut **lut, |
1616 | struct dc_transfer_func **shaper) |
1617 | { |
1618 | int i; |
1619 | bool ret = false; |
1620 | |
1621 | for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { |
1622 | if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) { |
1623 | res_ctx->is_mpc_3dlut_acquired[i] = false; |
1624 | pool->mpc_lut[i]->state.raw = 0; |
1625 | *lut = NULL; |
1626 | *shaper = NULL; |
1627 | ret = true; |
1628 | break; |
1629 | } |
1630 | } |
1631 | return ret; |
1632 | } |
1633 | |
1634 | static void dcn32_enable_phantom_plane(struct dc *dc, |
1635 | struct dc_state *context, |
1636 | struct dc_stream_state *phantom_stream, |
1637 | unsigned int dc_pipe_idx) |
1638 | { |
1639 | struct dc_plane_state *phantom_plane = NULL; |
1640 | struct dc_plane_state *prev_phantom_plane = NULL; |
1641 | struct pipe_ctx *curr_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; |
1642 | |
1643 | while (curr_pipe) { |
1644 | if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) |
1645 | phantom_plane = prev_phantom_plane; |
1646 | else |
1647 | phantom_plane = dc_create_plane_state(dc); |
1648 | |
1649 | memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); |
1650 | memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, |
1651 | sizeof(phantom_plane->scaling_quality)); |
1652 | memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); |
1653 | memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); |
1654 | memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); |
1655 | memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, |
1656 | sizeof(phantom_plane->plane_size)); |
1657 | memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, |
1658 | sizeof(phantom_plane->tiling_info)); |
1659 | memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); |
1660 | phantom_plane->format = curr_pipe->plane_state->format; |
1661 | phantom_plane->rotation = curr_pipe->plane_state->rotation; |
1662 | phantom_plane->visible = curr_pipe->plane_state->visible; |
1663 | |
1664 | /* Shadow pipe has small viewport. */ |
1665 | phantom_plane->clip_rect.y = 0; |
1666 | phantom_plane->clip_rect.height = phantom_stream->src.height; |
1667 | |
1668 | phantom_plane->is_phantom = true; |
1669 | |
1670 | dc_add_plane_to_context(dc, stream: phantom_stream, plane_state: phantom_plane, context); |
1671 | |
1672 | curr_pipe = curr_pipe->bottom_pipe; |
1673 | prev_phantom_plane = phantom_plane; |
1674 | } |
1675 | } |
1676 | |
1677 | static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, |
1678 | struct dc_state *context, |
1679 | display_e2e_pipe_params_st *pipes, |
1680 | unsigned int pipe_cnt, |
1681 | unsigned int dc_pipe_idx) |
1682 | { |
1683 | struct dc_stream_state *phantom_stream = NULL; |
1684 | struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; |
1685 | |
1686 | phantom_stream = dc_create_stream_for_sink(dc_sink: ref_pipe->stream->sink); |
1687 | phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; |
1688 | phantom_stream->dpms_off = true; |
1689 | phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; |
1690 | phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; |
1691 | ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; |
1692 | ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; |
1693 | |
1694 | /* stream has limited viewport and small timing */ |
1695 | memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); |
1696 | memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); |
1697 | memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); |
1698 | DC_FP_START(); |
1699 | dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx); |
1700 | DC_FP_END(); |
1701 | |
1702 | dc_add_stream_to_ctx(dc, new_ctx: context, stream: phantom_stream); |
1703 | return phantom_stream; |
1704 | } |
1705 | |
1706 | void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) |
1707 | { |
1708 | int i; |
1709 | struct dc_plane_state *phantom_plane = NULL; |
1710 | struct dc_stream_state *phantom_stream = NULL; |
1711 | |
1712 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1713 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
1714 | |
1715 | if (resource_is_pipe_type(pipe_ctx: pipe, type: OTG_MASTER) && |
1716 | resource_is_pipe_type(pipe_ctx: pipe, type: DPP_PIPE) && |
1717 | pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { |
1718 | phantom_plane = pipe->plane_state; |
1719 | phantom_stream = pipe->stream; |
1720 | |
1721 | dc_plane_state_retain(plane_state: phantom_plane); |
1722 | dc_stream_retain(dc_stream: phantom_stream); |
1723 | } |
1724 | } |
1725 | } |
1726 | |
1727 | // return true if removed piped from ctx, false otherwise |
1728 | bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update) |
1729 | { |
1730 | int i; |
1731 | bool removed_pipe = false; |
1732 | struct dc_plane_state *phantom_plane = NULL; |
1733 | struct dc_stream_state *phantom_stream = NULL; |
1734 | |
1735 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1736 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
1737 | // build scaling params for phantom pipes |
1738 | if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { |
1739 | phantom_plane = pipe->plane_state; |
1740 | phantom_stream = pipe->stream; |
1741 | |
1742 | dc_rem_all_planes_for_stream(dc, stream: pipe->stream, context); |
1743 | dc_remove_stream_from_ctx(dc, new_ctx: context, stream: pipe->stream); |
1744 | |
1745 | /* Ref count is incremented on allocation and also when added to the context. |
1746 | * Therefore we must call release for the the phantom plane and stream once |
1747 | * they are removed from the ctx to finally decrement the refcount to 0 to free. |
1748 | */ |
1749 | dc_plane_state_release(plane_state: phantom_plane); |
1750 | dc_stream_release(dc_stream: phantom_stream); |
1751 | |
1752 | removed_pipe = true; |
1753 | } |
1754 | |
1755 | /* For non-full updates, a shallow copy of the current state |
1756 | * is created. In this case we don't want to erase the current |
1757 | * state (there can be 2 HIRQL threads, one in flip, and one in |
1758 | * checkMPO) that can cause a race condition. |
1759 | * |
1760 | * This is just a workaround, needs a proper fix. |
1761 | */ |
1762 | if (!fast_update) { |
1763 | // Clear all phantom stream info |
1764 | if (pipe->stream) { |
1765 | pipe->stream->mall_stream_config.type = SUBVP_NONE; |
1766 | pipe->stream->mall_stream_config.paired_stream = NULL; |
1767 | } |
1768 | |
1769 | if (pipe->plane_state) { |
1770 | pipe->plane_state->is_phantom = false; |
1771 | } |
1772 | } |
1773 | } |
1774 | return removed_pipe; |
1775 | } |
1776 | |
1777 | /* TODO: Input to this function should indicate which pipe indexes (or streams) |
1778 | * require a phantom pipe / stream |
1779 | */ |
1780 | void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, |
1781 | display_e2e_pipe_params_st *pipes, |
1782 | unsigned int pipe_cnt, |
1783 | unsigned int index) |
1784 | { |
1785 | struct dc_stream_state *phantom_stream = NULL; |
1786 | unsigned int i; |
1787 | |
1788 | // The index of the DC pipe passed into this function is guarenteed to |
1789 | // be a valid candidate for SubVP (i.e. has a plane, stream, doesn't |
1790 | // already have phantom pipe assigned, etc.) by previous checks. |
1791 | phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, dc_pipe_idx: index); |
1792 | dcn32_enable_phantom_plane(dc, context, phantom_stream, dc_pipe_idx: index); |
1793 | |
1794 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1795 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
1796 | |
1797 | // Build scaling params for phantom pipes which were newly added. |
1798 | // We determine which phantom pipes were added by comparing with |
1799 | // the phantom stream. |
1800 | if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && |
1801 | pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { |
1802 | pipe->stream->use_dynamic_meta = false; |
1803 | pipe->plane_state->flip_immediate = false; |
1804 | if (!resource_build_scaling_params(pipe_ctx: pipe)) { |
1805 | // Log / remove phantom pipes since failed to build scaling params |
1806 | } |
1807 | } |
1808 | } |
1809 | } |
1810 | |
1811 | static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) |
1812 | { |
1813 | bool out = false; |
1814 | |
1815 | BW_VAL_TRACE_SETUP(); |
1816 | |
1817 | int vlevel = 0; |
1818 | int pipe_cnt = 0; |
1819 | display_e2e_pipe_params_st *pipes = kzalloc(size: dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); |
1820 | struct mall_temp_config mall_temp_config; |
1821 | |
1822 | /* To handle Freesync properly, setting FreeSync DML parameters |
1823 | * to its default state for the first stage of validation |
1824 | */ |
1825 | context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; |
1826 | context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; |
1827 | |
1828 | DC_LOGGER_INIT(dc->ctx->logger); |
1829 | |
1830 | /* For fast validation, there are situations where a shallow copy of |
1831 | * of the dc->current_state is created for the validation. In this case |
1832 | * we want to save and restore the mall config because we always |
1833 | * teardown subvp at the beginning of validation (and don't attempt |
1834 | * to add it back if it's fast validation). If we don't restore the |
1835 | * subvp config in cases of fast validation + shallow copy of the |
1836 | * dc->current_state, the dc->current_state will have a partially |
1837 | * removed subvp state when we did not intend to remove it. |
1838 | */ |
1839 | if (fast_validate) { |
1840 | memset(&mall_temp_config, 0, sizeof(mall_temp_config)); |
1841 | dcn32_save_mall_state(dc, context, temp_config: &mall_temp_config); |
1842 | } |
1843 | |
1844 | BW_VAL_TRACE_COUNT(); |
1845 | |
1846 | DC_FP_START(); |
1847 | out = dcn32_internal_validate_bw(dc, context, pipes, pipe_cnt_out: &pipe_cnt, vlevel_out: &vlevel, fast_validate); |
1848 | DC_FP_END(); |
1849 | |
1850 | if (fast_validate) |
1851 | dcn32_restore_mall_state(dc, context, temp_config: &mall_temp_config); |
1852 | |
1853 | if (pipe_cnt == 0) |
1854 | goto validate_out; |
1855 | |
1856 | if (!out) |
1857 | goto validate_fail; |
1858 | |
1859 | BW_VAL_TRACE_END_VOLTAGE_LEVEL(); |
1860 | |
1861 | if (fast_validate) { |
1862 | BW_VAL_TRACE_SKIP(fast); |
1863 | goto validate_out; |
1864 | } |
1865 | |
1866 | dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); |
1867 | |
1868 | dcn32_override_min_req_memclk(dc, context); |
1869 | |
1870 | BW_VAL_TRACE_END_WATERMARKS(); |
1871 | |
1872 | goto validate_out; |
1873 | |
1874 | validate_fail: |
1875 | DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n" , |
1876 | dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); |
1877 | |
1878 | BW_VAL_TRACE_SKIP(fail); |
1879 | out = false; |
1880 | |
1881 | validate_out: |
1882 | kfree(objp: pipes); |
1883 | |
1884 | BW_VAL_TRACE_FINISH(); |
1885 | |
1886 | return out; |
1887 | } |
1888 | |
1889 | bool dcn32_validate_bandwidth(struct dc *dc, |
1890 | struct dc_state *context, |
1891 | bool fast_validate) |
1892 | { |
1893 | bool out = false; |
1894 | |
1895 | if (dc->debug.using_dml2) |
1896 | out = dml2_validate(in_dc: dc, context, fast_validate); |
1897 | else |
1898 | out = dml1_validate(dc, context, fast_validate); |
1899 | return out; |
1900 | } |
1901 | |
/*
 * dcn32_populate_dml_pipes_from_context - fill the DML pipe parameter array
 * from the given dc_state, applying DCN3.2-specific overrides on top of the
 * common DCN2.0 population.
 *
 * Returns the number of DML pipe entries populated.
 */
int dcn32_populate_dml_pipes_from_context(
	struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes,
	bool fast_validate)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *pipe = NULL;
	bool subvp_in_use = false;
	struct dc_crtc_timing *timing;

	/* Base population shared with earlier DCN generations */
	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {

		/* Only pipes with a stream contribute a DML entry */
		if (!res_ctx->pipe_ctx[i].stream)
			continue;
		pipe = &res_ctx->pipe_ctx[i];
		timing = &pipe->stream->timing;

		pipes[pipe_cnt].pipe.src.gpuvm = true;
		/* DCC fraction zeroing uses FP math, so guard with FPU context */
		DC_FP_START();
		dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
		DC_FP_END();
		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
		pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
		pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
		pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;

		/* Only populate DML input with subvp info for full updates.
		 * This is just a workaround -- needs a proper fix.
		 */
		if (!fast_validate) {
			switch (pipe->stream->mall_stream_config.type) {
			case SUBVP_MAIN:
				pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
				subvp_in_use = true;
				break;
			case SUBVP_PHANTOM:
				pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_phantom_pipe;
				pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable;
				// Disallow unbounded req for SubVP according to DCHUB programming guide
				pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
				break;
			case SUBVP_NONE:
				pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_disable;
				pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable;
				break;
			default:
				break;
			}
		}

		/* DSC input bpc follows the stream color depth; 0 means DSC off */
		pipes[pipe_cnt].dout.dsc_input_bpc = 0;
		if (pipes[pipe_cnt].dout.dsc_enable) {
			switch (timing->display_color_depth) {
			case COLOR_DEPTH_888:
				pipes[pipe_cnt].dout.dsc_input_bpc = 8;
				break;
			case COLOR_DEPTH_101010:
				pipes[pipe_cnt].dout.dsc_input_bpc = 10;
				break;
			case COLOR_DEPTH_121212:
				pipes[pipe_cnt].dout.dsc_input_bpc = 12;
				break;
			default:
				/* unsupported depth for DSC */
				ASSERT(0);
				break;
			}
		}


		pipe_cnt++;
	}

	/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
	 * the DET available for each pipe). Use the DET override input to maintain our driver
	 * policy.
	 */
	dcn32_set_det_allocations(dc, context, pipes);

	// In general cases we want to keep the dram clock change requirement
	// (prefer configs that support MCLK switch). Only override to false
	// for SubVP
	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
	else
		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;

	return pipe_cnt;
}
1994 | |
/* Capability query hooks exposed to DC core via dc->cap_funcs */
static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
	.get_subvp_en = dcn32_subvp_in_use,
};
1999 | |
/*
 * dcn32_calculate_wm_and_dlg - compute watermarks and DLG/TTU parameters for
 * a validated state. The math is floating point, so the FPU-enabled helper
 * is wrapped in DC_FP_START()/DC_FP_END().
 */
void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
				display_e2e_pipe_params_st *pipes,
				int pipe_cnt,
				int vlevel)
{
	DC_FP_START();
	dcn32_calculate_wm_and_dlg_fpu(dc, context, pipes, pipe_cnt, vlevel);
	DC_FP_END();
}
2009 | |
/*
 * dcn32_update_bw_bounding_box - refresh the DML SOC bounding box from the
 * clock/bandwidth parameters reported by SMU. FP work is delegated to the
 * _fpu helper inside an FPU context.
 */
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
	DC_FP_START();
	dcn32_update_bw_bounding_box_fpu(dc, bw_params);
	DC_FP_END();
}
2016 | |
/* Resource-level function table for DCN3.2, installed into pool->base.funcs.
 * Entries reuse DCN1.0/2.0/3.0 implementations where behavior is unchanged
 * on this generation.
 */
static struct resource_funcs dcn32_res_pool_funcs = {
	.destroy = dcn32_destroy_resource_pool,
	.link_enc_create = dcn32_link_encoder_create,
	.link_enc_create_minimal = NULL,
	.panel_cntl_create = dcn32_panel_cntl_create,
	.validate_bandwidth = dcn32_validate_bandwidth,
	.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
	.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
	.acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe,
	.acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head,
	.release_pipe = dcn20_release_pipe,
	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
	.set_mcif_arb_params = dcn30_set_mcif_arb_params,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
	.acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut,
	.release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut,
	.update_bw_bounding_box = dcn32_update_bw_bounding_box,
	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
	.add_phantom_pipes = dcn32_add_phantom_pipes,
	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
	.retain_phantom_pipes = dcn32_retain_phantom_pipes,
	.save_mall_state = dcn32_save_mall_state,
	.restore_mall_state = dcn32_restore_mall_state,
};
2045 | |
2046 | static uint32_t read_pipe_fuses(struct dc_context *ctx) |
2047 | { |
2048 | uint32_t value = REG_READ(CC_DC_PIPE_DIS); |
2049 | /* DCN32 support max 4 pipes */ |
2050 | value = value & 0xf; |
2051 | return value; |
2052 | } |
2053 | |
2054 | |
/*
 * dcn32_resource_construct - build the DCN3.2 resource pool.
 *
 * Initializes register structs, reads pipe fuses to determine the usable
 * pipe count, fills in ASIC capabilities, creates every hardware block
 * object (clock sources, DCCG, IRQ service, HUBBUB, per-pipe HUBP/DPP/OPP/
 * TG/ABM, PSR, MPC, DSC, DWB, MMHUBBUB, AUX/I2C), and configures DML2
 * options. On any creation failure it jumps to create_fail, which destructs
 * whatever was already created.
 *
 * Returns true on success, false on failure (pool partially torn down).
 */
static bool dcn32_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn32_resource_pool *pool)
{
	int i, j;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;
	struct ddc_service_init_data ddc_init_data = {0};
	uint32_t pipe_fuses = 0;
	uint32_t num_pipes = 4; /* default; refined below from res_cap and fuses */

/* Register offset init macros expand against the struct named REG_STRUCT */
#undef REG_STRUCT
#define REG_STRUCT bios_regs
	bios_regs_init();

#undef REG_STRUCT
#define REG_STRUCT clk_src_regs
	clk_src_regs_init(0, A),
	clk_src_regs_init(1, B),
	clk_src_regs_init(2, C),
	clk_src_regs_init(3, D),
	clk_src_regs_init(4, E);

#undef REG_STRUCT
#define REG_STRUCT abm_regs
	abm_regs_init(0),
	abm_regs_init(1),
	abm_regs_init(2),
	abm_regs_init(3);

#undef REG_STRUCT
#define REG_STRUCT dccg_regs
	dccg_regs_init();

	DC_FP_START();

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap_dcn32;
	/* max number of pipes for ASIC before checking for pipe fuses */
	num_pipes  = pool->base.res_cap->num_timing_generator;
	pipe_fuses = read_pipe_fuses(ctx);

	/* each set fuse bit disables one pipe */
	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
		if (pipe_fuses & 1 << i)
			num_pipes--;

	if (pipe_fuses & 1)
		ASSERT(0); //Unexpected - Pipe 0 should always be fully functional!

	if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK)
		ASSERT(0); //Entire DCN is harvested!

	/* within dml lib, initial value is hard coded, if ASIC pipe is fused, the
	 * value will be changed, update max_num_dpp and max_num_otg for dml.
	 */
	dcn3_2_ip.max_num_dpp = num_pipes;
	dcn3_2_ip.max_num_otg = num_pipes;

	pool->base.funcs = &dcn32_res_pool_funcs;

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
	pool->base.timing_generator_count = num_pipes;
	pool->base.pipe_count = num_pipes;
	pool->base.mpcc_count = num_pipes;
	dc->caps.max_downscale_ratio = 600;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
	/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
	dc->caps.max_cursor_size = 64;
	dc->caps.min_horizontal_blanking_period = 80;
	dc->caps.dmdata_alloc_size = 2048;
	dc->caps.mall_size_per_mem_channel = 4;
	/* total MALL size is computed below once the channel count is known */
	dc->caps.mall_size_total = 0;
	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;

	dc->caps.cache_line_size = 64;
	dc->caps.cache_num_ways = 16;

	/* Calculate the available MALL space */
	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
		dc, num_chans: dc->ctx->dc_bios->vram_info.num_chans) *
		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;

	/* SubVP (sub-viewport) timing margins and delays, in microseconds */
	dc->caps.subvp_fw_processing_delay_us = 15;
	dc->caps.subvp_drr_max_vblank_margin_us = 40;
	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
	dc->caps.subvp_swath_height_margin_lines = 16;
	dc->caps.subvp_pstate_allow_width_us = 20;
	dc->caps.subvp_vertical_int_margin_us = 30;
	dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin

	dc->caps.max_slave_planes = 2;
	dc->caps.max_slave_yuv_planes = 2;
	dc->caps.max_slave_rgb_planes = 2;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	if (dc->config.forceHBR2CP2520)
		dc->caps.force_dp_tps4_for_cp2520 = false;
	dc->caps.dp_hpo = true;
	dc->caps.dp_hdmi21_pcon_support = true;
	dc->caps.edp_dsc_support = true;
	dc->caps.extended_aux_timeout_support = true;
	dc->caps.dmcub_support = true;
	dc->caps.seamless_odm = true;
	dc->caps.max_v_total = (1 << 15) - 1;

	/* Color pipeline capabilities */
	dc->caps.color.dpp.dcn_arch = 1;
	dc->caps.color.dpp.input_lut_shared = 0;
	dc->caps.color.dpp.icsc = 1;
	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
	dc->caps.color.dpp.post_csc = 1;
	dc->caps.color.dpp.gamma_corr = 1;
	dc->caps.color.dpp.dgam_rom_for_yuv = 0;

	dc->caps.color.dpp.hw_3d_lut = 1;
	dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
	// no OGAM ROM on DCN2 and later ASICs
	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
	dc->caps.color.dpp.ocsc = 0;

	dc->caps.color.mpc.gamut_remap = 1;
	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC
	dc->caps.color.mpc.ogam_ram = 1;
	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
	dc->caps.color.mpc.ocsc = 1;

	/* Use pipe context based otg sync logic */
	dc->config.use_pipe_ctx_sync_logic = true;

	dc->config.dc_mode_clk_limit_support = true;
	/* read VBIOS LTTPR caps */
	{
		if (ctx->dc_bios->funcs->get_lttpr_caps) {
			enum bp_result bp_query_result;
			uint8_t is_vbios_lttpr_enable = 0;

			bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
			dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
		}

		/* interop bit is implicit */
		{
			dc->caps.vbios_lttpr_aware = true;
		}
	}

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(vm_helper: dc->vm_helper, num_vmid: 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	/* Clock Sources for Pixel Clock*/
	pool->base.clock_sources[DCN32_CLK_SRC_PLL0] =
			dcn32_clock_source_create(ctx, bios: ctx->dc_bios,
				id: CLOCK_SOURCE_COMBO_PHY_PLL0,
				regs: &clk_src_regs[0], dp_clk_src: false);
	pool->base.clock_sources[DCN32_CLK_SRC_PLL1] =
			dcn32_clock_source_create(ctx, bios: ctx->dc_bios,
				id: CLOCK_SOURCE_COMBO_PHY_PLL1,
				regs: &clk_src_regs[1], dp_clk_src: false);
	pool->base.clock_sources[DCN32_CLK_SRC_PLL2] =
			dcn32_clock_source_create(ctx, bios: ctx->dc_bios,
				id: CLOCK_SOURCE_COMBO_PHY_PLL2,
				regs: &clk_src_regs[2], dp_clk_src: false);
	pool->base.clock_sources[DCN32_CLK_SRC_PLL3] =
			dcn32_clock_source_create(ctx, bios: ctx->dc_bios,
				id: CLOCK_SOURCE_COMBO_PHY_PLL3,
				regs: &clk_src_regs[3], dp_clk_src: false);
	pool->base.clock_sources[DCN32_CLK_SRC_PLL4] =
			dcn32_clock_source_create(ctx, bios: ctx->dc_bios,
				id: CLOCK_SOURCE_COMBO_PHY_PLL4,
				regs: &clk_src_regs[4], dp_clk_src: false);

	pool->base.clk_src_count = DCN32_CLK_SRC_TOTAL;

	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn32_clock_source_create(ctx, bios: ctx->dc_bios,
				id: CLOCK_SOURCE_ID_DP_DTO,
				regs: &clk_src_regs[0], dp_clk_src: true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n" );
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	/* DCCG */
	pool->base.dccg = dccg32_create(ctx, regs: &dccg_regs, dccg_shift: &dccg_shift, dccg_mask: &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n" );
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* DML */
	dml_init_instance(lib: &dc->dml, soc_bb: &dcn3_2_soc, ip_params: &dcn3_2_ip, project: DML_PROJECT_DCN32);

	/* IRQ Service */
	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn32_create(init_data: &init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* HUBBUB */
	pool->base.hubbub = dcn32_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n" );
		goto create_fail;
	}

	/* HUBPs, DPPs, OPPs, TGs, ABMs */
	/* i walks HW instances, j walks pool array slots for non-fused pipes */
	for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) {

		/* if pipe is disabled, skip instance of HW pipe,
		 * i.e, skip ASIC register instance
		 */
		if (pipe_fuses & 1 << i)
			continue;

		/* HUBPs */
		pool->base.hubps[j] = dcn32_hubp_create(ctx, inst: i);
		if (pool->base.hubps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create hubps!\n" );
			goto create_fail;
		}

		/* DPPs */
		pool->base.dpps[j] = dcn32_dpp_create(ctx, inst: i);
		if (pool->base.dpps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpps!\n" );
			goto create_fail;
		}

		/* OPPs */
		pool->base.opps[j] = dcn32_opp_create(ctx, inst: i);
		if (pool->base.opps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n" );
			goto create_fail;
		}

		/* TGs */
		pool->base.timing_generators[j] = dcn32_timing_generator_create(
				ctx, instance: i);
		if (pool->base.timing_generators[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n" );
			goto create_fail;
		}

		/* ABMs */
		pool->base.multiple_abms[j] = dmub_abm_create(ctx,
				regs: &abm_regs[i],
				abm_shift: &abm_shift,
				abm_mask: &abm_mask);
		if (pool->base.multiple_abms[j] == NULL) {
			dm_error("DC: failed to create abm for pipe %d!\n" , i);
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}

		/* index for resource pool arrays for next valid pipe */
		j++;
	}

	/* PSR */
	pool->base.psr = dmub_psr_create(ctx);
	if (pool->base.psr == NULL) {
		dm_error("DC: failed to create psr obj!\n" );
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* MPCCs */
	pool->base.mpc = dcn32_mpc_create(ctx, num_mpcc: pool->base.res_cap->num_timing_generator, num_rmu: pool->base.res_cap->num_mpc_3dlut);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n" );
		goto create_fail;
	}

	/* DSCs */
	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn32_dsc_create(ctx, inst: i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n" , i);
			goto create_fail;
		}
	}

	/* DWB */
	if (!dcn32_dwbc_create(ctx, pool: &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n" );
		goto create_fail;
	}

	/* MMHUBBUB */
	if (!dcn32_mmhubbub_create(ctx, pool: &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n" );
		goto create_fail;
	}

	/* AUX and I2C */
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn32_aux_engine_create(ctx, inst: i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n" );
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn32_i2c_hw_create(ctx, inst: i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n" );
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	/* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */
	if (!resource_construct(num_virtual_links, dc, pool: &pool->base,
			create_funcs: &res_create_funcs))
		goto create_fail;

	/* HW Sequencer init functions and Plane caps */
	dcn32_hw_sequencer_init_functions(dc);

	dc->caps.max_planes =  pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	/* OEM I2C DDC service, only when VBIOS reports an OEM i2c object */
	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
		ddc_init_data.ctx = dc->ctx;
		ddc_init_data.link = NULL;
		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
		ddc_init_data.id.enum_id = 0;
		ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
		pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
	} else {
		pool->base.oem_device = NULL;
	}

	/* DML2 configuration: core options and callbacks into DC resource code */
	dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
	dc->dml2_options.use_native_pstate_optimization = false;
	dc->dml2_options.use_native_soc_bb_construction = true;
	dc->dml2_options.minimize_dispclk_using_odm = true;

	dc->dml2_options.callbacks.dc = dc;
	dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
	dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
	dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
	dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
	dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
	dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
	dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
	dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;

	/* DML2 SubVP (phantom pipe) callbacks */
	dc->dml2_options.svp_pstate.callbacks.dc = dc;
	dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
	dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
	dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
	dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
	dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
	dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
	dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
	dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
	dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
	dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;

	dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
	dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
	dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
	dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines;

	dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp;
	dc->dml2_options.svp_pstate.force_enable_subvp = dc->debug.force_subvp_mclk_switch;

	/* MALL geometry passed to DML2 */
	dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size;
	dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways;
	dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes;
	dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE;
	dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE;
	dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES;
	dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH;

	dc->dml2_options.max_segments_per_hubp = 18;
	dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
	dc->dml2_options.map_dc_pipes_with_callbacks = true;

	/* GC 11.0.3 workaround: cap SDPIF requests per UMC unless overridden */
	if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
		dc->config.sdpif_request_limit_words_per_umc = 16;

	DC_FP_END();

	return true;

create_fail:

	DC_FP_END();

	/* tear down everything created so far */
	dcn32_resource_destruct(pool);

	return false;
}
2502 | |
2503 | struct resource_pool *dcn32_create_resource_pool( |
2504 | const struct dc_init_data *init_data, |
2505 | struct dc *dc) |
2506 | { |
2507 | struct dcn32_resource_pool *pool = |
2508 | kzalloc(size: sizeof(struct dcn32_resource_pool), GFP_KERNEL); |
2509 | |
2510 | if (!pool) |
2511 | return NULL; |
2512 | |
2513 | if (dcn32_resource_construct(num_virtual_links: init_data->num_virtual_links, dc, pool)) |
2514 | return &pool->base; |
2515 | |
2516 | BREAK_TO_DEBUGGER(); |
2517 | kfree(objp: pool); |
2518 | return NULL; |
2519 | } |
2520 | |
2521 | /* |
2522 | * Find the most optimal free pipe from res_ctx, which could be used as a |
2523 | * secondary dpp pipe for input opp head pipe. |
2524 | * |
2525 | * a free pipe - a pipe in input res_ctx not yet used for any streams or |
2526 | * planes. |
2527 | * secondary dpp pipe - a pipe gets inserted to a head OPP pipe's MPC blending |
 * tree. This is typically used for rendering MPO planes or additional offset
2529 | * areas in MPCC combine. |
2530 | * |
2531 | * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe |
2532 | * ------------------------------------------------------------------------- |
2533 | * |
2534 | * PROBLEM: |
2535 | * |
2536 | * 1. There is a hardware limitation that a secondary DPP pipe cannot be |
2537 | * transferred from one MPC blending tree to the other in a single frame. |
2538 | * Otherwise it could cause glitches on the screen. |
2539 | * |
2540 | * For instance, we cannot transition from state 1 to state 2 in one frame. This |
2541 | * is because PIPE1 is transferred from PIPE0's MPC blending tree over to |
2542 | * PIPE2's MPC blending tree, which is not supported by hardware. |
2543 | * To support this transition we need to first remove PIPE1 from PIPE0's MPC |
2544 | * blending tree in one frame and then insert PIPE1 to PIPE2's MPC blending tree |
2545 | * in the next frame. This is not optimal as it will delay the flip for two |
2546 | * frames. |
2547 | * |
2548 | * State 1: |
2549 | * PIPE0 -- secondary DPP pipe --> (PIPE1) |
2550 | * PIPE2 -- secondary DPP pipe --> NONE |
2551 | * |
2552 | * State 2: |
2553 | * PIPE0 -- secondary DPP pipe --> NONE |
2554 | * PIPE2 -- secondary DPP pipe --> (PIPE1) |
2555 | * |
2556 | * 2. We want to in general minimize the unnecessary changes in pipe topology. |
2557 | * If a pipe is already added in current blending tree and there are no changes |
2558 | * to plane topology, we don't want to swap it with another free pipe |
2559 | * unnecessarily in every update. Powering up and down a pipe would require a |
2560 | * full update which delays the flip for 1 frame. If we use the original pipe |
2561 | * we don't have to toggle its power. So we can flip faster. |
2562 | */ |
2563 | static int find_optimal_free_pipe_as_secondary_dpp_pipe( |
2564 | const struct resource_context *cur_res_ctx, |
2565 | struct resource_context *new_res_ctx, |
2566 | const struct resource_pool *pool, |
2567 | const struct pipe_ctx *new_opp_head) |
2568 | { |
2569 | const struct pipe_ctx *cur_opp_head; |
2570 | int free_pipe_idx; |
2571 | |
2572 | cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx]; |
2573 | free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree( |
2574 | cur_res_ctx, new_res_ctx, cur_opp_head); |
2575 | |
2576 | /* Up until here if we have not found a free secondary pipe, we will |
2577 | * need to wait for at least one frame to complete the transition |
2578 | * sequence. |
2579 | */ |
2580 | if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) |
2581 | free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( |
2582 | cur_res_ctx, new_res_ctx, pool); |
2583 | |
2584 | /* Up until here if we have not found a free secondary pipe, we will |
2585 | * need to wait for at least two frames to complete the transition |
2586 | * sequence. It really doesn't matter which pipe we decide take from |
2587 | * current enabled pipes. It won't save our frame time when we swap only |
2588 | * one pipe or more pipes. |
2589 | */ |
2590 | if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) |
2591 | free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( |
2592 | cur_res_ctx, new_res_ctx, pool); |
2593 | |
2594 | if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) |
2595 | free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool); |
2596 | |
2597 | return free_pipe_idx; |
2598 | } |
2599 | |
2600 | static struct pipe_ctx *find_idle_secondary_pipe_check_mpo( |
2601 | struct resource_context *res_ctx, |
2602 | const struct resource_pool *pool, |
2603 | const struct pipe_ctx *primary_pipe) |
2604 | { |
2605 | int i; |
2606 | struct pipe_ctx *secondary_pipe = NULL; |
2607 | struct pipe_ctx *next_odm_mpo_pipe = NULL; |
2608 | int primary_index, preferred_pipe_idx; |
2609 | struct pipe_ctx *old_primary_pipe = NULL; |
2610 | |
2611 | /* |
2612 | * Modified from find_idle_secondary_pipe |
2613 | * With windowed MPO and ODM, we want to avoid the case where we want a |
2614 | * free pipe for the left side but the free pipe is being used on the |
2615 | * right side. |
2616 | * Add check on current_state if the primary_pipe is the left side, |
2617 | * to check the right side ( primary_pipe->next_odm_pipe ) to see if |
2618 | * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe ) |
2619 | * - If so, then don't use this pipe |
2620 | * EXCEPTION - 3 plane ( 2 MPO plane ) case |
2621 | * - in this case, the primary pipe has already gotten a free pipe for the |
2622 | * MPO window in the left |
2623 | * - when it tries to get a free pipe for the MPO window on the right, |
2624 | * it will see that it is already assigned to the right side |
2625 | * ( primary_pipe->next_odm_pipe ). But in this case, we want this |
2626 | * free pipe, since it will be for the right side. So add an |
2627 | * additional condition, that skipping the free pipe on the right only |
2628 | * applies if the primary pipe has no bottom pipe currently assigned |
2629 | */ |
2630 | if (primary_pipe) { |
2631 | primary_index = primary_pipe->pipe_idx; |
2632 | old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index]; |
2633 | if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe) |
2634 | && (!primary_pipe->bottom_pipe)) |
2635 | next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe; |
2636 | |
2637 | preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx; |
2638 | if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) && |
2639 | !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) { |
2640 | secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; |
2641 | secondary_pipe->pipe_idx = preferred_pipe_idx; |
2642 | } |
2643 | } |
2644 | |
2645 | /* |
2646 | * search backwards for the second pipe to keep pipe |
2647 | * assignment more consistent |
2648 | */ |
2649 | if (!secondary_pipe) |
2650 | for (i = pool->pipe_count - 1; i >= 0; i--) { |
2651 | if ((res_ctx->pipe_ctx[i].stream == NULL) && |
2652 | !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) { |
2653 | secondary_pipe = &res_ctx->pipe_ctx[i]; |
2654 | secondary_pipe->pipe_idx = i; |
2655 | break; |
2656 | } |
2657 | } |
2658 | |
2659 | return secondary_pipe; |
2660 | } |
2661 | |
2662 | static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( |
2663 | struct dc_state *state, |
2664 | const struct resource_pool *pool, |
2665 | struct dc_stream_state *stream, |
2666 | const struct pipe_ctx *head_pipe) |
2667 | { |
2668 | struct resource_context *res_ctx = &state->res_ctx; |
2669 | struct pipe_ctx *idle_pipe, *pipe; |
2670 | struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx; |
2671 | int head_index; |
2672 | |
2673 | if (!head_pipe) |
2674 | ASSERT(0); |
2675 | |
2676 | /* |
2677 | * Modified from dcn20_acquire_idle_pipe_for_layer |
2678 | * Check if head_pipe in old_context already has bottom_pipe allocated. |
2679 | * - If so, check if that pipe is available in the current context. |
2680 | * -- If so, reuse pipe from old_context |
2681 | */ |
2682 | head_index = head_pipe->pipe_idx; |
2683 | pipe = &old_ctx->pipe_ctx[head_index]; |
2684 | if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) { |
2685 | idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx]; |
2686 | idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx; |
2687 | } else { |
2688 | idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, primary_pipe: head_pipe); |
2689 | if (!idle_pipe) |
2690 | return NULL; |
2691 | } |
2692 | |
2693 | idle_pipe->stream = head_pipe->stream; |
2694 | idle_pipe->stream_res.tg = head_pipe->stream_res.tg; |
2695 | idle_pipe->stream_res.opp = head_pipe->stream_res.opp; |
2696 | |
2697 | idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; |
2698 | idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; |
2699 | idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; |
2700 | idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; |
2701 | |
2702 | return idle_pipe; |
2703 | } |
2704 | |
2705 | static int find_optimal_free_pipe_as_secondary_opp_head( |
2706 | const struct resource_context *cur_res_ctx, |
2707 | struct resource_context *new_res_ctx, |
2708 | const struct resource_pool *pool, |
2709 | const struct pipe_ctx *new_otg_master) |
2710 | { |
2711 | const struct pipe_ctx *cur_otg_master; |
2712 | int free_pipe_idx; |
2713 | |
2714 | cur_otg_master = &cur_res_ctx->pipe_ctx[new_otg_master->pipe_idx]; |
2715 | free_pipe_idx = resource_find_free_pipe_used_as_sec_opp_head_by_cur_otg_master( |
2716 | cur_res_ctx, new_res_ctx, cur_otg_master); |
2717 | |
2718 | /* Up until here if we have not found a free secondary pipe, we will |
2719 | * need to wait for at least one frame to complete the transition |
2720 | * sequence. |
2721 | */ |
2722 | if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) |
2723 | free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( |
2724 | cur_res_ctx, new_res_ctx, pool); |
2725 | |
2726 | if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) |
2727 | free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool); |
2728 | |
2729 | return free_pipe_idx; |
2730 | } |
2731 | |
2732 | struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( |
2733 | const struct dc_state *cur_ctx, |
2734 | struct dc_state *new_ctx, |
2735 | const struct resource_pool *pool, |
2736 | const struct pipe_ctx *opp_head_pipe) |
2737 | { |
2738 | |
2739 | int free_pipe_idx; |
2740 | struct pipe_ctx *free_pipe; |
2741 | |
2742 | if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm) |
2743 | return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( |
2744 | state: new_ctx, pool, stream: opp_head_pipe->stream, head_pipe: opp_head_pipe); |
2745 | |
2746 | free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( |
2747 | cur_res_ctx: &cur_ctx->res_ctx, new_res_ctx: &new_ctx->res_ctx, |
2748 | pool, new_opp_head: opp_head_pipe); |
2749 | if (free_pipe_idx >= 0) { |
2750 | free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; |
2751 | free_pipe->pipe_idx = free_pipe_idx; |
2752 | free_pipe->stream = opp_head_pipe->stream; |
2753 | free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg; |
2754 | free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp; |
2755 | |
2756 | free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx]; |
2757 | free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx]; |
2758 | free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx]; |
2759 | free_pipe->plane_res.mpcc_inst = |
2760 | pool->dpps[free_pipe->pipe_idx]->inst; |
2761 | } else { |
2762 | ASSERT(opp_head_pipe); |
2763 | free_pipe = NULL; |
2764 | } |
2765 | |
2766 | return free_pipe; |
2767 | } |
2768 | |
2769 | struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( |
2770 | const struct dc_state *cur_ctx, |
2771 | struct dc_state *new_ctx, |
2772 | const struct resource_pool *pool, |
2773 | const struct pipe_ctx *otg_master) |
2774 | { |
2775 | int free_pipe_idx = find_optimal_free_pipe_as_secondary_opp_head( |
2776 | cur_res_ctx: &cur_ctx->res_ctx, new_res_ctx: &new_ctx->res_ctx, |
2777 | pool, new_otg_master: otg_master); |
2778 | struct pipe_ctx *free_pipe; |
2779 | |
2780 | if (free_pipe_idx >= 0) { |
2781 | free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; |
2782 | free_pipe->pipe_idx = free_pipe_idx; |
2783 | free_pipe->stream = otg_master->stream; |
2784 | free_pipe->stream_res.tg = otg_master->stream_res.tg; |
2785 | free_pipe->stream_res.dsc = NULL; |
2786 | free_pipe->stream_res.opp = pool->opps[free_pipe_idx]; |
2787 | free_pipe->plane_res.mi = pool->mis[free_pipe_idx]; |
2788 | free_pipe->plane_res.hubp = pool->hubps[free_pipe_idx]; |
2789 | free_pipe->plane_res.ipp = pool->ipps[free_pipe_idx]; |
2790 | free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; |
2791 | free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; |
2792 | free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst; |
2793 | if (free_pipe->stream->timing.flags.DSC == 1) { |
2794 | dcn20_acquire_dsc(dc: free_pipe->stream->ctx->dc, |
2795 | res_ctx: &new_ctx->res_ctx, |
2796 | dsc: &free_pipe->stream_res.dsc, |
2797 | pipe_idx: free_pipe_idx); |
2798 | ASSERT(free_pipe->stream_res.dsc); |
2799 | if (free_pipe->stream_res.dsc == NULL) { |
2800 | memset(free_pipe, 0, sizeof(*free_pipe)); |
2801 | free_pipe = NULL; |
2802 | } |
2803 | } |
2804 | } else { |
2805 | ASSERT(otg_master); |
2806 | free_pipe = NULL; |
2807 | } |
2808 | |
2809 | return free_pipe; |
2810 | } |
2811 | |
2812 | unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) |
2813 | { |
2814 | /* |
2815 | * DCN32 and DCN321 SKUs may have different sizes for MALL |
2816 | * but we may not be able to access all the MALL space. |
2817 | * If the num_chans is power of 2, then we can access all |
2818 | * of the available MALL space. Otherwise, we can only |
2819 | * access: |
2820 | * |
2821 | * max_cab_size_in_bytes = total_cache_size_in_bytes * |
2822 | * ((2^floor(log2(num_chans)))/num_chans) |
2823 | * |
2824 | * Calculating the MALL sizes for all available SKUs, we |
2825 | * have come up with the follow simplified check. |
2826 | * - we have max_chans which provides the max MALL size. |
2827 | * Each chans supports 4MB of MALL so: |
2828 | * |
2829 | * total_cache_size_in_bytes = max_chans * 4 MB |
2830 | * |
2831 | * - we have avail_chans which shows the number of channels |
2832 | * we can use if we can't access the entire MALL space. |
2833 | * It is generally half of max_chans |
2834 | * - so we use the following checks: |
2835 | * |
2836 | * if (num_chans == max_chans), return max_chans |
2837 | * if (num_chans < max_chans), return avail_chans |
2838 | * |
2839 | * - exception is GC_11_0_0 where we can't access max_chans, |
2840 | * so we define max_avail_chans as the maximum available |
2841 | * MALL space |
2842 | * |
2843 | */ |
2844 | int gc_11_0_0_max_chans = 48; |
2845 | int gc_11_0_0_max_avail_chans = 32; |
2846 | int gc_11_0_0_avail_chans = 16; |
2847 | int gc_11_0_3_max_chans = 16; |
2848 | int gc_11_0_3_avail_chans = 8; |
2849 | int gc_11_0_2_max_chans = 8; |
2850 | int gc_11_0_2_avail_chans = 4; |
2851 | |
2852 | if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) { |
2853 | return (num_chans == gc_11_0_0_max_chans) ? |
2854 | gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans; |
2855 | } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) { |
2856 | return (num_chans == gc_11_0_2_max_chans) ? |
2857 | gc_11_0_2_max_chans : gc_11_0_2_avail_chans; |
2858 | } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) { |
2859 | return (num_chans == gc_11_0_3_max_chans) ? |
2860 | gc_11_0_3_max_chans : gc_11_0_3_avail_chans; |
2861 | } |
2862 | } |
2863 | |