1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/delay.h>
27#include "dm_services.h"
28#include "basics/dc_common.h"
29#include "core_types.h"
30#include "resource.h"
31#include "custom_float.h"
32#include "dcn10_hwseq.h"
33#include "dcn10/dcn10_hw_sequencer_debug.h"
34#include "dce/dce_hwseq.h"
35#include "abm.h"
36#include "dmcu.h"
37#include "dcn10/dcn10_optc.h"
38#include "dcn10/dcn10_dpp.h"
39#include "dcn10/dcn10_mpc.h"
40#include "timing_generator.h"
41#include "opp.h"
42#include "ipp.h"
43#include "mpc.h"
44#include "reg_helper.h"
45#include "dcn10/dcn10_hubp.h"
46#include "dcn10/dcn10_hubbub.h"
47#include "dcn10/dcn10_cm_common.h"
48#include "dccg.h"
49#include "clk_mgr.h"
50#include "link_hwss.h"
51#include "dpcd_defs.h"
52#include "dsc.h"
53#include "dce/dmub_psr.h"
54#include "dc_dmub_srv.h"
55#include "dce/dmub_hw_lock_mgr.h"
56#include "dc_trace.h"
57#include "dce/dmub_outbox.h"
58#include "link.h"
59#include "dc_state_priv.h"
60
/* Logging: DC_LOGGER resolves to a function-local logger declared by
 * DC_LOGGER_INIT(), so each function can pick its own logger source. */
#define DC_LOGGER \
	dc_logger
#define DC_LOGGER_INIT(logger) \
	struct dal_logger *dc_logger = logger

/* Register helpers: all REG_* macros below operate on the dce_hwseq
 * pointed to by a local `hws` (context, register map, shifts, masks). */
#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256

/* DOMAINx_PGFSM_PWR_STATUS values polled by the *_pg_control functions. */
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
83
84static void print_microsec(struct dc_context *dc_ctx,
85 struct dc_log_buffer_ctx *log_ctx,
86 uint32_t ref_cycle)
87{
88 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
89 static const unsigned int frac = 1000;
90 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
91
92 DTN_INFO(" %11d.%03d",
93 us_x10 / frac,
94 us_x10 % frac);
95}
96
97void dcn10_lock_all_pipes(struct dc *dc,
98 struct dc_state *context,
99 bool lock)
100{
101 struct pipe_ctx *pipe_ctx;
102 struct pipe_ctx *old_pipe_ctx;
103 struct timing_generator *tg;
104 int i;
105
106 for (i = 0; i < dc->res_pool->pipe_count; i++) {
107 old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
108 pipe_ctx = &context->res_ctx.pipe_ctx[i];
109 tg = pipe_ctx->stream_res.tg;
110
111 /*
112 * Only lock the top pipe's tg to prevent redundant
113 * (un)locking. Also skip if pipe is disabled.
114 */
115 if (pipe_ctx->top_pipe ||
116 !pipe_ctx->stream ||
117 (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
118 !tg->funcs->is_tg_enabled(tg) ||
119 dc_state_get_pipe_subvp_type(state: context, pipe_ctx) == SUBVP_PHANTOM)
120 continue;
121
122 if (lock)
123 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
124 else
125 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
126 }
127}
128
/*
 * Dump MPC and DPP CRC result registers to the DTN log. Each read is
 * guarded by REG() so it is skipped when the register offset is 0
 * (i.e. not present in this ASIC's register map).
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
				REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
				REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
142
/*
 * Log the HUBBUB watermark sets (urgent, self-refresh enter/exit,
 * DRAM clock change) read back from hardware, one line per set.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
		" sr_enter sr_exit dram_clk_change\n");

	/* 4 watermark sets (A-D); values are ref-clock cycles, printed as us */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
171
/*
 * Dump per-HUBP state to the DTN log: general surface/viewport config,
 * then the RQ (request), DLG (delay generator) and TTU (time-to-underflow)
 * register groups. Pipes whose HUBP is blanked are skipped in every table.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* refresh the cached register state before printing */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
285
/*
 * Log the color pipeline state: per-DPP gamma LUT modes and gamut-remap
 * matrix, DPP color capability bits, per-MPCC blending state, and MPC
 * color capability bits. Disabled DPPs and unused MPCCs are skipped.
 */
static void dcn10_log_color_state(struct dc *dc,
				  struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
		 " GAMUT adjust "
		 "C11 C12 C13 C14 "
		 "C21 C22 C23 C24 "
		 "C31 C32 C33 C34 \n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);
		dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);

		if (!s.is_enabled)
			continue;

		/* LUT mode codes are decoded inline to human-readable names */
		DTN_INFO("[%2d]: %11xh %11s %9s %9s"
			 " %12s "
			 "%010lld %010lld %010lld %010lld "
			 "%010lld %010lld %010lld %010lld "
			 "%010lld %010lld %010lld %010lld",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
										  "SW"),
				s.gamut_remap.temperature_matrix[0].value,
				s.gamut_remap.temperature_matrix[1].value,
				s.gamut_remap.temperature_matrix[2].value,
				s.gamut_remap.temperature_matrix[3].value,
				s.gamut_remap.temperature_matrix[4].value,
				s.gamut_remap.temperature_matrix[5].value,
				s.gamut_remap.temperature_matrix[6].value,
				s.gamut_remap.temperature_matrix[7].value,
				s.gamut_remap.temperature_matrix[8].value,
				s.gamut_remap.temperature_matrix[9].value,
				s.gamut_remap.temperature_matrix[10].value,
				s.gamut_remap.temperature_matrix[11].value);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");
	DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
		 " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
		 " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
		 " blnd_lut:%d oscs:%d\n\n",
		 dc->caps.color.dpp.input_lut_shared,
		 dc->caps.color.dpp.icsc,
		 dc->caps.color.dpp.dgam_ram,
		 dc->caps.color.dpp.dgam_rom_caps.srgb,
		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
		 dc->caps.color.dpp.dgam_rom_caps.pq,
		 dc->caps.color.dpp.dgam_rom_caps.hlg,
		 dc->caps.color.dpp.post_csc,
		 dc->caps.color.dpp.gamma_corr,
		 dc->caps.color.dpp.dgam_rom_for_yuv,
		 dc->caps.color.dpp.hw_3d_lut,
		 dc->caps.color.dpp.ogam_ram,
		 dc->caps.color.dpp.ocsc);

	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is not connected to any OPP */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");
	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
		 dc->caps.color.mpc.gamut_remap,
		 dc->caps.color.mpc.num_3dluts,
		 dc->caps.color.mpc.ogam_ram,
		 dc->caps.color.mpc.ocsc);
}
387
388void dcn10_log_hw_state(struct dc *dc,
389 struct dc_log_buffer_ctx *log_ctx)
390{
391 struct dc_context *dc_ctx = dc->ctx;
392 struct resource_pool *pool = dc->res_pool;
393 int i;
394
395 DTN_INFO_BEGIN();
396
397 dcn10_log_hubbub_state(dc, log_ctx);
398
399 dcn10_log_hubp_states(dc, log_ctx);
400
401 if (dc->hwss.log_color_state)
402 dc->hwss.log_color_state(dc, log_ctx);
403 else
404 dcn10_log_color_state(dc, log_ctx);
405
406 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
407
408 for (i = 0; i < pool->timing_generator_count; i++) {
409 struct timing_generator *tg = pool->timing_generators[i];
410 struct dcn_otg_state s = {0};
411 /* Read shared OTG state registers for all DCNx */
412 optc1_read_otg_state(DCN10TG_FROM_TG(tg), s: &s);
413
414 /*
415 * For DCN2 and greater, a register on the OPP is used to
416 * determine if the CRTC is blanked instead of the OTG. So use
417 * dpg_is_blanked() if exists, otherwise fallback on otg.
418 *
419 * TODO: Implement DCN-specific read_otg_state hooks.
420 */
421 if (pool->opps[i]->funcs->dpg_is_blanked)
422 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
423 else
424 s.blank_enabled = tg->funcs->is_blanked(tg);
425
426 //only print if OTG master is enabled
427 if ((s.otg_enabled & 1) == 0)
428 continue;
429
430 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
431 tg->inst,
432 s.v_blank_start,
433 s.v_blank_end,
434 s.v_sync_a_start,
435 s.v_sync_a_end,
436 s.v_sync_a_pol,
437 s.v_total_max,
438 s.v_total_min,
439 s.v_total_max_sel,
440 s.v_total_min_sel,
441 s.h_blank_start,
442 s.h_blank_end,
443 s.h_sync_a_start,
444 s.h_sync_a_end,
445 s.h_sync_a_pol,
446 s.h_total,
447 s.v_total,
448 s.underflow_occurred_status,
449 s.blank_enabled);
450
451 // Clear underflow for debug purposes
452 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
453 // This function is called only from Windows or Diags test environment, hence it's safe to clear
454 // it from here without affecting the original intent.
455 tg->funcs->clear_optc_underflow(tg);
456 }
457 DTN_INFO("\n");
458
459 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
460 // TODO: Update golden log header to reflect this name change
461 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
462 for (i = 0; i < pool->res_cap->num_dsc; i++) {
463 struct display_stream_compressor *dsc = pool->dscs[i];
464 struct dcn_dsc_state s = {0};
465
466 dsc->funcs->dsc_read_state(dsc, &s);
467 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
468 dsc->inst,
469 s.dsc_clock_en,
470 s.dsc_slice_width,
471 s.dsc_bits_per_pixel);
472 DTN_INFO("\n");
473 }
474 DTN_INFO("\n");
475
476 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
477 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
478 for (i = 0; i < pool->stream_enc_count; i++) {
479 struct stream_encoder *enc = pool->stream_enc[i];
480 struct enc_state s = {0};
481
482 if (enc->funcs->enc_read_state) {
483 enc->funcs->enc_read_state(enc, &s);
484 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
485 enc->id,
486 s.dsc_mode,
487 s.sec_gsp_pps_line_num,
488 s.vbid6_line_reference,
489 s.vbid6_line_num,
490 s.sec_gsp_pps_enable,
491 s.sec_stream_enable);
492 DTN_INFO("\n");
493 }
494 }
495 DTN_INFO("\n");
496
497 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
498 for (i = 0; i < dc->link_count; i++) {
499 struct link_encoder *lenc = dc->links[i]->link_enc;
500
501 struct link_enc_state s = {0};
502
503 if (lenc && lenc->funcs->read_state) {
504 lenc->funcs->read_state(lenc, &s);
505 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
506 i,
507 s.dphy_fec_en,
508 s.dphy_fec_ready_shadow,
509 s.dphy_fec_active_status,
510 s.dp_link_training_complete);
511 DTN_INFO("\n");
512 }
513 }
514 DTN_INFO("\n");
515
516 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
517 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
518 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
519 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
520 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
521 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
522 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
523 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
524 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
525
526 log_mpc_crc(dc, log_ctx);
527
528 {
529 if (pool->hpo_dp_stream_enc_count > 0) {
530 DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
531 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
532 struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
533 struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
534
535 if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
536 hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
537
538 DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
539 hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
540 hpo_dp_se_state.stream_enc_enabled,
541 hpo_dp_se_state.otg_inst,
542 (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
543 ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
544 (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
545 (hpo_dp_se_state.component_depth == 0) ? 6 :
546 ((hpo_dp_se_state.component_depth == 1) ? 8 :
547 (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
548 hpo_dp_se_state.vid_stream_enabled,
549 hpo_dp_se_state.sdp_enabled,
550 hpo_dp_se_state.compressed_format,
551 hpo_dp_se_state.mapped_to_link_enc);
552 }
553 }
554
555 DTN_INFO("\n");
556 }
557
558 /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
559 if (pool->hpo_dp_link_enc_count) {
560 DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
561
562 for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
563 struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
564 struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
565
566 if (hpo_dp_link_enc->funcs->read_state) {
567 hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
568 DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
569 hpo_dp_link_enc->inst,
570 hpo_dp_le_state.link_enc_enabled,
571 (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
572 (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
573 (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
574 hpo_dp_le_state.lane_count,
575 hpo_dp_le_state.stream_src[0],
576 hpo_dp_le_state.slot_count[0],
577 hpo_dp_le_state.vc_rate_x[0],
578 hpo_dp_le_state.vc_rate_y[0]);
579 DTN_INFO("\n");
580 }
581 }
582
583 DTN_INFO("\n");
584 }
585 }
586
587 DTN_INFO_END();
588}
589
590bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
591{
592 struct hubp *hubp = pipe_ctx->plane_res.hubp;
593 struct timing_generator *tg = pipe_ctx->stream_res.tg;
594
595 if (tg->funcs->is_optc_underflow_occurred(tg)) {
596 tg->funcs->clear_optc_underflow(tg);
597 return true;
598 }
599
600 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
601 hubp->funcs->hubp_clear_underflow(hubp);
602 return true;
603 }
604 return false;
605}
606
607void dcn10_enable_power_gating_plane(
608 struct dce_hwseq *hws,
609 bool enable)
610{
611 bool force_on = true; /* disable power gating */
612
613 if (enable)
614 force_on = false;
615
616 /* DCHUBP0/1/2/3 */
617 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
618 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
619 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
620 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
621
622 /* DPP0/1/2/3 */
623 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
624 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
625 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
626 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
627}
628
/*
 * Take all four display pipes out of VGA mode. No-op when none of the
 * DxVGA_CONTROL registers report VGA mode enabled.
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* nothing to do when no pipe is in VGA mode */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 * VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
661
662/**
663 * dcn10_dpp_pg_control - DPP power gate control.
664 *
665 * @hws: dce_hwseq reference.
666 * @dpp_inst: DPP instance reference.
667 * @power_on: true if we want to enable power gate, false otherwise.
668 *
669 * Enable or disable power gate in the specific DPP instance.
670 */
671void dcn10_dpp_pg_control(
672 struct dce_hwseq *hws,
673 unsigned int dpp_inst,
674 bool power_on)
675{
676 uint32_t power_gate = power_on ? 0 : 1;
677 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
678
679 if (hws->ctx->dc->debug.disable_dpp_power_gate)
680 return;
681 if (REG(DOMAIN1_PG_CONFIG) == 0)
682 return;
683
684 switch (dpp_inst) {
685 case 0: /* DPP0 */
686 REG_UPDATE(DOMAIN1_PG_CONFIG,
687 DOMAIN1_POWER_GATE, power_gate);
688
689 REG_WAIT(DOMAIN1_PG_STATUS,
690 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
691 1, 1000);
692 break;
693 case 1: /* DPP1 */
694 REG_UPDATE(DOMAIN3_PG_CONFIG,
695 DOMAIN3_POWER_GATE, power_gate);
696
697 REG_WAIT(DOMAIN3_PG_STATUS,
698 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
699 1, 1000);
700 break;
701 case 2: /* DPP2 */
702 REG_UPDATE(DOMAIN5_PG_CONFIG,
703 DOMAIN5_POWER_GATE, power_gate);
704
705 REG_WAIT(DOMAIN5_PG_STATUS,
706 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
707 1, 1000);
708 break;
709 case 3: /* DPP3 */
710 REG_UPDATE(DOMAIN7_PG_CONFIG,
711 DOMAIN7_POWER_GATE, power_gate);
712
713 REG_WAIT(DOMAIN7_PG_STATUS,
714 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
715 1, 1000);
716 break;
717 default:
718 BREAK_TO_DEBUGGER();
719 break;
720 }
721}
722
723/**
724 * dcn10_hubp_pg_control - HUBP power gate control.
725 *
726 * @hws: dce_hwseq reference.
727 * @hubp_inst: DPP instance reference.
728 * @power_on: true if we want to enable power gate, false otherwise.
729 *
730 * Enable or disable power gate in the specific HUBP instance.
731 */
732void dcn10_hubp_pg_control(
733 struct dce_hwseq *hws,
734 unsigned int hubp_inst,
735 bool power_on)
736{
737 uint32_t power_gate = power_on ? 0 : 1;
738 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
739
740 if (hws->ctx->dc->debug.disable_hubp_power_gate)
741 return;
742 if (REG(DOMAIN0_PG_CONFIG) == 0)
743 return;
744
745 switch (hubp_inst) {
746 case 0: /* DCHUBP0 */
747 REG_UPDATE(DOMAIN0_PG_CONFIG,
748 DOMAIN0_POWER_GATE, power_gate);
749
750 REG_WAIT(DOMAIN0_PG_STATUS,
751 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
752 1, 1000);
753 break;
754 case 1: /* DCHUBP1 */
755 REG_UPDATE(DOMAIN2_PG_CONFIG,
756 DOMAIN2_POWER_GATE, power_gate);
757
758 REG_WAIT(DOMAIN2_PG_STATUS,
759 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
760 1, 1000);
761 break;
762 case 2: /* DCHUBP2 */
763 REG_UPDATE(DOMAIN4_PG_CONFIG,
764 DOMAIN4_POWER_GATE, power_gate);
765
766 REG_WAIT(DOMAIN4_PG_STATUS,
767 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
768 1, 1000);
769 break;
770 case 3: /* DCHUBP3 */
771 REG_UPDATE(DOMAIN6_PG_CONFIG,
772 DOMAIN6_POWER_GATE, power_gate);
773
774 REG_WAIT(DOMAIN6_PG_STATUS,
775 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
776 1, 1000);
777 break;
778 default:
779 BREAK_TO_DEBUGGER();
780 break;
781 }
782}
783
/*
 * Power up (un-gate) the DPP and HUBP for a plane/pipe. The DPP root
 * clock is enabled first, then DPP/HUBP power gating is released inside
 * an IP_REQUEST_EN bracket (required around PG programming).
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		/* close the IP_REQUEST_EN bracket */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
809
/*
 * Revert the DEGVIDCN10_253 stutter workaround: re-blank HUBP0 and power
 * it back down (inside the required IP_REQUEST_EN bracket). No-op when
 * the workaround was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
829
/*
 * DEGVIDCN10_253 workaround: when every HUBP is power gated, stutter
 * cannot engage. Power HUBP0 back up (kept blanked) so stutter works.
 * Skipped when stutter is disabled for debug or the WA is not flagged
 * for this ASIC.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* bail out unless every pipe's HUBP is power gated */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
859
/*
 * Run the VBIOS golden init sequence for DCN: global init power gating,
 * then per-pipe disable. Restores DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE
 * if the command table flipped it (S0i3 DF-sleep workaround, see below).
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* platform-specific WA may fully replace this sequence */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* snapshot the pre-command-table self-refresh force state */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
899
/*
 * Workaround for a false OPTC underflow that can be raised while MPCCs
 * are being disconnected for this stream. The underflow status is only
 * cleared if it was NOT already asserted before the MPCC disconnect wait,
 * so genuine pre-existing underflows remain visible for debug.
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* remember whether underflow was already latched before the WA */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* clear only underflows newly raised during the disconnect */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
928
929static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
930{
931 struct pipe_ctx *other_pipe;
932 int vready_offset = pipe->pipe_dlg_param.vready_offset;
933
934 /* Always use the largest vready_offset of all connected pipes */
935 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
936 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
937 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
938 }
939 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
940 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
941 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
942 }
943 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
944 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
945 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
946 }
947 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
948 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
949 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
950 }
951
952 return vready_offset;
953}
954
/*
 * dcn10_enable_stream_timing - Program and start OTG timing for a stream.
 *
 * Only runs for the master (top) pipe; secondary pipes share its back end.
 * Sequence: enable OTG clock, program the pixel clock PLL, program OTG
 * timing, set the blank color, blank the OTG, then enable the CRTC.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if the clock source or CRTC
 * fails to program/enable.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Program the pixel clock PLL before any timing programming. */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track SYMCLK state for HDMI: the OTG now holds a reference. */
	if (dc_is_hdmi_tmds_signal(signal: stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* vready offset is shared across the whole pipe group (MPC/ODM). */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe: pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, colorspace: color_space, black_color: &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG (if not already blanked) before enabling the CRTC,
	 * and run the false-underflow workaround while waiting.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(tg: pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, stream: pipe_ctx->stream, tg: pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1056
/*
 * dcn10_reset_back_end_for_pipe - Tear down the back end (link, audio, OTG)
 * for one pipe and detach its stream.
 *
 * The shared back end (OTG/CRTC) is only disabled when called on the parent
 * pipe (top_pipe == NULL); children just release their stream reference.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		/* No stream encoder acquired: nothing was programmed. */
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(res_ctx: &dc->current_state->res_ctx, pool: dc->res_pool,
					audio: pipe_ctx->stream_res.audio, acquired: false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any DRR (variable refresh) state left on the OTG. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* Drop the OTG's SYMCLK reference taken at stream enable. */
		if (dc_is_hdmi_tmds_signal(signal: pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx belongs to the
	 * current state (the search below confirms that).
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1126
1127static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1128{
1129 struct hubp *hubp ;
1130 unsigned int i;
1131 bool need_recover = true;
1132
1133 if (!dc->debug.recovery_enabled)
1134 return false;
1135
1136 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1137 struct pipe_ctx *pipe_ctx =
1138 &dc->current_state->res_ctx.pipe_ctx[i];
1139 if (pipe_ctx != NULL) {
1140 hubp = pipe_ctx->plane_res.hubp;
1141 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1142 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1143 /* one pipe underflow, we will reset all the pipes*/
1144 need_recover = true;
1145 }
1146 }
1147 }
1148 }
1149 if (!need_recover)
1150 return false;
1151 /*
1152 DCHUBP_CNTL:HUBP_BLANK_EN=1
1153 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1154 DCHUBP_CNTL:HUBP_DISABLE=1
1155 DCHUBP_CNTL:HUBP_DISABLE=0
1156 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1157 DCSURF_PRIMARY_SURFACE_ADDRESS
1158 DCHUBP_CNTL:HUBP_BLANK_EN=0
1159 */
1160
1161 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1162 struct pipe_ctx *pipe_ctx =
1163 &dc->current_state->res_ctx.pipe_ctx[i];
1164 if (pipe_ctx != NULL) {
1165 hubp = pipe_ctx->plane_res.hubp;
1166 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1167 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1168 hubp->funcs->set_hubp_blank_en(hubp, true);
1169 }
1170 }
1171 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1172 hubbub1_soft_reset(hubbub: dc->res_pool->hubbub, reset: true);
1173
1174 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1175 struct pipe_ctx *pipe_ctx =
1176 &dc->current_state->res_ctx.pipe_ctx[i];
1177 if (pipe_ctx != NULL) {
1178 hubp = pipe_ctx->plane_res.hubp;
1179 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1180 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1181 hubp->funcs->hubp_disable_control(hubp, true);
1182 }
1183 }
1184 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1185 struct pipe_ctx *pipe_ctx =
1186 &dc->current_state->res_ctx.pipe_ctx[i];
1187 if (pipe_ctx != NULL) {
1188 hubp = pipe_ctx->plane_res.hubp;
1189 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1190 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1191 hubp->funcs->hubp_disable_control(hubp, true);
1192 }
1193 }
1194 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1195 hubbub1_soft_reset(hubbub: dc->res_pool->hubbub, reset: false);
1196 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1197 struct pipe_ctx *pipe_ctx =
1198 &dc->current_state->res_ctx.pipe_ctx[i];
1199 if (pipe_ctx != NULL) {
1200 hubp = pipe_ctx->plane_res.hubp;
1201 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1202 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1203 hubp->funcs->set_hubp_blank_en(hubp, true);
1204 }
1205 }
1206 return true;
1207
1208}
1209
/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that HUBBUB still
 * allows p-state change; if not, optionally log, trap to debugger, and run
 * the forced recovery workaround, then re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		/* false unless flipped at runtime (e.g. from a debugger) */
		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): 'pipe_ctx' here is consumed by the trace
		 * macro, not a local variable — confirm macro definition.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1233
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	/* Find this DPP's MPCC in the OPP's blend tree. */
	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* The disconnect completes on a later vsync; a bandwidth optimize
	 * pass is needed afterwards.
	 */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1268
1269/**
1270 * dcn10_plane_atomic_power_down - Power down plane components.
1271 *
1272 * @dc: dc struct reference. used for grab hwseq.
1273 * @dpp: dpp struct reference.
1274 * @hubp: hubp struct reference.
1275 *
1276 * Keep in mind that this operation requires a power gate configuration;
1277 * however, requests for switch power gate are precisely controlled to avoid
1278 * problems. For this reason, power gate request is usually disabled. This
1279 * function first needs to enable the power gate request before disabling DPP
1280 * and HUBP. Finally, it disables the power gate request again.
1281 */
1282void dcn10_plane_atomic_power_down(struct dc *dc,
1283 struct dpp *dpp,
1284 struct hubp *hubp)
1285{
1286 struct dce_hwseq *hws = dc->hwseq;
1287 DC_LOGGER_INIT(dc->ctx->logger);
1288
1289 if (REG(DC_IP_REQUEST_CNTL)) {
1290 REG_SET(DC_IP_REQUEST_CNTL, 0,
1291 IP_REQUEST_EN, 1);
1292
1293 if (hws->funcs.dpp_pg_control)
1294 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1295
1296 if (hws->funcs.hubp_pg_control)
1297 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1298
1299 dpp->funcs->dpp_reset(dpp);
1300
1301 REG_SET(DC_IP_REQUEST_CNTL, 0,
1302 IP_REQUEST_EN, 0);
1303 DC_LOG_DEBUG(
1304 "Power gated front end %d\n", hubp->inst);
1305 }
1306
1307 if (hws->funcs.dpp_root_clock_control)
1308 hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
1309}
1310
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* Ensure the vsync-triggered MPCC disconnect has finished. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only if no MPCC is attached to this OPP. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Fully detach this pipe_ctx from the plane/stream resources. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1346
1347void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1348{
1349 struct dce_hwseq *hws = dc->hwseq;
1350 DC_LOGGER_INIT(dc->ctx->logger);
1351
1352 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1353 return;
1354
1355 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1356
1357 apply_DEGVIDCN10_253_wa(dc);
1358
1359 DC_LOG_DC("Power down front end %d\n",
1360 pipe_ctx->pipe_idx);
1361}
1362
/*
 * dcn10_init_pipes - Bring all pipes to a known disabled state at HW init.
 *
 * Unless a stream is flagged for seamless boot (firmware already driving a
 * display), this blanks and locks the enabled OTGs, clears DET allocations,
 * resets the MPC muxes, disconnects/powers down each front end, and finally
 * power gates every DSC not used by a live OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	/* Detect whether any stream wants its boot image preserved. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Temporarily populate the pipe_ctx with the pool resources so
		 * the generic disconnect/disable paths can be reused here.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1533
/*
 * dcn10_init_hw - One-time hardware initialization for DCN1.x.
 *
 * Initializes clocks, DCCG, reference clocks, link encoders, pipes, audio,
 * backlight/ABM/DMCU, and clock gating. Order matters throughout.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* Take over from VGA text/firmware mode if VBIOS hasn't. */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc_dmub_srv: dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Restore backlight state from the panel control hardware. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl) {
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
			}
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight, user_level);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1673
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, edp_num: &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* Prefer the eDP path: turn off backlight before powering down so the
	 * panel does not flash, then cut eDP panel power last.
	 */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* No eDP path: power down once if any DIG is enabled. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
1720
/*
 * dcn10_reset_hw_ctx_wrap - Reset back ends whose stream went away or needs
 * reprogramming when transitioning from current_state to @context.
 *
 * Iterates top-level (parent) pipes in reverse so children release their
 * shared back end before the parent disables it.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Children share the parent's back end; skip them. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx: pipe_ctx_old, context: dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1752
1753static bool patch_address_for_sbs_tb_stereo(
1754 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1755{
1756 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1757 bool sec_split = pipe_ctx->top_pipe &&
1758 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1759 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1760 (pipe_ctx->stream->timing.timing_3d_format ==
1761 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1762 pipe_ctx->stream->timing.timing_3d_format ==
1763 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1764 *addr = plane_state->address.grph_stereo.left_addr;
1765 plane_state->address.grph_stereo.left_addr =
1766 plane_state->address.grph_stereo.right_addr;
1767 return true;
1768 } else {
1769 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1770 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1771 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1772 plane_state->address.grph_stereo.right_addr =
1773 plane_state->address.grph_stereo.left_addr;
1774 plane_state->address.grph_stereo.right_meta_addr =
1775 plane_state->address.grph_stereo.left_meta_addr;
1776 }
1777 }
1778 return false;
1779}
1780
/*
 * dcn10_update_plane_addr - Program the plane's surface address (flip).
 *
 * Applies the SBS/TB stereo address patch around the HUBP flip programming
 * and updates the plane's requested/current address status.
 */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, addr: &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect right away; pending flips update
	 * current_address later when the flip completes.
	 */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the left-eye address saved by the stereo patch. */
	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
1805
/*
 * dcn10_set_input_transfer_func - Program the DPP input (degamma) LUT and
 * degamma mode from the plane's transfer function.
 *
 * Returns true on success; false if the DPP is missing or the predefined
 * transfer function is not supported in hardware.
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
			const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;

	if (dpp_base == NULL)
		return false;

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	/* Program the input LUT only for non-identity gamma on formats that
	 * use a LUT, unless debug forces the regamma path.
	 */
	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
			&& dce_use_lut(format: plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

	if (tf == NULL)
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			/* No fixed-function PQ degamma: translate to a PWL. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(output_tf: tf, lut_params: &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	} else {
		/* Arbitrary curve: translate to the hardware PWL format. */
		cm_helper_translate_curve_to_degamma_hw_format(output_tf: tf,
					lut_params: &dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
				&dpp_base->degamma_params);
		result = true;
	}

	return result;
}
1860
1861#define MAX_NUM_HW_POINTS 0x200
1862
/*
 * log_tf - dump transfer-function points for debugging.
 *
 * The first hw_points_num entries (the points actually programmed to HW) go
 * to the regular gamma log; the remainder up to MAX_NUM_HW_POINTS only go to
 * the more verbose "all gamma" channels.
 */
static void log_tf(struct dc_context *ctx,
		struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int i = 0;

	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Points that were actually programmed into hardware. */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	/* Remaining (non-hw) points, verbose logging only. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
1887
/*
 * dcn10_set_output_transfer_func - program the DPP regamma (output TF).
 *
 * Predefined sRGB uses the HW fixed-function regamma. Any other curve is
 * translated to a user PWL; if translation fails, regamma is bypassed.
 * The resulting transfer function is logged at the end.
 *
 * Returns false when stream or DPP is missing, true otherwise.
 */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (!stream)
		return false;

	if (dpp == NULL)
		return false;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	if (stream->out_transfer_func &&
	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update.
	 */
	else if (cm_helper_translate_curve_to_hw_format(ctx: dc->ctx,
			output_tf: stream->out_transfer_func,
			lut_params: &dpp->regamma_params, fixpoint: false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				dpp,
				&dpp->regamma_params, OPP_REGAMMA_USER);
	} else
		/* Translation failed (or no TF): bypass regamma. */
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream->ctx &&
	    stream->out_transfer_func) {
		log_tf(ctx: stream->ctx,
			tf: stream->out_transfer_func,
			hw_points_num: dpp->regamma_params.hw_points_num);
	}

	return true;
}
1927
1928void dcn10_pipe_control_lock(
1929 struct dc *dc,
1930 struct pipe_ctx *pipe,
1931 bool lock)
1932{
1933 struct dce_hwseq *hws = dc->hwseq;
1934
1935 /* use TG master update lock to lock everything on the TG
1936 * therefore only top pipe need to lock
1937 */
1938 if (!pipe || pipe->top_pipe)
1939 return;
1940
1941 if (dc->debug.sanity_checks)
1942 hws->funcs.verify_allow_pstate_change_high(dc);
1943
1944 if (lock)
1945 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1946 else
1947 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1948
1949 if (dc->debug.sanity_checks)
1950 hws->funcs.verify_allow_pstate_change_high(dc);
1951}
1952
1953/**
1954 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1955 *
1956 * Software keepout workaround to prevent cursor update locking from stalling
1957 * out cursor updates indefinitely or from old values from being retained in
1958 * the case where the viewport changes in the same frame as the cursor.
1959 *
1960 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1961 * too close to VUPDATE, then stall out until VUPDATE finishes.
1962 *
1963 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1964 * to avoid the need for this workaround.
1965 *
1966 * @dc: Current DC state
1967 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1968 *
1969 * Return: void
1970 */
1971static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1972{
1973 struct dc_stream_state *stream = pipe_ctx->stream;
1974 struct crtc_position position;
1975 uint32_t vupdate_start, vupdate_end;
1976 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1977 unsigned int us_per_line, us_vupdate;
1978
1979 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1980 return;
1981
1982 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1983 return;
1984
1985 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1986 &vupdate_end);
1987
1988 dc->hwss.get_position(&pipe_ctx, 1, &position);
1989 vpos = position.vertical_count;
1990
1991 /* Avoid wraparound calculation issues */
1992 vupdate_start += stream->timing.v_total;
1993 vupdate_end += stream->timing.v_total;
1994 vpos += stream->timing.v_total;
1995
1996 if (vpos <= vupdate_start) {
1997 /* VPOS is in VACTIVE or back porch. */
1998 lines_to_vupdate = vupdate_start - vpos;
1999 } else if (vpos > vupdate_end) {
2000 /* VPOS is in the front porch. */
2001 return;
2002 } else {
2003 /* VPOS is in VUPDATE. */
2004 lines_to_vupdate = 0;
2005 }
2006
2007 /* Calculate time until VUPDATE in microseconds. */
2008 us_per_line =
2009 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
2010 us_to_vupdate = lines_to_vupdate * us_per_line;
2011
2012 /* 70 us is a conservative estimate of cursor update time*/
2013 if (us_to_vupdate > 70)
2014 return;
2015
2016 /* Stall out until the cursor update completes. */
2017 if (vupdate_end < vupdate_start)
2018 vupdate_end += stream->timing.v_total;
2019 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
2020 udelay(us_to_vupdate + us_vupdate);
2021}
2022
/*
 * dcn10_cursor_lock - (un)lock cursor registers for a pipe's MPCC tree.
 *
 * When locking, first stalls near VUPDATE (see delay_cursor_until_vupdate)
 * so the lock cannot starve cursor updates. The lock itself is taken either
 * through the DMUB HW lock manager (when the link uses it) or directly via
 * the MPC cursor lock.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe_ctx: pipe);

	if (pipe->stream && should_use_dmub_lock(link: pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		/* Route the lock request through the DMUB firmware. */
		dmub_hw_lock_mgr_cmd(dmub_srv: dc->ctx->dmub_srv,
					lock,
					hw_locks: &hw_locks,
					inst_flags: &inst_flags);
	} else
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
}
2048
2049static bool wait_for_reset_trigger_to_occur(
2050 struct dc_context *dc_ctx,
2051 struct timing_generator *tg)
2052{
2053 bool rc = false;
2054
2055 DC_LOGGER_INIT(dc_ctx->logger);
2056
2057 /* To avoid endless loop we wait at most
2058 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2059 const uint32_t frames_to_wait_on_triggered_reset = 10;
2060 int i;
2061
2062 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2063
2064 if (!tg->funcs->is_counter_moving(tg)) {
2065 DC_ERROR("TG counter is not moving!\n");
2066 break;
2067 }
2068
2069 if (tg->funcs->did_triggered_reset_occur(tg)) {
2070 rc = true;
2071 /* usually occurs at i=1 */
2072 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2073 i);
2074 break;
2075 }
2076
2077 /* Wait for one frame. */
2078 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2079 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2080 }
2081
2082 if (false == rc)
2083 DC_ERROR("GSL: Timeout on reset trigger!\n");
2084
2085 return rc;
2086}
2087
/*
 * reduceSizeAndFraction - reduce a fraction by common prime factors <= 997.
 *
 * Divides out every prime (from the table below) that divides both
 * *numerator and *denominator, updating both in place.
 *
 * When @checkUint32Boundary is true, reduction stops as soon as both values
 * fit in 32 bits and the function returns whether that was achieved; when it
 * is false, the full reduction is performed and true is returned.
 *
 * Fix vs. original: the return type was uint64_t even though the function
 * only ever carries a boolean (callers compare against false); it is now
 * bool. Also renames the misleading "max_int32" (the value is UINT32_MAX).
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Boundary)
{
	int i;
	bool ret = checkUint32Boundary == false;
	const uint64_t max_uint32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	const int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Early out: both values already fit in 32 bits. */
		if (checkUint32Boundary &&
		    num <= max_uint32 && denom <= max_uint32) {
			ret = true;
			break;
		}

		/* Divide out this prime as many times as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2137
2138static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2139{
2140 uint32_t master_pipe_refresh_rate =
2141 pipe->stream->timing.pix_clk_100hz * 100 /
2142 pipe->stream->timing.h_total /
2143 pipe->stream->timing.v_total;
2144 return master_pipe_refresh_rate <= 30;
2145}
2146
2147static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2148 bool account_low_refresh_rate)
2149{
2150 uint32_t clock_divider = 1;
2151 uint32_t numpipes = 1;
2152
2153 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2154 clock_divider *= 2;
2155
2156 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2157 clock_divider *= 2;
2158
2159 while (pipe->next_odm_pipe) {
2160 pipe = pipe->next_odm_pipe;
2161 numpipes++;
2162 }
2163 clock_divider *= numpipes;
2164
2165 return clock_divider;
2166}
2167
/*
 * dcn10_align_pixel_clocks - align DP DTO phase/modulo across a sync group.
 *
 * Only active when vblank_alignment_dto_params is configured and the DP
 * clock source supports DTO override. An embedded-signal pipe, if present,
 * becomes the master; every non-embedded pipe gets a DTO phase/modulo
 * derived from the embedded timing packed into vblank_alignment_dto_params
 * (pix clk in bits [31:0], h_total in [46:32], v_total in [62:48]).
 * Streams whose phase/modulo fraction cannot be reduced into 32-bit range
 * are flagged has_non_synchronizable_pclk and left untouched.
 *
 * Returns the index of the chosen master pipe, or -1 if none (also -1 on
 * allocation failure).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kcalloc(MAX_PIPES, size: sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* Unpack the embedded panel's timing from the packed u64. */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					signal: grouped_pipes[i]->stream->signal)) {
				/* The embedded panel is the reference clock. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {
				/* Scale this pipe's DTO so its frame rate
				 * matches the embedded panel's.
				 */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(dividend: phase[i], divisor: get_clock_divider(pipe: grouped_pipes[i], account_low_refresh_rate: true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(numerator: &phase[i],
						denominator: &modulo[i], checkUint32Bounary: true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				/* Program the override and read back the
				 * resulting pixel clock for the stream.
				 */
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(pipe: grouped_pipes[i], account_low_refresh_rate: false);
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(objp: hw_crtc_timing);
	return master;
}
2257
/*
 * dcn10_enable_vblanks_synchronization - align vblanks of a group of pipes.
 *
 * First temporarily reprograms each non-master pipe's DPG vertical dimension
 * to 2*height+1 (restored at the end — presumably to mask the transition
 * while OTGs shift; TODO confirm intent), then aligns DP DTOs via
 * dcn10_align_pixel_clocks and calls align_vblanks against the master TG for
 * every synchronizable pipe. Bails out early if any group member's OTG is
 * disabled.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			/* Cannot sync against a disabled OTG. */
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before recomputing it. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(pipe: grouped_pipes[master], account_low_refresh_rate: false),
					get_clock_divider(pipe: grouped_pipes[i], account_low_refresh_rate: false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the DPG dimensions changed above. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2322
/*
 * dcn10_enable_timing_synchronization - reset-trigger based OTG sync.
 *
 * Arms a reset trigger on every non-first pipe against grouped_pipes[0]'s
 * TG, waits for one representative pipe to report the reset fired, then
 * disarms the triggers. SubVP phantom pipes are skipped throughout. As in
 * dcn10_enable_vblanks_synchronization, the DPG vertical dimension is
 * temporarily set to 2*height+1 and restored afterwards (presumably to mask
 * the transition — TODO confirm). Bails out if any participating OTG is
 * disabled.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	for (i = 1; i < group_size; i++) {
		/* Phantom (SubVP) pipes do not participate in sync. */
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, pipe_ctx: grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear sync state before re-synchronizing. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, pipe_ctx: grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each non-first TG to reset against the first TG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, pipe_ctx: grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, pipe_ctx: grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, tg: grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers now that the reset has been observed. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, pipe_ctx: grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the DPG dimensions changed above. */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, pipe_ctx: grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2405
2406void dcn10_enable_per_frame_crtc_position_reset(
2407 struct dc *dc,
2408 int group_size,
2409 struct pipe_ctx *grouped_pipes[])
2410{
2411 struct dc_context *dc_ctx = dc->ctx;
2412 int i;
2413
2414 DC_LOGGER_INIT(dc_ctx->logger);
2415
2416 DC_SYNC_INFO("Setting up\n");
2417 for (i = 0; i < group_size; i++)
2418 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2419 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2420 grouped_pipes[i]->stream_res.tg,
2421 0,
2422 &grouped_pipes[i]->stream->triggered_crtc_reset);
2423
2424 DC_SYNC_INFO("Waiting for trigger\n");
2425
2426 for (i = 0; i < group_size; i++)
2427 wait_for_reset_trigger_to_occur(dc_ctx, tg: grouped_pipes[i]->stream_res.tg);
2428
2429 DC_SYNC_INFO("Multi-display sync is complete\n");
2430}
2431
/*
 * mmhub_read_vm_system_aperture_settings - snapshot MMHUB system aperture
 * registers into @apt.
 *
 * Reads the default physical page number (MSB/LSB pair) and the low/high
 * logical aperture addresses, then converts them to byte addresses: the
 * page number is in 4K pages (<< 12); the aperture bounds are shifted by 18
 * (register granularity per the HW spec — TODO confirm unit).
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2455
/* Temporary read settings, future will get values from kmd directly */
/*
 * mmhub_read_vm_context0_settings - snapshot VM context 0 page-table
 * registers into @vm0.
 *
 * Reads the PTE base/start/end and protection-fault default addresses, then
 * rebases the PTE base from UMA space into the frame-buffer address space
 * (see calculation comment below).
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2500
2501
/*
 * dcn10_program_pte_vm - program HUBP VM settings from current MMHUB state.
 *
 * Reads the system aperture and VM context 0 settings from MMHUB registers
 * and applies them to the given HUBP.
 */
static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = {0};
	struct vm_context0_param vm0 = {0};

	mmhub_read_vm_system_aperture_settings(hubp1, apt: &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, vm0: &vm0, hws);

	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
2514
/*
 * dcn10_enable_plane - power up and clock a plane's HUBP/OPP path.
 *
 * Undoes the DEGVIDCN10-253 workaround, powers the plane's resources,
 * enables the HUBP DCFCLK and OPP pipe clock, programs VM PTE settings when
 * GPU VM is supported, and finally enables the flip interrupt on the top
 * pipe when the plane requests it.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(hws: dc->hwseq,
		plane_id: pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, hubp: pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Flip interrupt only needed (and armed) on the top pipe. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2553
2554void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2555{
2556 int i = 0;
2557 struct dpp_grph_csc_adjustment adjust;
2558 memset(&adjust, 0, sizeof(adjust));
2559 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2560
2561
2562 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2563 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2564 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2565 adjust.temperature_matrix[i] =
2566 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2567 } else if (pipe_ctx->plane_state &&
2568 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2569 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2570 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2571 adjust.temperature_matrix[i] =
2572 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2573 }
2574
2575 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2576}
2577
2578
2579static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2580{
2581 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(output_color_space: colorspace)) {
2582 if (pipe_ctx->top_pipe) {
2583 struct pipe_ctx *top = pipe_ctx->top_pipe;
2584
2585 while (top->top_pipe)
2586 top = top->top_pipe; // Traverse to top pipe_ctx
2587 if (top->plane_state && top->plane_state->layer_index == 0)
2588 return true; // Front MPO plane not hidden
2589 }
2590 }
2591 return false;
2592}
2593
2594static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2595{
2596 // Override rear plane RGB bias to fix MPO brightness
2597 uint16_t rgb_bias = matrix[3];
2598
2599 matrix[3] = 0;
2600 matrix[7] = 0;
2601 matrix[11] = 0;
2602 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2603 matrix[3] = rgb_bias;
2604 matrix[7] = rgb_bias;
2605 matrix[11] = rgb_bias;
2606}
2607
/*
 * dcn10_program_output_csc - program the DPP output CSC for a pipe.
 *
 * When the stream supplies an adjustment matrix, it is programmed directly —
 * except on rear MPO planes with a positive RGB bias, where the bias is
 * zeroed to work around DCN1's OCSC-before-MPC blending (see below).
 * Without an adjustment matrix, the colorspace default CSC is used.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			/* Read as int16_t so a negative bias stays negative
			 * for the > 0 check below.
			 */
			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		/* No custom matrix: fall back to the colorspace default. */
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
2639
/*
 * dcn10_update_dpp - program per-plane DPP input state.
 *
 * Sets up the input CSC (format, expansion mode, plane input matrix and
 * colorspace) and, when supported, the prescale bias-and-scale registers
 * built from the plane state.
 */
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL);

	//set scale and bias registers
	build_prescale_params(bias_and_scale: &bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
2657
2658void dcn10_update_visual_confirm_color(struct dc *dc,
2659 struct pipe_ctx *pipe_ctx,
2660 int mpcc_id)
2661{
2662 struct mpc *mpc = dc->res_pool->mpc;
2663
2664 if (mpc->funcs->set_bg_color) {
2665 memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2666 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2667 }
2668}
2669
/*
 * dcn10_update_mpcc - (re)configure the MPCC blending for a pipe's plane.
 *
 * Builds the blend configuration (per-pixel vs global alpha, pre-multiplied
 * alpha handling, global gain) from the plane state. On a partial update
 * only the blending registers are touched; on a full update the plane's
 * MPCC is removed from the tree (if present) and re-inserted, and the HUBP
 * is pointed at the resulting OPP/MPCC.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only applies when another pipe blends below us. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				output_color_space: pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			/* Combine per-pixel alpha with the plane's global gain. */
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Tie the HUBP to its new OPP/MPCC assignment. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2747
2748static void update_scaler(struct pipe_ctx *pipe_ctx)
2749{
2750 bool per_pixel_alpha =
2751 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2752
2753 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2754 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2755 /* scaler configuration */
2756 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2757 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2758}
2759
/*
 * dcn10_update_dchubp_dpp - program the HUBP and DPP of one pipe from its
 * plane state, gated by the plane's update_flags so only dirty hardware
 * state is reprogrammed.
 *
 * @dc:       display core context
 * @pipe_ctx: pipe whose HUBP/DPP is being programmed
 * @context:  target dc_state; context->bw_ctx.bw.dcn.clk holds the newly
 *            validated clock values used for the DPP clock decision below
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0; /* always 0 here; forwarded to surface config */
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* with a DCCG, program a per-DPP DTO; otherwise track the
		 * chosen dppclk in the clock manager state directly
		 */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		/* program DLG/TTU/RQ request generation parameters */
		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* surface config below uses the post-scale viewport as surface size */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	/* reprogram MPCC blending when alpha-related state changed */
	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* a nonzero cursor address means a cursor surface is attached */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	/* any change affecting the surface's memory layout requires a full
	 * surface config reprogram
	 */
	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* unblank only if something in this pipe tree is actually visible */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2921
2922void dcn10_blank_pixel_data(
2923 struct dc *dc,
2924 struct pipe_ctx *pipe_ctx,
2925 bool blank)
2926{
2927 enum dc_color_space color_space;
2928 struct tg_color black_color = {0};
2929 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2930 struct dc_stream_state *stream = pipe_ctx->stream;
2931
2932 /* program otg blank color */
2933 color_space = stream->output_color_space;
2934 color_space_to_black_color(dc, colorspace: color_space, black_color: &black_color);
2935
2936 /*
2937 * The way 420 is packed, 2 channels carry Y component, 1 channel
2938 * alternate between Cb and Cr, so both channels need the pixel
2939 * value for Y
2940 */
2941 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2942 black_color.color_r_cr = black_color.color_g_y;
2943
2944
2945 if (stream_res->tg->funcs->set_blank_color)
2946 stream_res->tg->funcs->set_blank_color(
2947 stream_res->tg,
2948 &black_color);
2949
2950 if (!blank) {
2951 if (stream_res->tg->funcs->set_blank)
2952 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2953 if (stream_res->abm) {
2954 dc->hwss.set_pipe(pipe_ctx);
2955 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2956 }
2957 } else {
2958 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2959 if (stream_res->tg->funcs->set_blank) {
2960 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2961 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2962 }
2963 }
2964}
2965
2966void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2967{
2968 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2969 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2970 struct custom_float_format fmt;
2971
2972 fmt.exponenta_bits = 6;
2973 fmt.mantissa_bits = 12;
2974 fmt.sign = true;
2975
2976
2977 if (!dc_fixpt_eq(arg1: multiplier, arg2: dc_fixpt_from_int(arg: 0))) // check != 0
2978 convert_to_custom_float_format(value: multiplier, format: &fmt, result: &hw_mult);
2979
2980 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2981 pipe_ctx->plane_res.dpp, hw_mult);
2982}
2983
2984void dcn10_program_pipe(
2985 struct dc *dc,
2986 struct pipe_ctx *pipe_ctx,
2987 struct dc_state *context)
2988{
2989 struct dce_hwseq *hws = dc->hwseq;
2990
2991 if (pipe_ctx->top_pipe == NULL) {
2992 bool blank = !is_pipe_tree_visible(pipe_ctx);
2993
2994 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2995 pipe_ctx->stream_res.tg,
2996 calculate_vready_offset_for_group(pipe: pipe_ctx),
2997 pipe_ctx->pipe_dlg_param.vstartup_start,
2998 pipe_ctx->pipe_dlg_param.vupdate_offset,
2999 pipe_ctx->pipe_dlg_param.vupdate_width);
3000
3001 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
3002 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
3003
3004 if (hws->funcs.setup_vupdate_interrupt)
3005 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
3006
3007 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
3008 }
3009
3010 if (pipe_ctx->plane_state->update_flags.bits.full_update)
3011 dcn10_enable_plane(dc, pipe_ctx, context);
3012
3013 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
3014
3015 hws->funcs.set_hdr_multiplier(pipe_ctx);
3016
3017 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
3018 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
3019 pipe_ctx->plane_state->update_flags.bits.gamma_change)
3020 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
3021
3022 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
3023 * only do gamma programming for full update.
3024 * TODO: This can be further optimized/cleaned up
3025 * Always call this for now since it does memcmp inside before
3026 * doing heavy calculation and programming
3027 */
3028 if (pipe_ctx->plane_state->update_flags.bits.full_update)
3029 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
3030}
3031
3032void dcn10_wait_for_pending_cleared(struct dc *dc,
3033 struct dc_state *context)
3034{
3035 struct pipe_ctx *pipe_ctx;
3036 struct timing_generator *tg;
3037 int i;
3038
3039 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3040 pipe_ctx = &context->res_ctx.pipe_ctx[i];
3041 tg = pipe_ctx->stream_res.tg;
3042
3043 /*
3044 * Only wait for top pipe's tg penindg bit
3045 * Also skip if pipe is disabled.
3046 */
3047 if (pipe_ctx->top_pipe ||
3048 !pipe_ctx->stream || !pipe_ctx->plane_state ||
3049 !tg->funcs->is_tg_enabled(tg))
3050 continue;
3051
3052 /*
3053 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3054 * For some reason waiting for OTG_UPDATE_PENDING cleared
3055 * seems to not trigger the update right away, and if we
3056 * lock again before VUPDATE then we don't get a separated
3057 * operation.
3058 */
3059 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3060 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3061 }
3062}
3063
3064void dcn10_post_unlock_program_front_end(
3065 struct dc *dc,
3066 struct dc_state *context)
3067{
3068 int i;
3069
3070 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3071 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3072
3073 if (!pipe_ctx->top_pipe &&
3074 !pipe_ctx->prev_odm_pipe &&
3075 pipe_ctx->stream) {
3076 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3077
3078 if (context->stream_status[i].plane_count == 0)
3079 false_optc_underflow_wa(dc, stream: pipe_ctx->stream, tg);
3080 }
3081 }
3082
3083 for (i = 0; i < dc->res_pool->pipe_count; i++)
3084 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3085 dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3086
3087 for (i = 0; i < dc->res_pool->pipe_count; i++)
3088 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3089 dc->hwss.optimize_bandwidth(dc, context);
3090 break;
3091 }
3092
3093 if (dc->hwseq->wa.DEGVIDCN10_254)
3094 hubbub1_wm_change_req_wa(hubbub: dc->res_pool->hubbub);
3095}
3096
3097static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3098{
3099 uint8_t i;
3100
3101 for (i = 0; i < context->stream_count; i++) {
3102 if (context->streams[i]->timing.timing_3d_format
3103 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3104 /*
3105 * Disable stutter
3106 */
3107 hubbub1_allow_self_refresh_control(hubbub: dc->res_pool->hubbub, allow: false);
3108 break;
3109 }
3110 }
3111}
3112
3113void dcn10_prepare_bandwidth(
3114 struct dc *dc,
3115 struct dc_state *context)
3116{
3117 struct dce_hwseq *hws = dc->hwseq;
3118 struct hubbub *hubbub = dc->res_pool->hubbub;
3119 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3120
3121 if (dc->debug.sanity_checks)
3122 hws->funcs.verify_allow_pstate_change_high(dc);
3123
3124 if (context->stream_count == 0)
3125 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3126
3127 dc->clk_mgr->funcs->update_clocks(
3128 dc->clk_mgr,
3129 context,
3130 false);
3131
3132 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3133 &context->bw_ctx.bw.dcn.watermarks,
3134 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3135 true);
3136 dcn10_stereo_hw_frame_pack_wa(dc, context);
3137
3138 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3139 DC_FP_START();
3140 dcn_get_soc_clks(
3141 dc, min_fclk_khz: &min_fclk_khz, min_dcfclk_khz: &min_dcfclk_khz, socclk_khz: &socclk_khz);
3142 DC_FP_END();
3143 dcn_bw_notify_pplib_of_wm_ranges(
3144 dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3145 }
3146
3147 if (dc->debug.sanity_checks)
3148 hws->funcs.verify_allow_pstate_change_high(dc);
3149}
3150
3151void dcn10_optimize_bandwidth(
3152 struct dc *dc,
3153 struct dc_state *context)
3154{
3155 struct dce_hwseq *hws = dc->hwseq;
3156 struct hubbub *hubbub = dc->res_pool->hubbub;
3157 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3158
3159 if (dc->debug.sanity_checks)
3160 hws->funcs.verify_allow_pstate_change_high(dc);
3161
3162 if (context->stream_count == 0)
3163 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3164
3165 dc->clk_mgr->funcs->update_clocks(
3166 dc->clk_mgr,
3167 context,
3168 true);
3169
3170 hubbub->funcs->program_watermarks(hubbub,
3171 &context->bw_ctx.bw.dcn.watermarks,
3172 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3173 true);
3174
3175 dcn10_stereo_hw_frame_pack_wa(dc, context);
3176
3177 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3178 DC_FP_START();
3179 dcn_get_soc_clks(
3180 dc, min_fclk_khz: &min_fclk_khz, min_dcfclk_khz: &min_dcfclk_khz, socclk_khz: &socclk_khz);
3181 DC_FP_END();
3182 dcn_bw_notify_pplib_of_wm_ranges(
3183 dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3184 }
3185
3186 if (dc->debug.sanity_checks)
3187 hws->funcs.verify_allow_pstate_change_high(dc);
3188}
3189
3190void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3191 int num_pipes, struct dc_crtc_timing_adjust adjust)
3192{
3193 int i = 0;
3194 struct drr_params params = {0};
3195 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3196 unsigned int event_triggers = 0x800;
3197 // Note DRR trigger events are generated regardless of whether num frames met.
3198 unsigned int num_frames = 2;
3199
3200 params.vertical_total_max = adjust.v_total_max;
3201 params.vertical_total_min = adjust.v_total_min;
3202 params.vertical_total_mid = adjust.v_total_mid;
3203 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3204 /* TODO: If multiple pipes are to be supported, you need
3205 * some GSL stuff. Static screen triggers may be programmed differently
3206 * as well.
3207 */
3208 for (i = 0; i < num_pipes; i++) {
3209 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3210 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3211 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3212 pipe_ctx[i]->stream_res.tg, &params);
3213 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3214 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3215 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3216 pipe_ctx[i]->stream_res.tg,
3217 event_triggers, num_frames);
3218 }
3219 }
3220}
3221
3222void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3223 int num_pipes,
3224 struct crtc_position *position)
3225{
3226 int i = 0;
3227
3228 /* TODO: handle pipes > 1
3229 */
3230 for (i = 0; i < num_pipes; i++)
3231 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3232}
3233
3234void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3235 int num_pipes, const struct dc_static_screen_params *params)
3236{
3237 unsigned int i;
3238 unsigned int triggers = 0;
3239
3240 if (params->triggers.surface_update)
3241 triggers |= 0x80;
3242 if (params->triggers.cursor_update)
3243 triggers |= 0x2;
3244 if (params->triggers.force_trigger)
3245 triggers |= 0x1;
3246
3247 for (i = 0; i < num_pipes; i++)
3248 pipe_ctx[i]->stream_res.tg->funcs->
3249 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3250 triggers, params->num_frames);
3251}
3252
3253static void dcn10_config_stereo_parameters(
3254 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3255{
3256 enum view_3d_format view_format = stream->view_format;
3257 enum dc_timing_3d_format timing_3d_format =\
3258 stream->timing.timing_3d_format;
3259 bool non_stereo_timing = false;
3260
3261 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3262 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3263 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3264 non_stereo_timing = true;
3265
3266 if (non_stereo_timing == false &&
3267 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3268
3269 flags->PROGRAM_STEREO = 1;
3270 flags->PROGRAM_POLARITY = 1;
3271 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3272 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3273 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3274 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3275
3276 if (stream->link && stream->link->ddc) {
3277 enum display_dongle_type dongle = \
3278 stream->link->ddc->dongle_type;
3279
3280 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3281 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3282 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3283 flags->DISABLE_STEREO_DP_SYNC = 1;
3284 }
3285 }
3286 flags->RIGHT_EYE_POLARITY =\
3287 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3288 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3289 flags->FRAME_PACKED = 1;
3290 }
3291
3292 return;
3293}
3294
3295void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3296{
3297 struct crtc_stereo_flags flags = { 0 };
3298 struct dc_stream_state *stream = pipe_ctx->stream;
3299
3300 dcn10_config_stereo_parameters(stream, flags: &flags);
3301
3302 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3303 if (!dc_set_generic_gpio_for_stereo(enable: true, gpio_service: dc->ctx->gpio_service))
3304 dc_set_generic_gpio_for_stereo(enable: false, gpio_service: dc->ctx->gpio_service);
3305 } else {
3306 dc_set_generic_gpio_for_stereo(enable: false, gpio_service: dc->ctx->gpio_service);
3307 }
3308
3309 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3310 pipe_ctx->stream_res.opp,
3311 flags.PROGRAM_STEREO == 1,
3312 &stream->timing);
3313
3314 pipe_ctx->stream_res.tg->funcs->program_stereo(
3315 pipe_ctx->stream_res.tg,
3316 &stream->timing,
3317 &flags);
3318
3319 return;
3320}
3321
3322static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3323{
3324 int i;
3325
3326 for (i = 0; i < res_pool->pipe_count; i++) {
3327 if (res_pool->hubps[i]->inst == mpcc_inst)
3328 return res_pool->hubps[i];
3329 }
3330 ASSERT(false);
3331 return NULL;
3332}
3333
3334void dcn10_wait_for_mpcc_disconnect(
3335 struct dc *dc,
3336 struct resource_pool *res_pool,
3337 struct pipe_ctx *pipe_ctx)
3338{
3339 struct dce_hwseq *hws = dc->hwseq;
3340 int mpcc_inst;
3341
3342 if (dc->debug.sanity_checks) {
3343 hws->funcs.verify_allow_pstate_change_high(dc);
3344 }
3345
3346 if (!pipe_ctx->stream_res.opp)
3347 return;
3348
3349 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3350 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3351 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3352
3353 if (pipe_ctx->stream_res.tg &&
3354 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3355 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3356 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3357 hubp->funcs->set_blank(hubp, true);
3358 }
3359 }
3360
3361 if (dc->debug.sanity_checks) {
3362 hws->funcs.verify_allow_pstate_change_high(dc);
3363 }
3364
3365}
3366
/*
 * dcn10_dummy_display_power_gating - no-op power-gating hook.
 *
 * Stub that satisfies the hw_sequencer interface; it ignores all
 * arguments and always reports success.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3375
3376void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3377{
3378 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3379 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3380 bool flip_pending;
3381 struct dc *dc = pipe_ctx->stream->ctx->dc;
3382
3383 if (plane_state == NULL)
3384 return;
3385
3386 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3387 pipe_ctx->plane_res.hubp);
3388
3389 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3390
3391 if (!flip_pending)
3392 plane_state->status.current_address = plane_state->status.requested_address;
3393
3394 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3395 tg->funcs->is_stereo_left_eye) {
3396 plane_state->status.is_right_eye =
3397 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3398 }
3399
3400 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3401 struct dce_hwseq *hwseq = dc->hwseq;
3402 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3403 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3404
3405 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3406 struct hubbub *hubbub = dc->res_pool->hubbub;
3407
3408 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3409 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3410 }
3411 }
3412}
3413
3414void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3415{
3416 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3417
3418 /* In DCN, this programming sequence is owned by the hubbub */
3419 hubbub->funcs->update_dchub(hubbub, dh_data);
3420}
3421
3422static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3423{
3424 struct pipe_ctx *test_pipe, *split_pipe;
3425 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3426 struct rect r1 = scl_data->recout, r2, r2_half;
3427 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3428 int cur_layer = pipe_ctx->plane_state->layer_index;
3429
3430 /**
3431 * Disable the cursor if there's another pipe above this with a
3432 * plane that contains this pipe's viewport to prevent double cursor
3433 * and incorrect scaling artifacts.
3434 */
3435 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3436 test_pipe = test_pipe->top_pipe) {
3437 // Skip invisible layer and pipe-split plane on same layer
3438 if (!test_pipe->plane_state ||
3439 !test_pipe->plane_state->visible ||
3440 test_pipe->plane_state->layer_index == cur_layer)
3441 continue;
3442
3443 r2 = test_pipe->plane_res.scl_data.recout;
3444 r2_r = r2.x + r2.width;
3445 r2_b = r2.y + r2.height;
3446 split_pipe = test_pipe;
3447
3448 /**
3449 * There is another half plane on same layer because of
3450 * pipe-split, merge together per same height.
3451 */
3452 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3453 split_pipe = split_pipe->top_pipe)
3454 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3455 r2_half = split_pipe->plane_res.scl_data.recout;
3456 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3457 r2.width = r2.width + r2_half.width;
3458 r2_r = r2.x + r2.width;
3459 break;
3460 }
3461
3462 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3463 return true;
3464 }
3465
3466 return false;
3467}
3468
/*
 * dcn10_set_cursor_position - translate the stream-space cursor position
 * into plane space (accounting for scaling, viewport clipping, rotation,
 * mirroring, pipe split and ODM combine) and program it into both the
 * HUBP and the DPP.
 *
 * The stream's cursor_position is copied and transformed locally; the
 * stream state itself is not modified.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream,
	};
	bool pipe_split_on = false;
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/* A viewport smaller than the plane's source rect on a piped plane
	 * indicates pipe split (each pipe scans part of the surface).
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			pipe_split_on = true;
		}
	}

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: width and height swap roles in the scale factor */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* x_pos/y_pos are non-negative here, so the narrowing is safe */
	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* video progressive surfaces do not support the HW cursor here */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* hide the cursor when a higher plane fully covers this pipe */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	/* NOTE(review): pos_cpy.x/y appear unsigned while the mirror math
	 * below mixes in int casts — relies on values staying non-negative;
	 * confirm against struct dc_cursor_position.
	 */
	if (param.rotation == ROTATION_ANGLE_0) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = temp_x + viewport_width;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}
	}
	// Swap axis and mirror horizontally
	else if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				/* pick the sibling split pipe's viewport.y */
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				/* ODM combine: pick the neighboring ODM pipe's viewport.y */
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (!param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = 2 * viewport_width - temp_x;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* program the final position into both pipeline stages */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3707
3708void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3709{
3710 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3711
3712 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3713 pipe_ctx->plane_res.hubp, attributes);
3714 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3715 pipe_ctx->plane_res.dpp, attributes);
3716}
3717
3718void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3719{
3720 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3721 struct fixed31_32 multiplier;
3722 struct dpp_cursor_attributes opt_attr = { 0 };
3723 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3724 struct custom_float_format fmt;
3725
3726 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3727 return;
3728
3729 fmt.exponenta_bits = 5;
3730 fmt.mantissa_bits = 10;
3731 fmt.sign = true;
3732
3733 if (sdr_white_level > 80) {
3734 multiplier = dc_fixpt_from_fraction(numerator: sdr_white_level, denominator: 80);
3735 convert_to_custom_float_format(value: multiplier, format: &fmt, result: &hw_scale);
3736 }
3737
3738 opt_attr.scale = hw_scale;
3739 opt_attr.bias = 0;
3740
3741 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3742 pipe_ctx->plane_res.dpp, &opt_attr);
3743}
3744
3745/*
3746 * apply_front_porch_workaround TODO FPGA still need?
3747 *
3748 * This is a workaround for a bug that has existed since R5xx and has not been
3749 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3750 */
3751static void apply_front_porch_workaround(
3752 struct dc_crtc_timing *timing)
3753{
3754 if (timing->flags.INTERLACE == 1) {
3755 if (timing->v_front_porch < 2)
3756 timing->v_front_porch = 2;
3757 } else {
3758 if (timing->v_front_porch < 1)
3759 timing->v_front_porch = 1;
3760 }
3761}
3762
3763int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3764{
3765 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3766 struct dc_crtc_timing patched_crtc_timing;
3767 int vesa_sync_start;
3768 int asic_blank_end;
3769 int interlace_factor;
3770
3771 patched_crtc_timing = *dc_crtc_timing;
3772 apply_front_porch_workaround(timing: &patched_crtc_timing);
3773
3774 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3775
3776 vesa_sync_start = patched_crtc_timing.v_addressable +
3777 patched_crtc_timing.v_border_bottom +
3778 patched_crtc_timing.v_front_porch;
3779
3780 asic_blank_end = (patched_crtc_timing.v_total -
3781 vesa_sync_start -
3782 patched_crtc_timing.v_border_top)
3783 * interlace_factor;
3784
3785 return asic_blank_end -
3786 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3787}
3788
3789void dcn10_calc_vupdate_position(
3790 struct dc *dc,
3791 struct pipe_ctx *pipe_ctx,
3792 uint32_t *start_line,
3793 uint32_t *end_line)
3794{
3795 const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3796 int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3797
3798 if (vupdate_pos >= 0)
3799 *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3800 else
3801 *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3802 *end_line = (*start_line + 2) % timing->v_total;
3803}
3804
3805static void dcn10_cal_vline_position(
3806 struct dc *dc,
3807 struct pipe_ctx *pipe_ctx,
3808 uint32_t *start_line,
3809 uint32_t *end_line)
3810{
3811 const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3812 int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3813
3814 if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3815 if (vline_pos > 0)
3816 vline_pos--;
3817 else if (vline_pos < 0)
3818 vline_pos++;
3819
3820 vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3821 if (vline_pos >= 0)
3822 *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3823 else
3824 *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3825 *end_line = (*start_line + 2) % timing->v_total;
3826 } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3827 // vsync is line 0 so start_line is just the requested line offset
3828 *start_line = vline_pos;
3829 *end_line = (*start_line + 2) % timing->v_total;
3830 } else
3831 ASSERT(0);
3832}
3833
3834void dcn10_setup_periodic_interrupt(
3835 struct dc *dc,
3836 struct pipe_ctx *pipe_ctx)
3837{
3838 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3839 uint32_t start_line = 0;
3840 uint32_t end_line = 0;
3841
3842 dcn10_cal_vline_position(dc, pipe_ctx, start_line: &start_line, end_line: &end_line);
3843
3844 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3845}
3846
3847void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3848{
3849 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3850 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3851
3852 if (start_line < 0) {
3853 ASSERT(0);
3854 start_line = 0;
3855 }
3856
3857 if (tg->funcs->setup_vertical_interrupt2)
3858 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3859}
3860
3861void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3862 struct dc_link_settings *link_settings)
3863{
3864 struct encoder_unblank_param params = {0};
3865 struct dc_stream_state *stream = pipe_ctx->stream;
3866 struct dc_link *link = stream->link;
3867 struct dce_hwseq *hws = link->dc->hwseq;
3868
3869 /* only 3 items below are used by unblank */
3870 params.timing = pipe_ctx->stream->timing;
3871
3872 params.link_settings.link_rate = link_settings->link_rate;
3873
3874 if (dc_is_dp_signal(signal: pipe_ctx->stream->signal)) {
3875 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3876 params.timing.pix_clk_100hz /= 2;
3877 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3878 }
3879
3880 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3881 hws->funcs.edp_backlight_control(link, true);
3882 }
3883}
3884
3885void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3886 const uint8_t *custom_sdp_message,
3887 unsigned int sdp_message_size)
3888{
3889 if (dc_is_dp_signal(signal: pipe_ctx->stream->signal)) {
3890 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3891 pipe_ctx->stream_res.stream_enc,
3892 custom_sdp_message,
3893 sdp_message_size);
3894 }
3895}
3896enum dc_status dcn10_set_clock(struct dc *dc,
3897 enum dc_clock_type clock_type,
3898 uint32_t clk_khz,
3899 uint32_t stepping)
3900{
3901 struct dc_state *context = dc->current_state;
3902 struct dc_clock_config clock_cfg = {0};
3903 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3904
3905 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3906 return DC_FAIL_UNSUPPORTED_1;
3907
3908 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3909 context, clock_type, &clock_cfg);
3910
3911 if (clk_khz > clock_cfg.max_clock_khz)
3912 return DC_FAIL_CLK_EXCEED_MAX;
3913
3914 if (clk_khz < clock_cfg.min_clock_khz)
3915 return DC_FAIL_CLK_BELOW_MIN;
3916
3917 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3918 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3919
3920 /*update internal request clock for update clock use*/
3921 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3922 current_clocks->dispclk_khz = clk_khz;
3923 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3924 current_clocks->dppclk_khz = clk_khz;
3925 else
3926 return DC_ERROR_UNEXPECTED;
3927
3928 if (dc->clk_mgr->funcs->update_clocks)
3929 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3930 context, true);
3931 return DC_OK;
3932
3933}
3934
3935void dcn10_get_clock(struct dc *dc,
3936 enum dc_clock_type clock_type,
3937 struct dc_clock_config *clock_cfg)
3938{
3939 struct dc_state *context = dc->current_state;
3940
3941 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3942 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3943
3944}
3945
3946void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3947{
3948 struct resource_pool *pool = dc->res_pool;
3949 int i;
3950
3951 for (i = 0; i < pool->pipe_count; i++) {
3952 struct hubp *hubp = pool->hubps[i];
3953 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3954
3955 hubp->funcs->hubp_read_state(hubp);
3956
3957 if (!s->blank_en)
3958 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3959 }
3960}
3961

/* source: linux/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c */