1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: AMD |
23 | */ |
24 | |
25 | #include "dm_services.h" |
26 | |
27 | #include "amdgpu.h" |
28 | |
29 | #include "dc.h" |
30 | |
31 | #include "core_status.h" |
32 | #include "core_types.h" |
33 | #include "hw_sequencer.h" |
34 | #include "dce/dce_hwseq.h" |
35 | |
36 | #include "resource.h" |
37 | #include "dc_state.h" |
38 | #include "dc_state_priv.h" |
39 | #include "dc_plane.h" |
40 | #include "dc_plane_priv.h" |
41 | #include "dc_stream_priv.h" |
42 | |
43 | #include "gpio_service_interface.h" |
44 | #include "clk_mgr.h" |
45 | #include "clock_source.h" |
46 | #include "dc_bios_types.h" |
47 | |
48 | #include "bios_parser_interface.h" |
49 | #include "bios/bios_parser_helper.h" |
50 | #include "include/irq_service_interface.h" |
51 | #include "transform.h" |
52 | #include "dmcu.h" |
53 | #include "dpp.h" |
54 | #include "timing_generator.h" |
55 | #include "abm.h" |
56 | #include "virtual/virtual_link_encoder.h" |
57 | #include "hubp.h" |
58 | |
59 | #include "link_hwss.h" |
60 | #include "link_encoder.h" |
61 | #include "link_enc_cfg.h" |
62 | |
63 | #include "link.h" |
64 | #include "dm_helpers.h" |
65 | #include "mem_input.h" |
66 | |
67 | #include "dc_dmub_srv.h" |
68 | |
69 | #include "dsc.h" |
70 | |
71 | #include "vm_helper.h" |
72 | |
73 | #include "dce/dce_i2c.h" |
74 | |
75 | #include "dmub/dmub_srv.h" |
76 | |
77 | #include "dce/dmub_psr.h" |
78 | |
79 | #include "dce/dmub_hw_lock_mgr.h" |
80 | |
81 | #include "dc_trace.h" |
82 | |
83 | #include "hw_sequencer_private.h" |
84 | |
85 | #if defined(CONFIG_DRM_AMD_DC_FP) |
86 | #include "dml2/dml2_internal_types.h" |
87 | #endif |
88 | |
89 | #include "dce/dmub_outbox.h" |
90 | |
91 | #define CTX \ |
92 | dc->ctx |
93 | |
94 | #define DC_LOGGER \ |
95 | dc->ctx->logger |
96 | |
97 | static const char DC_BUILD_ID[] = "production-build"; |
98 | |
99 | /** |
100 | * DOC: Overview |
101 | * |
102 | * DC is the OS-agnostic component of the amdgpu DC driver. |
103 | * |
104 | * DC maintains and validates a set of structs representing the state of the |
105 | * driver and writes that state to AMD hardware. |
106 | * |
107 | * Main DC HW structs: |
108 | * |
109 | * struct dc - The central struct. One per driver. Created on driver load, |
110 | * destroyed on driver unload. |
111 | * |
112 | * struct dc_context - One per driver. |
113 | * Used as a backpointer by most other structs in dc. |
114 | * |
115 | * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP |
116 | * plugpoints). Created on driver load, destroyed on driver unload. |
117 | * |
118 | * struct dc_sink - One per display. Created on boot or hotplug. |
119 | * Destroyed on shutdown or hotunplug. A dc_link can have a local sink |
120 | * (the display directly attached). It may also have one or more remote |
121 | * sinks (in the Multi-Stream Transport case). |
122 | * |
123 | * struct resource_pool - One per driver. Represents the hw blocks not in the |
124 | * main pipeline. Not directly accessible by dm. |
125 | * |
126 | * Main dc state structs: |
127 | * |
128 | * These structs can be created and destroyed as needed. There is a full set of |
129 | * these structs in dc->current_state representing the currently programmed state. |
130 | * |
131 | * struct dc_state - The global DC state to track global state information, |
132 | * such as bandwidth values. |
133 | * |
134 | * struct dc_stream_state - Represents the hw configuration for the pipeline from |
135 | * a framebuffer to a display. Maps one-to-one with dc_sink. |
136 | * |
137 | * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, |
138 | * and may have more in the Multi-Plane Overlay case. |
139 | * |
140 | * struct resource_context - Represents the programmable state of everything in |
141 | * the resource_pool. Not directly accessible by dm. |
142 | * |
143 | * struct pipe_ctx - A member of struct resource_context. Represents the |
144 | * internal hardware pipeline components. Each dc_plane_state has either |
145 | * one or two (in the pipe-split case). |
146 | */ |
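/*
 * Illustrative sketch only (not driver code): one way a display manager (DM)
 * might exercise the lifecycle described above using entry points defined in
 * this file. The contents of dc_init_data are assumed to be filled in by the
 * caller and are elided here.
 *
 *	struct dc *dc = dc_create(&init_data);	// one dc instance per driver
 *
 *	if (dc) {
 *		dc_hardware_init(dc);		// program the initial hardware state
 *		// ... build a dc_state with streams/planes and commit it ...
 *		dc_destroy(&dc);		// tear everything down on driver unload
 *	}
 */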
147 | |
148 | /* Private functions */ |
149 | |
150 | static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) |
151 | { |
152 | if (new > *original) |
153 | *original = new; |
154 | } |
155 | |
156 | static void destroy_links(struct dc *dc) |
157 | { |
158 | uint32_t i; |
159 | |
160 | for (i = 0; i < dc->link_count; i++) { |
161 | if (NULL != dc->links[i]) |
162 | dc->link_srv->destroy_link(&dc->links[i]); |
163 | } |
164 | } |
165 | |
166 | static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) |
167 | { |
168 | int i; |
169 | uint32_t count = 0; |
170 | |
171 | for (i = 0; i < num_links; i++) { |
172 | if (links[i]->connector_signal == SIGNAL_TYPE_EDP || |
173 | links[i]->is_internal_display) |
174 | count++; |
175 | } |
176 | |
177 | return count; |
178 | } |
179 | |
180 | static int get_seamless_boot_stream_count(struct dc_state *ctx) |
181 | { |
182 | uint8_t i; |
183 | uint8_t seamless_boot_stream_count = 0; |
184 | |
185 | for (i = 0; i < ctx->stream_count; i++) |
186 | if (ctx->streams[i]->apply_seamless_boot_optimization) |
187 | seamless_boot_stream_count++; |
188 | |
189 | return seamless_boot_stream_count; |
190 | } |
191 | |
192 | static bool create_links( |
193 | struct dc *dc, |
194 | uint32_t num_virtual_links) |
195 | { |
196 | int i; |
197 | int connectors_num; |
198 | struct dc_bios *bios = dc->ctx->dc_bios; |
199 | |
200 | dc->link_count = 0; |
201 | |
202 | connectors_num = bios->funcs->get_connectors_number(bios); |
203 | |
204 | DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); |
205 | |
206 | if (connectors_num > ENUM_ID_COUNT) { |
207 | dm_error( |
208 | "DC: Number of connectors %d exceeds maximum of %d!\n", |
209 | connectors_num, |
210 | ENUM_ID_COUNT); |
211 | return false; |
212 | } |
213 | |
214 | dm_output_to_console( |
215 | "DC: %s: connectors_num: physical:%d, virtual:%d\n", |
216 | __func__, |
217 | connectors_num, |
218 | num_virtual_links); |
219 | |
220 | // condition loop on link_count to allow skipping invalid indices |
221 | for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { |
222 | struct link_init_data link_init_params = {0}; |
223 | struct dc_link *link; |
224 | |
225 | DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); |
226 | |
227 | link_init_params.ctx = dc->ctx; |
228 | /* next BIOS object table connector */ |
229 | link_init_params.connector_index = i; |
230 | link_init_params.link_index = dc->link_count; |
231 | link_init_params.dc = dc; |
232 | link = dc->link_srv->create_link(&link_init_params); |
233 | |
234 | if (link) { |
235 | dc->links[dc->link_count] = link; |
236 | link->dc = dc; |
237 | ++dc->link_count; |
238 | } |
239 | } |
240 | |
241 | DC_LOG_DC("BIOS object table - end"); |
242 | |
243 | /* Create a link for each usb4 dpia port */ |
244 | for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { |
245 | struct link_init_data link_init_params = {0}; |
246 | struct dc_link *link; |
247 | |
248 | link_init_params.ctx = dc->ctx; |
249 | link_init_params.connector_index = i; |
250 | link_init_params.link_index = dc->link_count; |
251 | link_init_params.dc = dc; |
252 | link_init_params.is_dpia_link = true; |
253 | |
254 | link = dc->link_srv->create_link(&link_init_params); |
255 | if (link) { |
256 | dc->links[dc->link_count] = link; |
257 | link->dc = dc; |
258 | ++dc->link_count; |
259 | } |
260 | } |
261 | |
262 | for (i = 0; i < num_virtual_links; i++) { |
263 | struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); |
264 | struct encoder_init_data enc_init = {0}; |
265 | |
266 | if (link == NULL) { |
267 | BREAK_TO_DEBUGGER(); |
268 | goto failed_alloc; |
269 | } |
270 | |
271 | link->link_index = dc->link_count; |
272 | dc->links[dc->link_count] = link; |
273 | dc->link_count++; |
274 | |
275 | link->ctx = dc->ctx; |
276 | link->dc = dc; |
277 | link->connector_signal = SIGNAL_TYPE_VIRTUAL; |
278 | link->link_id.type = OBJECT_TYPE_CONNECTOR; |
279 | link->link_id.id = CONNECTOR_ID_VIRTUAL; |
280 | link->link_id.enum_id = ENUM_ID_1; |
281 | link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; |
282 | link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); |
283 | |
284 | if (!link->link_enc) { |
285 | BREAK_TO_DEBUGGER(); |
286 | goto failed_alloc; |
287 | } |
288 | |
289 | link->link_status.dpcd_caps = &link->dpcd_caps; |
290 | |
291 | enc_init.ctx = dc->ctx; |
292 | enc_init.channel = CHANNEL_ID_UNKNOWN; |
293 | enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; |
294 | enc_init.transmitter = TRANSMITTER_UNKNOWN; |
295 | enc_init.connector = link->link_id; |
296 | enc_init.encoder.type = OBJECT_TYPE_ENCODER; |
297 | enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; |
298 | enc_init.encoder.enum_id = ENUM_ID_1; |
299 | virtual_link_encoder_construct(link->link_enc, &enc_init); |
300 | } |
301 | |
302 | dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); |
303 | |
304 | return true; |
305 | |
306 | failed_alloc: |
307 | return false; |
308 | } |
309 | |
310 | /* Create additional DIG link encoder objects if fewer than the platform |
311 | * supports were created during link construction. This can happen if the |
312 | * number of physical connectors is less than the number of DIGs. |
313 | */ |
314 | static bool create_link_encoders(struct dc *dc) |
315 | { |
316 | bool res = true; |
317 | unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; |
318 | unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; |
319 | int i; |
320 | |
321 | /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG |
322 | * link encoders and physical display endpoints and does not require |
323 | * additional link encoder objects. |
324 | */ |
325 | if (num_usb4_dpia == 0) |
326 | return res; |
327 | |
328 | /* Create as many link encoder objects as the platform supports. DPIA |
329 | * endpoints can be programmably mapped to any DIG. |
330 | */ |
331 | if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { |
332 | for (i = 0; i < num_dig_link_enc; i++) { |
333 | struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; |
334 | |
335 | if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { |
336 | link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, |
337 | (enum engine_id)(ENGINE_ID_DIGA + i)); |
338 | if (link_enc) { |
339 | dc->res_pool->link_encoders[i] = link_enc; |
340 | dc->res_pool->dig_link_enc_count++; |
341 | } else { |
342 | res = false; |
343 | } |
344 | } |
345 | } |
346 | } |
347 | |
348 | return res; |
349 | } |
350 | |
351 | /* Destroy any additional DIG link encoder objects created by |
352 | * create_link_encoders(). |
353 | * NB: Must only be called after destroy_links(). |
354 | */ |
355 | static void destroy_link_encoders(struct dc *dc) |
356 | { |
357 | unsigned int num_usb4_dpia; |
358 | unsigned int num_dig_link_enc; |
359 | int i; |
360 | |
361 | if (!dc->res_pool) |
362 | return; |
363 | |
364 | num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; |
365 | num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; |
366 | |
367 | /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG |
368 | * link encoders and physical display endpoints and does not require |
369 | * additional link encoder objects. |
370 | */ |
371 | if (num_usb4_dpia == 0) |
372 | return; |
373 | |
374 | for (i = 0; i < num_dig_link_enc; i++) { |
375 | struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; |
376 | |
377 | if (link_enc) { |
378 | link_enc->funcs->destroy(&link_enc); |
379 | dc->res_pool->link_encoders[i] = NULL; |
380 | dc->res_pool->dig_link_enc_count--; |
381 | } |
382 | } |
383 | } |
384 | |
385 | static struct dc_perf_trace *dc_perf_trace_create(void) |
386 | { |
387 | return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); |
388 | } |
389 | |
390 | static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) |
391 | { |
392 | kfree(*perf_trace); |
393 | *perf_trace = NULL; |
394 | } |
395 | |
396 | static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) |
397 | { |
398 | if (!dc || !stream || !adjust) |
399 | return false; |
400 | |
401 | if (!dc->current_state) |
402 | return false; |
403 | |
404 | int i; |
405 | |
406 | for (i = 0; i < MAX_PIPES; i++) { |
407 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
408 | |
409 | if (pipe->stream == stream && pipe->stream_res.tg) { |
410 | if (dc->hwss.set_long_vtotal) |
411 | dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max); |
412 | |
413 | return true; |
414 | } |
415 | } |
416 | |
417 | return false; |
418 | } |
419 | |
420 | /** |
421 | * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR |
422 | * @dc: dc reference |
423 | * @stream: Initial dc stream state |
424 | * @adjust: Updated parameters for vertical_total_min and vertical_total_max |
425 | * |
426 | * Looks up the pipe context of dc_stream_state and updates the |
427 | * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh |
428 | * Rate, which is a power-saving feature that targets reducing panel |
429 | * refresh rate while the screen is static |
430 | * |
431 | * Return: %true if the pipe context is found and adjusted; |
432 | * %false if the pipe context is not found. |
433 | */ |
434 | bool dc_stream_adjust_vmin_vmax(struct dc *dc, |
435 | struct dc_stream_state *stream, |
436 | struct dc_crtc_timing_adjust *adjust) |
437 | { |
438 | int i; |
439 | |
440 | /* |
441 | * Don't adjust DRR while there are bandwidth optimizations pending to |
442 | * avoid conflicting with firmware updates. |
443 | */ |
444 | if (dc->ctx->dce_version > DCE_VERSION_MAX) { |
445 | if (dc->optimized_required || dc->wm_optimized_required) { |
446 | stream->adjust.timing_adjust_pending = true; |
447 | return false; |
448 | } |
449 | } |
450 | |
451 | dc_exit_ips_for_hw_access(dc); |
452 | |
453 | stream->adjust.v_total_max = adjust->v_total_max; |
454 | stream->adjust.v_total_mid = adjust->v_total_mid; |
455 | stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; |
456 | stream->adjust.v_total_min = adjust->v_total_min; |
457 | stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt; |
458 | |
459 | if (dc->caps.max_v_total != 0 && |
460 | (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) { |
461 | stream->adjust.timing_adjust_pending = false; |
462 | if (adjust->allow_otg_v_count_halt) |
463 | return set_long_vtotal(dc, stream, adjust); |
464 | else |
465 | return false; |
466 | } |
467 | |
468 | for (i = 0; i < MAX_PIPES; i++) { |
469 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
470 | |
471 | if (pipe->stream == stream && pipe->stream_res.tg) { |
472 | dc->hwss.set_drr(&pipe, |
473 | 1, |
474 | *adjust); |
475 | stream->adjust.timing_adjust_pending = false; |
476 | return true; |
477 | } |
478 | } |
479 | return false; |
480 | } |
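/*
 * Illustrative sketch only (not driver code): a caller that wants to pin a
 * stream to a fixed refresh period could fill a dc_crtc_timing_adjust with
 * equal minimum and maximum totals and pass it in. The VTOTAL value below is
 * a placeholder, not a recommendation.
 *
 *	struct dc_crtc_timing_adjust adjust = {0};
 *
 *	adjust.v_total_min = 2250;	// hypothetical VTOTAL for the target rate
 *	adjust.v_total_max = 2250;
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		;	// pipe not found, or the adjustment was deferred
 */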
481 | |
482 | /** |
483 | * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of |
484 | * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate) |
485 | * |
486 | * @dc: [in] dc reference |
487 | * @stream: [in] Initial dc stream state |
488 | * @refresh_rate: [out] Receives the last VTOTAL used by DRR |
489 | * |
490 | * Return: %true if the pipe context is found and there is an associated |
491 | * timing_generator for the DC; |
492 | * %false if the pipe context is not found or there is no |
493 | * timing_generator for the DC. |
494 | */ |
495 | bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, |
496 | struct dc_stream_state *stream, |
497 | uint32_t *refresh_rate) |
498 | { |
499 | bool status = false; |
500 | |
501 | int i = 0; |
502 | |
503 | dc_exit_ips_for_hw_access(dc); |
504 | |
505 | for (i = 0; i < MAX_PIPES; i++) { |
506 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
507 | |
508 | if (pipe->stream == stream && pipe->stream_res.tg) { |
509 | /* Only execute if a function pointer has been defined for |
510 | * the DC version in question |
511 | */ |
512 | if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { |
513 | pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); |
514 | |
515 | status = true; |
516 | |
517 | break; |
518 | } |
519 | } |
520 | } |
521 | |
522 | return status; |
523 | } |
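/*
 * Illustrative sketch only (not driver code): reading back the last VTOTAL
 * that DRR applied for a stream.
 *
 *	uint32_t last_vtotal = 0;
 *
 *	if (dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal))
 *		;	// last_vtotal now holds the most recently used DRR VTOTAL
 */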
524 | |
525 | #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) |
526 | static inline void |
527 | dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, |
528 | struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) |
529 | { |
530 | union dmub_rb_cmd cmd = {0}; |
531 | |
532 | cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num; |
533 | cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num; |
534 | |
535 | if (is_stop) { |
536 | cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; |
537 | cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE; |
538 | } else { |
539 | cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; |
540 | cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY; |
541 | cmd.secure_display.roi_info.x_start = rect->x; |
542 | cmd.secure_display.roi_info.y_start = rect->y; |
543 | cmd.secure_display.roi_info.x_end = rect->x + rect->width; |
544 | cmd.secure_display.roi_info.y_end = rect->y + rect->height; |
545 | } |
546 | |
547 | dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); |
548 | } |
549 | |
550 | static inline void |
551 | dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, |
552 | struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) |
553 | { |
554 | if (is_stop) |
555 | dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); |
556 | else |
557 | dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping); |
558 | } |
559 | |
560 | bool |
561 | dc_stream_forward_crc_window(struct dc_stream_state *stream, |
562 | struct rect *rect, uint8_t phy_id, bool is_stop) |
563 | { |
564 | struct dmcu *dmcu; |
565 | struct dc_dmub_srv *dmub_srv; |
566 | struct otg_phy_mux mux_mapping; |
567 | struct pipe_ctx *pipe; |
568 | int i; |
569 | struct dc *dc = stream->ctx->dc; |
570 | |
571 | for (i = 0; i < MAX_PIPES; i++) { |
572 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
573 | if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) |
574 | break; |
575 | } |
576 | |
577 | /* Stream not found */ |
578 | if (i == MAX_PIPES) |
579 | return false; |
580 | |
581 | mux_mapping.phy_output_num = phy_id; |
582 | mux_mapping.otg_output_num = pipe->stream_res.tg->inst; |
583 | |
584 | dmcu = dc->res_pool->dmcu; |
585 | dmub_srv = dc->ctx->dmub_srv; |
586 | |
587 | /* forward to dmub */ |
588 | if (dmub_srv) |
589 | dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop); |
590 | /* forward to dmcu */ |
591 | else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) |
592 | dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop); |
593 | else |
594 | return false; |
595 | |
596 | return true; |
597 | } |
598 | |
599 | static void |
600 | dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv, |
601 | struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop) |
602 | { |
603 | int i; |
604 | union dmub_rb_cmd cmd = {0}; |
605 | |
606 | cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num; |
607 | cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num; |
608 | |
609 | cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; |
610 | |
611 | if (stop) { |
612 | cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE; |
613 | } else { |
614 | cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY; |
615 | for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) { |
616 | cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x; |
617 | cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y; |
618 | cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width; |
619 | cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height; |
620 | cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable; |
621 | } |
622 | } |
623 | |
624 | dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); |
625 | } |
626 | |
627 | bool |
628 | dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream, |
629 | struct crc_window *window, uint8_t phy_id, bool stop) |
630 | { |
631 | struct dc_dmub_srv *dmub_srv; |
632 | struct otg_phy_mux mux_mapping; |
633 | struct pipe_ctx *pipe; |
634 | int i; |
635 | struct dc *dc = stream->ctx->dc; |
636 | |
637 | for (i = 0; i < MAX_PIPES; i++) { |
638 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
639 | if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) |
640 | break; |
641 | } |
642 | |
643 | /* Stream not found */ |
644 | if (i == MAX_PIPES) |
645 | return false; |
646 | |
647 | mux_mapping.phy_output_num = phy_id; |
648 | mux_mapping.otg_output_num = pipe->stream_res.tg->inst; |
649 | |
650 | dmub_srv = dc->ctx->dmub_srv; |
651 | |
652 | /* forward to dmub only. No dmcu support. */ |
653 | if (dmub_srv) |
654 | dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop); |
655 | else |
656 | return false; |
657 | |
658 | return true; |
659 | } |
660 | #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ |
661 | |
662 | /** |
663 | * dc_stream_configure_crc() - Configure CRC capture for the given stream. |
664 | * @dc: DC Object |
665 | * @stream: The stream to configure CRC on. |
666 | * @crc_window: CRC window (x/y start/end) information |
667 | * @enable: Enable CRC if true, disable otherwise. |
668 | * @continuous: Capture CRC on every frame if true. Otherwise, only capture |
669 | * once. |
670 | * @idx: Capture CRC on which CRC engine instance |
671 | * @reset: Reset CRC engine before the configuration |
672 | * |
673 | * By default, the entire frame is used to calculate the CRC. |
674 | * |
675 | * Return: %false if the stream is not found or CRC capture is not supported; |
676 | * %true if the stream has been configured. |
677 | */ |
678 | bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, |
679 | struct crc_params *crc_window, bool enable, bool continuous, |
680 | uint8_t idx, bool reset) |
681 | { |
682 | struct pipe_ctx *pipe; |
683 | struct crc_params param; |
684 | struct timing_generator *tg; |
685 | |
686 | pipe = resource_get_otg_master_for_stream( |
687 | &dc->current_state->res_ctx, stream); |
688 | |
689 | /* Stream not found */ |
690 | if (pipe == NULL) |
691 | return false; |
692 | |
693 | dc_exit_ips_for_hw_access(dc); |
694 | |
695 | /* By default, capture the full frame */ |
696 | param.windowa_x_start = 0; |
697 | param.windowa_y_start = 0; |
698 | param.windowa_x_end = pipe->stream->timing.h_addressable; |
699 | param.windowa_y_end = pipe->stream->timing.v_addressable; |
700 | param.windowb_x_start = 0; |
701 | param.windowb_y_start = 0; |
702 | param.windowb_x_end = pipe->stream->timing.h_addressable; |
703 | param.windowb_y_end = pipe->stream->timing.v_addressable; |
704 | |
705 | if (crc_window) { |
706 | param.windowa_x_start = crc_window->windowa_x_start; |
707 | param.windowa_y_start = crc_window->windowa_y_start; |
708 | param.windowa_x_end = crc_window->windowa_x_end; |
709 | param.windowa_y_end = crc_window->windowa_y_end; |
710 | param.windowb_x_start = crc_window->windowb_x_start; |
711 | param.windowb_y_start = crc_window->windowb_y_start; |
712 | param.windowb_x_end = crc_window->windowb_x_end; |
713 | param.windowb_y_end = crc_window->windowb_y_end; |
714 | } |
715 | |
716 | param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; |
717 | param.odm_mode = pipe->next_odm_pipe ? 1:0; |
718 | |
719 | /* Default to the union of both windows */ |
720 | param.selection = UNION_WINDOW_A_B; |
721 | param.continuous_mode = continuous; |
722 | param.enable = enable; |
723 | |
724 | param.crc_eng_inst = idx; |
725 | param.reset = reset; |
726 | |
727 | tg = pipe->stream_res.tg; |
728 | |
729 | /* Only call if supported */ |
730 | if (tg->funcs->configure_crc) |
731 | return tg->funcs->configure_crc(tg, ¶m); |
732 | DC_LOG_WARNING("CRC capture not supported."); |
733 | return false; |
734 | } |
735 | |
736 | /** |
737 | * dc_stream_get_crc() - Get CRC values for the given stream. |
738 | * |
739 | * @dc: DC object. |
740 | * @stream: The DC stream state of the stream to get CRCs from. |
741 | * @idx: index of crc engine to get CRC from |
742 | * @r_cr: CRC value for the red component. |
743 | * @g_y: CRC value for the green component. |
744 | * @b_cb: CRC value for the blue component. |
745 | * |
746 | * dc_stream_configure_crc needs to be called beforehand to enable CRCs. |
747 | * |
748 | * Return: |
749 | * %false if stream is not found, or if CRCs are not enabled. |
750 | */ |
751 | bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx, |
752 | uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) |
753 | { |
754 | int i; |
755 | struct pipe_ctx *pipe; |
756 | struct timing_generator *tg; |
757 | |
758 | dc_exit_ips_for_hw_access(dc); |
759 | |
760 | for (i = 0; i < MAX_PIPES; i++) { |
761 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
762 | if (pipe->stream == stream) |
763 | break; |
764 | } |
765 | /* Stream not found */ |
766 | if (i == MAX_PIPES) |
767 | return false; |
768 | |
769 | tg = pipe->stream_res.tg; |
770 | |
771 | if (tg->funcs->get_crc) |
772 | return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb); |
773 | DC_LOG_WARNING("CRC capture not supported."); |
774 | return false; |
775 | } |
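/*
 * Illustrative sketch only (not driver code): a typical capture sequence pairs
 * the two calls above. Configure continuous full-frame CRC capture on CRC
 * engine 0 with a reset, then read the per-component values back once a frame
 * has been captured.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, true, 0, true) &&
 *	    dc_stream_get_crc(dc, stream, 0, &r_cr, &g_y, &b_cb))
 *		;	// r_cr/g_y/b_cb now hold valid CRC values
 */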
776 | |
777 | void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, |
778 | enum dc_dynamic_expansion option) |
779 | { |
780 | /* OPP FMT dyn expansion updates*/ |
781 | int i; |
782 | struct pipe_ctx *pipe_ctx; |
783 | |
784 | dc_exit_ips_for_hw_access(dc); |
785 | |
786 | for (i = 0; i < MAX_PIPES; i++) { |
787 | if (dc->current_state->res_ctx.pipe_ctx[i].stream |
788 | == stream) { |
789 | pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; |
790 | pipe_ctx->stream_res.opp->dyn_expansion = option; |
791 | pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( |
792 | pipe_ctx->stream_res.opp, |
793 | COLOR_SPACE_YCBCR601, |
794 | stream->timing.display_color_depth, |
795 | stream->signal); |
796 | } |
797 | } |
798 | } |
799 | |
800 | void dc_stream_set_dither_option(struct dc_stream_state *stream, |
801 | enum dc_dither_option option) |
802 | { |
803 | struct bit_depth_reduction_params params; |
804 | struct dc_link *link = stream->link; |
805 | struct pipe_ctx *pipes = NULL; |
806 | int i; |
807 | |
808 | for (i = 0; i < MAX_PIPES; i++) { |
809 | if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == |
810 | stream) { |
811 | pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; |
812 | break; |
813 | } |
814 | } |
815 | |
816 | if (!pipes) |
817 | return; |
818 | if (option > DITHER_OPTION_MAX) |
819 | return; |
820 | |
821 | dc_exit_ips_for_hw_access(stream->ctx->dc); |
822 | |
823 | stream->dither_option = option; |
824 | |
825 | memset(¶ms, 0, sizeof(params)); |
826 | resource_build_bit_depth_reduction_params(stream, &params); |
827 | stream->bit_depth_params = params; |
828 | |
829 | if (pipes->plane_res.xfm && |
830 | pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { |
831 | pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( |
832 | pipes->plane_res.xfm, |
833 | pipes->plane_res.scl_data.lb_params.depth, |
834 | &stream->bit_depth_params); |
835 | } |
836 | |
837 | pipes->stream_res.opp->funcs-> |
838 | opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); |
839 | } |
840 | |
841 | bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) |
842 | { |
843 | int i; |
844 | bool ret = false; |
845 | struct pipe_ctx *pipes; |
846 | |
847 | dc_exit_ips_for_hw_access(dc); |
848 | |
849 | for (i = 0; i < MAX_PIPES; i++) { |
850 | if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { |
851 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; |
852 | dc->hwss.program_gamut_remap(pipes); |
853 | ret = true; |
854 | } |
855 | } |
856 | |
857 | return ret; |
858 | } |
859 | |
860 | bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) |
861 | { |
862 | int i; |
863 | bool ret = false; |
864 | struct pipe_ctx *pipes; |
865 | |
866 | dc_exit_ips_for_hw_access(dc); |
867 | |
868 | for (i = 0; i < MAX_PIPES; i++) { |
869 | if (dc->current_state->res_ctx.pipe_ctx[i].stream |
870 | == stream) { |
871 | |
872 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; |
873 | dc->hwss.program_output_csc(dc, |
874 | pipes, |
875 | stream->output_color_space, |
876 | stream->csc_color_matrix.matrix, |
877 | pipes->stream_res.opp->inst); |
878 | ret = true; |
879 | } |
880 | } |
881 | |
882 | return ret; |
883 | } |
884 | |
885 | void dc_stream_set_static_screen_params(struct dc *dc, |
886 | struct dc_stream_state **streams, |
887 | int num_streams, |
888 | const struct dc_static_screen_params *params) |
889 | { |
890 | int i, j; |
891 | struct pipe_ctx *pipes_affected[MAX_PIPES]; |
892 | int num_pipes_affected = 0; |
893 | |
894 | dc_exit_ips_for_hw_access(dc); |
895 | |
896 | for (i = 0; i < num_streams; i++) { |
897 | struct dc_stream_state *stream = streams[i]; |
898 | |
899 | for (j = 0; j < MAX_PIPES; j++) { |
900 | if (dc->current_state->res_ctx.pipe_ctx[j].stream |
901 | == stream) { |
902 | pipes_affected[num_pipes_affected++] = |
903 | &dc->current_state->res_ctx.pipe_ctx[j]; |
904 | } |
905 | } |
906 | } |
907 | |
908 | dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); |
909 | } |
910 | |
911 | static void dc_destruct(struct dc *dc) |
912 | { |
913 | // reset link encoder assignment table on destruct |
914 | if (dc->res_pool && dc->res_pool->funcs->link_encs_assign && |
915 | !dc->config.unify_link_enc_assignment) |
916 | link_enc_cfg_init(dc, dc->current_state); |
917 | |
918 | if (dc->current_state) { |
919 | dc_state_release(dc->current_state); |
920 | dc->current_state = NULL; |
921 | } |
922 | |
923 | destroy_links(dc); |
924 | |
925 | destroy_link_encoders(dc); |
926 | |
927 | if (dc->clk_mgr) { |
928 | dc_destroy_clk_mgr(dc->clk_mgr); |
929 | dc->clk_mgr = NULL; |
930 | } |
931 | |
932 | dc_destroy_resource_pool(dc); |
933 | |
934 | if (dc->link_srv) |
935 | link_destroy_link_service(&dc->link_srv); |
936 | |
937 | if (dc->ctx->gpio_service) |
938 | dal_gpio_service_destroy(&dc->ctx->gpio_service); |
939 | |
940 | if (dc->ctx->created_bios) |
941 | dal_bios_parser_destroy(&dc->ctx->dc_bios); |
942 | |
943 | kfree(dc->ctx->logger); |
944 | dc_perf_trace_destroy(&dc->ctx->perf_trace); |
945 | |
946 | kfree(dc->ctx); |
947 | dc->ctx = NULL; |
948 | |
949 | kfree(dc->bw_vbios); |
950 | dc->bw_vbios = NULL; |
951 | |
952 | kfree(dc->bw_dceip); |
953 | dc->bw_dceip = NULL; |
954 | |
955 | kfree(dc->dcn_soc); |
956 | dc->dcn_soc = NULL; |
957 | |
958 | kfree(dc->dcn_ip); |
959 | dc->dcn_ip = NULL; |
960 | |
961 | kfree(dc->vm_helper); |
962 | dc->vm_helper = NULL; |
963 | |
964 | } |
965 | |
966 | static bool dc_construct_ctx(struct dc *dc, |
967 | const struct dc_init_data *init_params) |
968 | { |
969 | struct dc_context *dc_ctx; |
970 | |
971 | dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); |
972 | if (!dc_ctx) |
973 | return false; |
974 | |
975 | dc_ctx->cgs_device = init_params->cgs_device; |
976 | dc_ctx->driver_context = init_params->driver; |
977 | dc_ctx->dc = dc; |
978 | dc_ctx->asic_id = init_params->asic_id; |
979 | dc_ctx->dc_sink_id_count = 0; |
980 | dc_ctx->dc_stream_id_count = 0; |
981 | dc_ctx->dce_environment = init_params->dce_environment; |
982 | dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; |
983 | dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; |
984 | dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets; |
985 | |
986 | /* Create logger */ |
987 | dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL); |
988 | |
989 | if (!dc_ctx->logger) { |
990 | kfree(dc_ctx); |
991 | return false; |
992 | } |
993 | |
994 | dc_ctx->logger->dev = adev_to_drm(init_params->driver); |
995 | dc->dml.logger = dc_ctx->logger; |
996 | |
997 | dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); |
998 | |
999 | dc_ctx->perf_trace = dc_perf_trace_create(); |
1000 | if (!dc_ctx->perf_trace) { |
1001 | kfree(dc_ctx); |
1002 | ASSERT_CRITICAL(false); |
1003 | return false; |
1004 | } |
1005 | |
1006 | dc->ctx = dc_ctx; |
1007 | |
1008 | dc->link_srv = link_create_link_service(); |
1009 | if (!dc->link_srv) |
1010 | return false; |
1011 | |
1012 | return true; |
1013 | } |
1014 | |
1015 | static bool dc_construct(struct dc *dc, |
1016 | const struct dc_init_data *init_params) |
1017 | { |
1018 | struct dc_context *dc_ctx; |
1019 | struct bw_calcs_dceip *dc_dceip; |
1020 | struct bw_calcs_vbios *dc_vbios; |
1021 | struct dcn_soc_bounding_box *dcn_soc; |
1022 | struct dcn_ip_params *dcn_ip; |
1023 | |
1024 | dc->config = init_params->flags; |
1025 | |
1026 | // Allocate memory for the vm_helper |
1027 | dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); |
1028 | if (!dc->vm_helper) { |
1029 | dm_error("%s: failed to create dc->vm_helper\n", __func__); |
1030 | goto fail; |
1031 | } |
1032 | |
1033 | memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); |
1034 | |
1035 | dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); |
1036 | if (!dc_dceip) { |
1037 | dm_error("%s: failed to create dceip\n", __func__); |
1038 | goto fail; |
1039 | } |
1040 | |
1041 | dc->bw_dceip = dc_dceip; |
1042 | |
1043 | dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); |
1044 | if (!dc_vbios) { |
1045 | dm_error("%s: failed to create vbios\n", __func__); |
1046 | goto fail; |
1047 | } |
1048 | |
1049 | dc->bw_vbios = dc_vbios; |
1050 | dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); |
1051 | if (!dcn_soc) { |
1052 | dm_error("%s: failed to create dcn_soc\n", __func__); |
1053 | goto fail; |
1054 | } |
1055 | |
1056 | dc->dcn_soc = dcn_soc; |
1057 | |
1058 | dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); |
1059 | if (!dcn_ip) { |
1060 | dm_error("%s: failed to create dcn_ip\n", __func__); |
1061 | goto fail; |
1062 | } |
1063 | |
1064 | dc->dcn_ip = dcn_ip; |
1065 | |
1066 | if (init_params->bb_from_dmub) |
1067 | dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub; |
1068 | else |
1069 | dc->dml2_options.bb_from_dmub = NULL; |
1070 | |
1071 | if (!dc_construct_ctx(dc, init_params)) { |
1072 | dm_error("%s: failed to create ctx\n", __func__); |
1073 | goto fail; |
1074 | } |
1075 | |
1076 | dc_ctx = dc->ctx; |
1077 | |
1078 | /* Resource should construct all asic specific resources. |
1079 | * This should be the only place where we need to parse the asic id |
1080 | */ |
1081 | if (init_params->vbios_override) |
1082 | dc_ctx->dc_bios = init_params->vbios_override; |
1083 | else { |
1084 | /* Create BIOS parser */ |
1085 | struct bp_init_data bp_init_data; |
1086 | |
1087 | bp_init_data.ctx = dc_ctx; |
1088 | bp_init_data.bios = init_params->asic_id.atombios_base_address; |
1089 | |
1090 | dc_ctx->dc_bios = dal_bios_parser_create( |
1091 | &bp_init_data, dc_ctx->dce_version); |
1092 | |
1093 | if (!dc_ctx->dc_bios) { |
1094 | ASSERT_CRITICAL(false); |
1095 | goto fail; |
1096 | } |
1097 | |
1098 | dc_ctx->created_bios = true; |
1099 | } |
1100 | |
1101 | dc->vendor_signature = init_params->vendor_signature; |
1102 | |
1103 | /* Create GPIO service */ |
1104 | dc_ctx->gpio_service = dal_gpio_service_create( |
1105 | dc_ctx->dce_version, |
1106 | dc_ctx->dce_environment, |
1107 | dc_ctx); |
1108 | |
1109 | if (!dc_ctx->gpio_service) { |
1110 | ASSERT_CRITICAL(false); |
1111 | goto fail; |
1112 | } |
1113 | |
1114 | dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); |
1115 | if (!dc->res_pool) |
1116 | goto fail; |
1117 | |
1118 | /* set i2c speed if not done by the respective dcnxxx__resource.c */ |
1119 | if (dc->caps.i2c_speed_in_khz_hdcp == 0) |
1120 | dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; |
1121 | if (dc->caps.max_optimizable_video_width == 0) |
1122 | dc->caps.max_optimizable_video_width = 5120; |
1123 | dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); |
1124 | if (!dc->clk_mgr) |
1125 | goto fail; |
1126 | #ifdef CONFIG_DRM_AMD_DC_FP |
1127 | dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; |
1128 | |
1129 | if (dc->res_pool->funcs->update_bw_bounding_box) { |
1130 | DC_FP_START(); |
1131 | dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); |
1132 | DC_FP_END(); |
1133 | } |
1134 | #endif |
1135 | |
1136 | if (!create_links(dc, init_params->num_virtual_links)) |
1137 | goto fail; |
1138 | |
1139 | /* Create additional DIG link encoder objects if fewer than the platform |
1140 | * supports were created during link construction. |
1141 | */ |
1142 | if (!create_link_encoders(dc)) |
1143 | goto fail; |
1144 | |
1145 | /* Creation of current_state must occur after dc->dml |
1146 | * is initialized in dc_create_resource_pool because |
1147 | * on creation it copies the contents of dc->dml. |
1148 | */ |
1149 | dc->current_state = dc_state_create(dc, NULL); |
1150 | |
1151 | if (!dc->current_state) { |
1152 | dm_error("%s: failed to create validate ctx\n", __func__); |
1153 | goto fail; |
1154 | } |
1155 | |
1156 | return true; |
1157 | |
1158 | fail: |
1159 | return false; |
1160 | } |
1161 | |
1162 | static void disable_all_writeback_pipes_for_stream( |
1163 | const struct dc *dc, |
1164 | struct dc_stream_state *stream, |
1165 | struct dc_state *context) |
1166 | { |
1167 | int i; |
1168 | |
1169 | for (i = 0; i < stream->num_wb_info; i++) |
1170 | stream->writeback_info[i].wb_enabled = false; |
1171 | } |
1172 | |
1173 | static void apply_ctx_interdependent_lock(struct dc *dc, |
1174 | struct dc_state *context, |
1175 | struct dc_stream_state *stream, |
1176 | bool lock) |
1177 | { |
1178 | int i; |
1179 | |
1180 | /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ |
1181 | if (dc->hwss.interdependent_update_lock) |
1182 | dc->hwss.interdependent_update_lock(dc, context, lock); |
1183 | else { |
1184 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1185 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
1186 | struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; |
1187 | |
1188 | // Copied conditions that were previously in dce110_apply_ctx_for_surface |
1189 | if (stream == pipe_ctx->stream) { |
1190 | if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && |
1191 | (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) |
1192 | dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); |
1193 | } |
1194 | } |
1195 | } |
1196 | } |
1197 | |
1198 | static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) |
1199 | { |
1200 | if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) { |
1201 | memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color, |
1202 | sizeof(pipe_ctx->visual_confirm_color)); |
1203 | return; |
1204 | } |
1205 | |
1206 | if (dc->ctx->dce_version >= DCN_VERSION_1_0) { |
1207 | memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); |
1208 | |
1209 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) |
1210 | get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1211 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) |
1212 | get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1213 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) |
1214 | get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1215 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) |
1216 | get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1217 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC) |
1218 | get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1219 | else { |
1220 | if (dc->ctx->dce_version < DCN_VERSION_2_0) |
1221 | color_space_to_black_color( |
1222 | dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); |
1223 | } |
1224 | if (dc->ctx->dce_version >= DCN_VERSION_2_0) { |
1225 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) |
1226 | get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1227 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) |
1228 | get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1229 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) |
1230 | get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1231 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2) |
1232 | get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1233 | else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC) |
1234 | get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); |
1235 | } |
1236 | } |
1237 | } |
1238 | |
1239 | void dc_get_visual_confirm_for_stream( |
1240 | struct dc *dc, |
1241 | struct dc_stream_state *stream_state, |
1242 | struct tg_color *color) |
1243 | { |
1244 | struct dc_stream_status *stream_status = dc_stream_get_status(stream_state); |
1245 | struct pipe_ctx *pipe_ctx; |
1246 | int i; |
1247 | struct dc_plane_state *plane_state = NULL; |
1248 | |
1249 | if (!stream_status) |
1250 | return; |
1251 | |
1252 | switch (dc->debug.visual_confirm) { |
1253 | case VISUAL_CONFIRM_DISABLE: |
1254 | return; |
1255 | case VISUAL_CONFIRM_PSR: |
1256 | case VISUAL_CONFIRM_FAMS: |
1257 | pipe_ctx = dc_stream_get_pipe_ctx(stream_state); |
1258 | if (!pipe_ctx) |
1259 | return; |
1260 | dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx); |
1261 | memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color)); |
1262 | return; |
1263 | |
1264 | default: |
1265 | /* find plane with highest layer_index */ |
1266 | for (i = 0; i < stream_status->plane_count; i++) { |
1267 | if (stream_status->plane_states[i]->visible) |
1268 | plane_state = stream_status->plane_states[i]; |
1269 | } |
1270 | if (!plane_state) |
1271 | return; |
1272 | /* find pipe that contains plane with highest layer index */ |
1273 | for (i = 0; i < MAX_PIPES; i++) { |
1274 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
1275 | |
1276 | if (pipe->plane_state == plane_state) { |
1277 | memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color)); |
1278 | return; |
1279 | } |
1280 | } |
1281 | } |
1282 | } |
1283 | |
1284 | static void disable_dangling_plane(struct dc *dc, struct dc_state *context) |
1285 | { |
1286 | int i, j; |
1287 | struct dc_state *dangling_context = dc_state_create_current_copy(dc); |
1288 | struct dc_state *current_ctx; |
1289 | struct pipe_ctx *pipe; |
1290 | struct timing_generator *tg; |
1291 | |
1292 | if (dangling_context == NULL) |
1293 | return; |
1294 | |
1295 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1296 | struct dc_stream_state *old_stream = |
1297 | dc->current_state->res_ctx.pipe_ctx[i].stream; |
1298 | bool should_disable = true; |
1299 | bool pipe_split_change = false; |
1300 | |
1301 | if ((context->res_ctx.pipe_ctx[i].top_pipe) && |
1302 | (dc->current_state->res_ctx.pipe_ctx[i].top_pipe)) |
1303 | pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx != |
1304 | dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx; |
1305 | else |
1306 | pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe != |
1307 | dc->current_state->res_ctx.pipe_ctx[i].top_pipe; |
1308 | |
1309 | for (j = 0; j < context->stream_count; j++) { |
1310 | if (old_stream == context->streams[j]) { |
1311 | should_disable = false; |
1312 | break; |
1313 | } |
1314 | } |
1315 | if (!should_disable && pipe_split_change && |
1316 | dc->current_state->stream_count != context->stream_count) |
1317 | should_disable = true; |
1318 | |
1319 | if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe && |
1320 | !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) { |
1321 | struct pipe_ctx *old_pipe, *new_pipe; |
1322 | |
1323 | old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
1324 | new_pipe = &context->res_ctx.pipe_ctx[i]; |
1325 | |
1326 | if (old_pipe->plane_state && !new_pipe->plane_state) |
1327 | should_disable = true; |
1328 | } |
1329 | |
1330 | if (should_disable && old_stream) { |
1331 | bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM; |
1332 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
1333 | tg = pipe->stream_res.tg; |
1334 | /* When disabling plane for a phantom pipe, we must turn on the |
1335 | * phantom OTG so the disable programming gets the double buffer |
1336 | * update. Otherwise the pipe will be left in a partially disabled |
1337 | * state that can result in underflow or hang when enabling it |
1338 | * again for different use. |
1339 | */ |
1340 | if (is_phantom) { |
1341 | if (tg->funcs->enable_crtc) { |
1342 | if (dc->hwseq->funcs.blank_pixel_data) |
1343 | dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); |
1344 | tg->funcs->enable_crtc(tg); |
1345 | } |
1346 | } |
1347 | |
1348 | if (is_phantom) |
1349 | dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true); |
1350 | else |
1351 | dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context); |
1352 | disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); |
1353 | |
1354 | if (pipe->stream && pipe->plane_state) { |
1355 | if (!dc->debug.using_dml2) |
1356 | set_p_state_switch_method(dc, context, pipe); |
1357 | dc_update_visual_confirm_color(dc, context, pipe); |
1358 | } |
1359 | |
1360 | if (dc->hwss.apply_ctx_for_surface) { |
1361 | apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); |
1362 | dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); |
1363 | apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); |
1364 | dc->hwss.post_unlock_program_front_end(dc, dangling_context); |
1365 | } |
1366 | |
1367 | if (dc->res_pool->funcs->prepare_mcache_programming) |
1368 | dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context); |
1369 | if (dc->hwss.program_front_end_for_ctx) { |
1370 | dc->hwss.interdependent_update_lock(dc, dc->current_state, true); |
1371 | dc->hwss.program_front_end_for_ctx(dc, dangling_context); |
1372 | dc->hwss.interdependent_update_lock(dc, dc->current_state, false); |
1373 | dc->hwss.post_unlock_program_front_end(dc, dangling_context); |
1374 | } |
1375 | /* We need to put the phantom OTG back into its default (disabled) state or we |
1376 | * can get corruption when transitioning from one SubVP config to a different one. |
1377 | * The OTG is set to disable on the falling edge of VUPDATE so the plane disable |
1378 | * will still get its double buffer update. |
1379 | */ |
1380 | if (is_phantom) { |
1381 | if (tg->funcs->disable_phantom_crtc) |
1382 | tg->funcs->disable_phantom_crtc(tg); |
1383 | } |
1384 | } |
1385 | } |
1386 | |
1387 | current_ctx = dc->current_state; |
1388 | dc->current_state = dangling_context; |
1389 | dc_state_release(current_ctx); |
1390 | } |
1391 | |
1392 | static void disable_vbios_mode_if_required( |
1393 | struct dc *dc, |
1394 | struct dc_state *context) |
1395 | { |
1396 | unsigned int i, j; |
1397 | |
1398 | /* check if timing_changed, disable stream*/ |
1399 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
1400 | struct dc_stream_state *stream = NULL; |
1401 | struct dc_link *link = NULL; |
1402 | struct pipe_ctx *pipe = NULL; |
1403 | |
1404 | pipe = &context->res_ctx.pipe_ctx[i]; |
1405 | stream = pipe->stream; |
1406 | if (stream == NULL) |
1407 | continue; |
1408 | |
1409 | if (stream->apply_seamless_boot_optimization) |
1410 | continue; |
1411 | |
1412 | // only looking for first odm pipe |
1413 | if (pipe->prev_odm_pipe) |
1414 | continue; |
1415 | |
1416 | if (stream->link->local_sink && |
1417 | stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { |
1418 | link = stream->link; |
1419 | } |
1420 | |
1421 | if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { |
1422 | unsigned int enc_inst, tg_inst = 0; |
1423 | unsigned int pix_clk_100hz = 0; |
1424 | |
1425 | enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); |
1426 | if (enc_inst != ENGINE_ID_UNKNOWN) { |
1427 | for (j = 0; j < dc->res_pool->stream_enc_count; j++) { |
1428 | if (dc->res_pool->stream_enc[j]->id == enc_inst) { |
1429 | tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( |
1430 | dc->res_pool->stream_enc[j]); |
1431 | break; |
1432 | } |
1433 | } |
1434 | |
1435 | dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( |
1436 | dc->res_pool->dp_clock_source, |
1437 | tg_inst, &pix_clk_100hz); |
1438 | |
1439 | if (link->link_status.link_active) { |
1440 | uint32_t requested_pix_clk_100hz = |
1441 | pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; |
1442 | |
1443 | if (pix_clk_100hz != requested_pix_clk_100hz) { |
1444 | dc->link_srv->set_dpms_off(pipe); |
1445 | pipe->stream->dpms_off = false; |
1446 | } |
1447 | } |
1448 | } |
1449 | } |
1450 | } |
1451 | } |
1452 | |
1453 | /* Public functions */ |
1454 | |
1455 | struct dc *dc_create(const struct dc_init_data *init_params) |
1456 | { |
1457 | struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
1458 | unsigned int full_pipe_count; |
1459 | |
1460 | if (!dc) |
1461 | return NULL; |
1462 | |
1463 | if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { |
1464 | dc->caps.linear_pitch_alignment = 64; |
1465 | if (!dc_construct_ctx(dc, init_params)) |
1466 | goto destruct_dc; |
1467 | } else { |
1468 | if (!dc_construct(dc, init_params)) |
1469 | goto destruct_dc; |
1470 | |
1471 | full_pipe_count = dc->res_pool->pipe_count; |
1472 | if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) |
1473 | full_pipe_count--; |
1474 | dc->caps.max_streams = min( |
1475 | full_pipe_count, |
1476 | dc->res_pool->stream_enc_count); |
1477 | |
1478 | dc->caps.max_links = dc->link_count; |
1479 | dc->caps.max_audios = dc->res_pool->audio_count; |
1480 | dc->caps.linear_pitch_alignment = 64; |
1481 | |
1482 | dc->caps.max_dp_protocol_version = DP_VERSION_1_4; |
1483 | |
1484 | dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; |
1485 | |
1486 | if (dc->res_pool->dmcu != NULL) |
1487 | dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; |
1488 | } |
1489 | |
1490 | dc->dcn_reg_offsets = init_params->dcn_reg_offsets; |
1491 | dc->nbio_reg_offsets = init_params->nbio_reg_offsets; |
1492 | dc->clk_reg_offsets = init_params->clk_reg_offsets; |
1493 | |
1494 | /* Populate versioning information */ |
1495 | dc->versions.dc_ver = DC_VER; |
1496 | |
1497 | dc->build_id = DC_BUILD_ID; |
1498 | |
1499 | DC_LOG_DC("Display Core initialized\n"); |
1500 | |
1501 | return dc; |
1502 | |
1503 | destruct_dc: |
1504 | dc_destruct(dc); |
1505 | kfree(dc); |
1506 | return NULL; |
1507 | } |
1508 | |
1509 | static void detect_edp_presence(struct dc *dc) |
1510 | { |
1511 | struct dc_link *edp_links[MAX_NUM_EDP]; |
1512 | struct dc_link *edp_link = NULL; |
1513 | enum dc_connection_type type; |
1514 | int i; |
1515 | int edp_num; |
1516 | |
1517 | dc_get_edp_links(dc, edp_links, &edp_num); |
1518 | if (!edp_num) |
1519 | return; |
1520 | |
1521 | for (i = 0; i < edp_num; i++) { |
1522 | edp_link = edp_links[i]; |
1523 | if (dc->config.edp_not_connected) { |
1524 | edp_link->edp_sink_present = false; |
1525 | } else { |
1526 | dc_link_detect_connection_type(edp_link, &type); |
1527 | edp_link->edp_sink_present = (type != dc_connection_none); |
1528 | } |
1529 | } |
1530 | } |
1531 | |
1532 | void dc_hardware_init(struct dc *dc) |
1533 | { |
1534 | |
1535 | detect_edp_presence(dc); |
1536 | if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) |
1537 | dc->hwss.init_hw(dc); |
1538 | dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); |
1539 | } |
1540 | |
1541 | void dc_init_callbacks(struct dc *dc, |
1542 | const struct dc_callback_init *init_params) |
1543 | { |
1544 | dc->ctx->cp_psp = init_params->cp_psp; |
1545 | } |
1546 | |
1547 | void dc_deinit_callbacks(struct dc *dc) |
1548 | { |
1549 | memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); |
1550 | } |
1551 | |
1552 | void dc_destroy(struct dc **dc) |
1553 | { |
1554 | dc_destruct(*dc); |
1555 | kfree(*dc); |
1556 | *dc = NULL; |
1557 | } |
1558 | |
1559 | static void enable_timing_multisync( |
1560 | struct dc *dc, |
1561 | struct dc_state *ctx) |
1562 | { |
1563 | int i, multisync_count = 0; |
1564 | int pipe_count = dc->res_pool->pipe_count; |
1565 | struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; |
1566 | |
1567 | for (i = 0; i < pipe_count; i++) { |
1568 | if (!ctx->res_ctx.pipe_ctx[i].stream || |
1569 | !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) |
1570 | continue; |
1571 | if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) |
1572 | continue; |
1573 | multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; |
1574 | multisync_count++; |
1575 | } |
1576 | |
1577 | if (multisync_count > 0) { |
1578 | dc->hwss.enable_per_frame_crtc_position_reset( |
1579 | dc, multisync_count, multisync_pipes); |
1580 | } |
1581 | } |
1582 | |
1583 | static void program_timing_sync( |
1584 | struct dc *dc, |
1585 | struct dc_state *ctx) |
1586 | { |
1587 | int i, j, k; |
1588 | int group_index = 0; |
1589 | int num_group = 0; |
1590 | int pipe_count = dc->res_pool->pipe_count; |
1591 | struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; |
1592 | |
1593 | for (i = 0; i < pipe_count; i++) { |
1594 | if (!ctx->res_ctx.pipe_ctx[i].stream |
1595 | || ctx->res_ctx.pipe_ctx[i].top_pipe |
1596 | || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) |
1597 | continue; |
1598 | |
1599 | unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; |
1600 | } |
1601 | |
1602 | for (i = 0; i < pipe_count; i++) { |
1603 | int group_size = 1; |
1604 | enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; |
1605 | struct pipe_ctx *pipe_set[MAX_PIPES]; |
1606 | |
1607 | if (!unsynced_pipes[i]) |
1608 | continue; |
1609 | |
1610 | pipe_set[0] = unsynced_pipes[i]; |
1611 | unsynced_pipes[i] = NULL; |
1612 | |
1613 | /* Add tg to the set, search rest of the tg's for ones with |
1614 | * same timing, add all tgs with same timing to the group |
1615 | */ |
1616 | for (j = i + 1; j < pipe_count; j++) { |
1617 | if (!unsynced_pipes[j]) |
1618 | continue; |
1619 | if (sync_type != TIMING_SYNCHRONIZABLE && |
1620 | dc->hwss.enable_vblanks_synchronization && |
1621 | unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && |
1622 | resource_are_vblanks_synchronizable( |
1623 | unsynced_pipes[j]->stream, |
1624 | pipe_set[0]->stream)) { |
1625 | sync_type = VBLANK_SYNCHRONIZABLE; |
1626 | pipe_set[group_size] = unsynced_pipes[j]; |
1627 | unsynced_pipes[j] = NULL; |
1628 | group_size++; |
1629 | } else |
1630 | if (sync_type != VBLANK_SYNCHRONIZABLE && |
1631 | resource_are_streams_timing_synchronizable( |
1632 | unsynced_pipes[j]->stream, |
1633 | pipe_set[0]->stream)) { |
1634 | sync_type = TIMING_SYNCHRONIZABLE; |
1635 | pipe_set[group_size] = unsynced_pipes[j]; |
1636 | unsynced_pipes[j] = NULL; |
1637 | group_size++; |
1638 | } |
1639 | } |
1640 | |
1641 | /* set first unblanked pipe as master */ |
1642 | for (j = 0; j < group_size; j++) { |
1643 | bool is_blanked; |
1644 | |
1645 | if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) |
1646 | is_blanked = |
1647 | pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); |
1648 | else |
1649 | is_blanked = |
1650 | pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); |
1651 | if (!is_blanked) { |
1652 | if (j == 0) |
1653 | break; |
1654 | |
1655 | swap(pipe_set[0], pipe_set[j]); |
1656 | break; |
1657 | } |
1658 | } |
1659 | |
1660 | for (k = 0; k < group_size; k++) { |
1661 | struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream); |
1662 | |
1663 | if (!status) |
1664 | continue; |
1665 | |
1666 | status->timing_sync_info.group_id = num_group; |
1667 | status->timing_sync_info.group_size = group_size; |
1668 | if (k == 0) |
1669 | status->timing_sync_info.master = true; |
1670 | else |
1671 | status->timing_sync_info.master = false; |
1672 | |
1673 | } |
1674 | |
1675 | /* remove any other unblanked pipes as they have already been synced */ |
1676 | if (dc->config.use_pipe_ctx_sync_logic) { |
1677 | /* check pipe's syncd to decide which pipe to be removed */ |
1678 | for (j = 1; j < group_size; j++) { |
1679 | if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { |
1680 | group_size--; |
1681 | pipe_set[j] = pipe_set[group_size]; |
1682 | j--; |
1683 | } else |
1684 | /* link slave pipe's syncd with master pipe */ |
1685 | pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; |
1686 | } |
1687 | } else { |
1688 | /* remove any other pipes by checking valid plane */ |
1689 | for (j = j + 1; j < group_size; j++) { |
1690 | bool is_blanked; |
1691 | |
1692 | if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) |
1693 | is_blanked = |
1694 | pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); |
1695 | else |
1696 | is_blanked = |
1697 | pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); |
1698 | if (!is_blanked) { |
1699 | group_size--; |
1700 | pipe_set[j] = pipe_set[group_size]; |
1701 | j--; |
1702 | } |
1703 | } |
1704 | } |
1705 | |
1706 | if (group_size > 1) { |
1707 | if (sync_type == TIMING_SYNCHRONIZABLE) { |
1708 | dc->hwss.enable_timing_synchronization( |
1709 | dc, ctx, group_index, group_size, pipe_set); |
1710 | } else |
1711 | if (sync_type == VBLANK_SYNCHRONIZABLE) { |
1712 | dc->hwss.enable_vblanks_synchronization( |
1713 | dc, group_index, group_size, pipe_set); |
1714 | } |
1715 | group_index++; |
1716 | } |
1717 | num_group++; |
1718 | } |
1719 | } |
1720 | |
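/* Return true if the requested stream set differs from the currently
 * committed state, or if any stream's link state is no longer valid.
 */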
1721 | static bool streams_changed(struct dc *dc, |
1722 | struct dc_stream_state *streams[], |
1723 | uint8_t stream_count) |
1724 | { |
1725 | uint8_t i; |
1726 | |
1727 | if (stream_count != dc->current_state->stream_count) |
1728 | return true; |
1729 | |
1730 | for (i = 0; i < dc->current_state->stream_count; i++) { |
1731 | if (dc->current_state->streams[i] != streams[i]) |
1732 | return true; |
1733 | if (!streams[i]->link->link_state_valid) |
1734 | return true; |
1735 | } |
1736 | |
1737 | return false; |
1738 | } |
1739 | |
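/**
 * dc_validate_boot_timing - Check whether the VBIOS-programmed timing can be reused
 *
 * @dc: DC object
 * @sink: Sink lit up at boot (seamless boot is supported on eDP only)
 * @crtc_timing: Timing requested by the OS
 *
 * Compares the timing currently programmed in hardware against @crtc_timing
 * so the first modeset can be skipped (seamless boot).
 *
 * Return: true if the hardware timing matches and can be reused, false otherwise.
 */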
1740 | bool dc_validate_boot_timing(const struct dc *dc, |
1741 | const struct dc_sink *sink, |
1742 | struct dc_crtc_timing *crtc_timing) |
1743 | { |
1744 | struct timing_generator *tg; |
1745 | struct stream_encoder *se = NULL; |
1746 | |
1747 | struct dc_crtc_timing hw_crtc_timing = {0}; |
1748 | |
1749 | struct dc_link *link = sink->link; |
1750 | unsigned int i, enc_inst, tg_inst = 0; |
1751 | |
1752 | /* Support seamless boot on EDP displays only */ |
1753 | if (sink->sink_signal != SIGNAL_TYPE_EDP) { |
1754 | return false; |
1755 | } |
1756 | |
1757 | if (dc->debug.force_odm_combine) { |
1758 | DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n"); |
1759 | return false; |
1760 | } |
1761 | |
1762 | /* Check for enabled DIG to identify enabled display */ |
1763 | if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) { |
1764 | DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n"); |
1765 | return false; |
1766 | } |
1767 | |
1768 | enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); |
1769 | |
1770 | if (enc_inst == ENGINE_ID_UNKNOWN) { |
1771 | DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n"); |
1772 | return false; |
1773 | } |
1774 | |
1775 | for (i = 0; i < dc->res_pool->stream_enc_count; i++) { |
1776 | if (dc->res_pool->stream_enc[i]->id == enc_inst) { |
1777 | |
1778 | se = dc->res_pool->stream_enc[i]; |
1779 | |
1780 | tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( |
1781 | dc->res_pool->stream_enc[i]); |
1782 | break; |
1783 | } |
1784 | } |
1785 | |
1786 | // tg_inst not found |
1787 | if (i == dc->res_pool->stream_enc_count) { |
1788 | DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n"); |
1789 | return false; |
1790 | } |
1791 | |
1792 | if (tg_inst >= dc->res_pool->timing_generator_count) { |
1793 | DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n"); |
1794 | return false; |
1795 | } |
1796 | |
1797 | if (tg_inst != link->link_enc->preferred_engine) { |
1798 | DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n"); |
1799 | return false; |
1800 | } |
1801 | |
1802 | tg = dc->res_pool->timing_generators[tg_inst]; |
1803 | |
1804 | if (!tg->funcs->get_hw_timing) { |
1805 | DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n"); |
1806 | return false; |
1807 | } |
1808 | |
1809 | if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) { |
1810 | DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n"); |
1811 | return false; |
1812 | } |
1813 | |
1814 | if (crtc_timing->h_total != hw_crtc_timing.h_total) { |
1815 | DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n"); |
1816 | return false; |
1817 | } |
1818 | |
1819 | if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) { |
1820 | DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n"); |
1821 | return false; |
1822 | } |
1823 | |
1824 | if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) { |
1825 | DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n"); |
1826 | return false; |
1827 | } |
1828 | |
1829 | if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) { |
1830 | DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n"); |
1831 | return false; |
1832 | } |
1833 | |
1834 | if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) { |
1835 | DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n"); |
1836 | return false; |
1837 | } |
1838 | |
1839 | if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) { |
1840 | DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n"); |
1841 | return false; |
1842 | } |
1843 | |
1844 | if (crtc_timing->v_total != hw_crtc_timing.v_total) { |
1845 | DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n"); |
1846 | return false; |
1847 | } |
1848 | |
1849 | if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) { |
1850 | DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n"); |
1851 | return false; |
1852 | } |
1853 | |
1854 | if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) { |
1855 | DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n"); |
1856 | return false; |
1857 | } |
1858 | |
1859 | if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) { |
1860 | DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n"); |
1861 | return false; |
1862 | } |
1863 | |
1864 | if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) { |
1865 | DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n"); |
1866 | return false; |
1867 | } |
1868 | |
1869 | if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) { |
1870 | DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n"); |
1871 | return false; |
1872 | } |
1873 | |
1874 | /* block DSC for now, as VBIOS does not currently support DSC timings */ |
1875 | if (crtc_timing->flags.DSC) { |
1876 | DC_LOG_DEBUG("boot timing validation failed due to DSC\n"); |
1877 | return false; |
1878 | } |
1879 | |
1880 | if (dc_is_dp_signal(link->connector_signal)) { |
1881 | unsigned int pix_clk_100hz = 0; |
1882 | uint32_t numOdmPipes = 1; |
1883 | uint32_t id_src[4] = {0}; |
1884 | |
1885 | dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( |
1886 | dc->res_pool->dp_clock_source, |
1887 | tg_inst, &pix_clk_100hz); |
1888 | |
1889 | if (tg->funcs->get_optc_source) |
1890 | tg->funcs->get_optc_source(tg, |
1891 | &numOdmPipes, &id_src[0], &id_src[1]); |
1892 | |
1893 | if (numOdmPipes == 2) { |
1894 | pix_clk_100hz *= 2; |
1895 | } else if (numOdmPipes == 4) { |
1896 | pix_clk_100hz *= 4; |
1897 | } else if (se && se->funcs->get_pixels_per_cycle) { |
1898 | uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se); |
1899 | |
1900 | if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) { |
1901 | DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n"); |
1902 | return false; |
1903 | } |
1904 | |
1905 | pix_clk_100hz *= pixels_per_cycle; |
1906 | } |
1907 | |
1908 | // Note: In rare cases, HW pixclk may differ from crtc's pixclk |
1909 | // slightly due to rounding issues in 10 kHz units. |
1910 | if (crtc_timing->pix_clk_100hz != pix_clk_100hz) { |
1911 | DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n"); |
1912 | return false; |
1913 | } |
1914 | |
1915 | if (!se || !se->funcs->dp_get_pixel_format) { |
1916 | DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n"); |
1917 | return false; |
1918 | } |
1919 | |
1920 | if (!se->funcs->dp_get_pixel_format( |
1921 | se, |
1922 | &hw_crtc_timing.pixel_encoding, |
1923 | &hw_crtc_timing.display_color_depth)) { |
1924 | DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n"); |
1925 | return false; |
1926 | } |
1927 | |
1928 | if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) { |
1929 | DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n"); |
1930 | return false; |
1931 | } |
1932 | |
1933 | if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) { |
1934 | DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n"); |
1935 | return false; |
1936 | } |
1937 | } |
1938 | |
1939 | |
1940 | if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { |
1941 | DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n"); |
1942 | return false; |
1943 | } |
1944 | |
1945 | if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { |
1946 | DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n"); |
1947 | return false; |
1948 | } |
1949 | |
1950 | if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { |
1951 | DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); |
1952 | return false; |
1953 | } |
1954 | |
1955 | return true; |
1956 | } |
1957 | |
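/* Helpers used while walking the pipe contexts to pick out the pipes that
 * belong to the stream or plane currently being updated.
 */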
1958 | static inline bool should_update_pipe_for_stream( |
1959 | struct dc_state *context, |
1960 | struct pipe_ctx *pipe_ctx, |
1961 | struct dc_stream_state *stream) |
1962 | { |
1963 | return (pipe_ctx->stream && pipe_ctx->stream == stream); |
1964 | } |
1965 | |
1966 | static inline bool should_update_pipe_for_plane( |
1967 | struct dc_state *context, |
1968 | struct pipe_ctx *pipe_ctx, |
1969 | struct dc_plane_state *plane_state) |
1970 | { |
1971 | return (pipe_ctx->plane_state == plane_state); |
1972 | } |
1973 | |
1974 | void dc_enable_stereo( |
1975 | struct dc *dc, |
1976 | struct dc_state *context, |
1977 | struct dc_stream_state *streams[], |
1978 | uint8_t stream_count) |
1979 | { |
1980 | int i, j; |
1981 | struct pipe_ctx *pipe; |
1982 | |
1983 | dc_exit_ips_for_hw_access(dc); |
1984 | |
1985 | for (i = 0; i < MAX_PIPES; i++) { |
1986 | if (context != NULL) { |
1987 | pipe = &context->res_ctx.pipe_ctx[i]; |
1988 | } else { |
1989 | context = dc->current_state; |
1990 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
1991 | } |
1992 | |
1993 | for (j = 0; pipe && j < stream_count; j++) { |
1994 | if (should_update_pipe_for_stream(context, pipe, streams[j]) && |
1995 | dc->hwss.setup_stereo) |
1996 | dc->hwss.setup_stereo(pipe, dc); |
1997 | } |
1998 | } |
1999 | } |
2000 | |
2001 | void dc_trigger_sync(struct dc *dc, struct dc_state *context) |
2002 | { |
2003 | if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { |
2004 | dc_exit_ips_for_hw_access(dc); |
2005 | |
2006 | enable_timing_multisync(dc, context); |
2007 | program_timing_sync(dc, context); |
2008 | } |
2009 | } |
2010 | |
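/* Build a bitmask with one bit set per pipe that currently has a stream
 * attached; the mask is reported to DMUB firmware when it changes.
 */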
2011 | static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) |
2012 | { |
2013 | int i; |
2014 | unsigned int stream_mask = 0; |
2015 | |
2016 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2017 | if (context->res_ctx.pipe_ctx[i].stream) |
2018 | stream_mask |= 1 << i; |
2019 | } |
2020 | |
2021 | return stream_mask; |
2022 | } |
2023 | |
2024 | void dc_z10_restore(const struct dc *dc) |
2025 | { |
2026 | if (dc->hwss.z10_restore) |
2027 | dc->hwss.z10_restore(dc); |
2028 | } |
2029 | |
2030 | void dc_z10_save_init(struct dc *dc) |
2031 | { |
2032 | if (dc->hwss.z10_save_init) |
2033 | dc->hwss.z10_save_init(dc); |
2034 | } |
2035 | |
2036 | /* Set a pipe unlock order based on the change in DET allocation and store it in dc scratch memory. |
2037 | * This prevents over-allocation of DET during the unlock process, |
2038 | * e.g. 2 pipe config with different streams with a max of 20 DET segments |
2039 | * Before: After: |
2040 | * - Pipe0: 10 DET segments - Pipe0: 12 DET segments |
2041 | * - Pipe1: 10 DET segments - Pipe1: 8 DET segments |
2042 | * If Pipe0 gets updated first, 22 DET segments will be allocated |
2043 | */ |
2044 | static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) |
2045 | { |
2046 | unsigned int i = 0; |
2047 | struct pipe_ctx *pipe = NULL; |
2048 | struct timing_generator *tg = NULL; |
2049 | |
2050 | if (!dc->config.set_pipe_unlock_order) |
2051 | return; |
2052 | |
2053 | memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); |
2054 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2055 | pipe = &context->res_ctx.pipe_ctx[i]; |
2056 | tg = pipe->stream_res.tg; |
2057 | |
2058 | if (!resource_is_pipe_type(pipe, OTG_MASTER) || |
2059 | !tg->funcs->is_tg_enabled(tg) || |
2060 | dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { |
2061 | continue; |
2062 | } |
2063 | |
2064 | if (resource_calculate_det_for_stream(context, pipe) < |
2065 | resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { |
2066 | dc->scratch.pipes_to_unlock_first[i] = true; |
2067 | } |
2068 | } |
2069 | } |
2070 | |
2071 | /** |
2072 | * dc_commit_state_no_check - Apply context to the hardware |
2073 | * |
2074 | * @dc: DC object with the current status to be updated |
2075 | * @context: New state that will become the current status at the end of this function |
2076 | * |
2077 | * Applies the given context to the hardware and copies it into the current context. |
2078 | * It's up to the user to release the src context afterwards. |
2079 | * |
2080 | * Return: an enum dc_status result code for the operation |
2081 | */ |
2082 | static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) |
2083 | { |
2084 | struct dc_bios *dcb = dc->ctx->dc_bios; |
2085 | enum dc_status result = DC_ERROR_UNEXPECTED; |
2086 | struct pipe_ctx *pipe; |
2087 | int i, k, l; |
2088 | struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; |
2089 | struct dc_state *old_state; |
2090 | bool subvp_prev_use = false; |
2091 | |
2092 | dc_z10_restore(dc); |
2093 | dc_allow_idle_optimizations(dc, false); |
2094 | |
2095 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2096 | struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
2097 | |
2098 | /* Check old context for SubVP */ |
2099 | subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); |
2100 | if (subvp_prev_use) |
2101 | break; |
2102 | } |
2103 | |
2104 | for (i = 0; i < context->stream_count; i++) |
2105 | dc_streams[i] = context->streams[i]; |
2106 | |
2107 | if (!dcb->funcs->is_accelerated_mode(dcb)) { |
2108 | disable_vbios_mode_if_required(dc, context); |
2109 | dc->hwss.enable_accelerated_mode(dc, context); |
2110 | } |
2111 | |
2112 | if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) { |
2113 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2114 | pipe = &context->res_ctx.pipe_ctx[i]; |
2115 | //Only delay otg master for a given config |
2116 | if (resource_is_pipe_type(pipe, OTG_MASTER)) { |
2117 | //dc_commit_state_no_check is always a full update |
2118 | dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false); |
2119 | break; |
2120 | } |
2121 | } |
2122 | } |
2123 | |
2124 | if (context->stream_count > get_seamless_boot_stream_count(context) || |
2125 | context->stream_count == 0) |
2126 | dc->hwss.prepare_bandwidth(dc, context); |
2127 | |
2128 | /* When SubVP is active, all HW programming must be done while |
2129 | * SubVP lock is acquired |
2130 | */ |
2131 | if (dc->hwss.subvp_pipe_control_lock) |
2132 | dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); |
2133 | if (dc->hwss.fams2_global_control_lock) |
2134 | dc->hwss.fams2_global_control_lock(dc, context, true); |
2135 | |
2136 | if (dc->hwss.update_dsc_pg) |
2137 | dc->hwss.update_dsc_pg(dc, context, false); |
2138 | |
2139 | disable_dangling_plane(dc, context); |
2140 | /* re-program planes for existing stream, in case we need to |
2141 | * free up plane resource for later use |
2142 | */ |
2143 | if (dc->hwss.apply_ctx_for_surface) { |
2144 | for (i = 0; i < context->stream_count; i++) { |
2145 | if (context->streams[i]->mode_changed) |
2146 | continue; |
2147 | apply_ctx_interdependent_lock(dc, context, context->streams[i], true); |
2148 | dc->hwss.apply_ctx_for_surface( |
2149 | dc, context->streams[i], |
2150 | context->stream_status[i].plane_count, |
2151 | context); /* use new pipe config in new context */ |
2152 | apply_ctx_interdependent_lock(dc, context, context->streams[i], false); |
2153 | dc->hwss.post_unlock_program_front_end(dc, context); |
2154 | } |
2155 | } |
2156 | |
2157 | /* Program hardware */ |
2158 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2159 | pipe = &context->res_ctx.pipe_ctx[i]; |
2160 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); |
2161 | } |
2162 | |
2163 | result = dc->hwss.apply_ctx_to_hw(dc, context); |
2164 | |
2165 | if (result != DC_OK) { |
2166 | /* Application of dc_state to hardware stopped. */ |
2167 | dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; |
2168 | return result; |
2169 | } |
2170 | |
2171 | dc_trigger_sync(dc, context); |
2172 | |
2173 | /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ |
2174 | for (i = 0; i < context->stream_count; i++) { |
2175 | uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; |
2176 | |
2177 | context->streams[i]->update_flags.raw = 0xFFFFFFFF; |
2178 | context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; |
2179 | } |
2180 | |
2181 | determine_pipe_unlock_order(dc, context); |
2182 | /* Program all planes within new context*/ |
2183 | if (dc->res_pool->funcs->prepare_mcache_programming) |
2184 | dc->res_pool->funcs->prepare_mcache_programming(dc, context); |
2185 | if (dc->hwss.program_front_end_for_ctx) { |
2186 | dc->hwss.interdependent_update_lock(dc, context, true); |
2187 | dc->hwss.program_front_end_for_ctx(dc, context); |
2188 | |
2189 | if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) { |
2190 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2191 | pipe = &context->res_ctx.pipe_ctx[i]; |
2192 | dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe); |
2193 | } |
2194 | } |
2195 | |
2196 | dc->hwss.interdependent_update_lock(dc, context, false); |
2197 | dc->hwss.post_unlock_program_front_end(dc, context); |
2198 | } |
2199 | |
2200 | if (dc->hwss.commit_subvp_config) |
2201 | dc->hwss.commit_subvp_config(dc, context); |
2202 | if (dc->hwss.subvp_pipe_control_lock) |
2203 | dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); |
2204 | if (dc->hwss.fams2_global_control_lock) |
2205 | dc->hwss.fams2_global_control_lock(dc, context, false); |
2206 | |
2207 | for (i = 0; i < context->stream_count; i++) { |
2208 | const struct dc_link *link = context->streams[i]->link; |
2209 | |
2210 | if (!context->streams[i]->mode_changed) |
2211 | continue; |
2212 | |
2213 | if (dc->hwss.apply_ctx_for_surface) { |
2214 | apply_ctx_interdependent_lock(dc, context, context->streams[i], true); |
2215 | dc->hwss.apply_ctx_for_surface( |
2216 | dc, context->streams[i], |
2217 | context->stream_status[i].plane_count, |
2218 | context); |
2219 | apply_ctx_interdependent_lock(dc, context, context->streams[i], false); |
2220 | dc->hwss.post_unlock_program_front_end(dc, context); |
2221 | } |
2222 | |
2223 | /* |
2224 | * enable stereo |
2225 | * TODO rework dc_enable_stereo call to work with validation sets? |
2226 | */ |
2227 | for (k = 0; k < MAX_PIPES; k++) { |
2228 | pipe = &context->res_ctx.pipe_ctx[k]; |
2229 | |
2230 | for (l = 0 ; pipe && l < context->stream_count; l++) { |
2231 | if (context->streams[l] && |
2232 | context->streams[l] == pipe->stream && |
2233 | dc->hwss.setup_stereo) |
2234 | dc->hwss.setup_stereo(pipe, dc); |
2235 | } |
2236 | } |
2237 | |
2238 | CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", |
2239 | context->streams[i]->timing.h_addressable, |
2240 | context->streams[i]->timing.v_addressable, |
2241 | context->streams[i]->timing.h_total, |
2242 | context->streams[i]->timing.v_total, |
2243 | context->streams[i]->timing.pix_clk_100hz / 10); |
2244 | } |
2245 | |
2246 | dc_enable_stereo(dc, context, dc_streams, context->stream_count); |
2247 | |
2248 | if (get_seamless_boot_stream_count(context) == 0 || |
2249 | context->stream_count == 0) { |
2250 | /* Must wait for no flips to be pending before doing optimize bw */ |
2251 | hwss_wait_for_no_pipes_pending(dc, context); |
2252 | /* |
2253 | * optimized dispclk depends on ODM setup. Need to wait for ODM |
2254 | * update pending complete before optimizing bandwidth. |
2255 | */ |
2256 | hwss_wait_for_odm_update_pending_complete(dc, context); |
2257 | /* pplib is notified if disp_num changed */ |
2258 | dc->hwss.optimize_bandwidth(dc, context); |
2259 | /* Need to do otg sync again as otg could be out of sync due to otg |
2260 | * workaround applied during clock update |
2261 | */ |
2262 | dc_trigger_sync(dc, context); |
2263 | } |
2264 | |
2265 | if (dc->hwss.update_dsc_pg) |
2266 | dc->hwss.update_dsc_pg(dc, context, true); |
2267 | |
2268 | if (dc->ctx->dce_version >= DCE_VERSION_MAX) |
2269 | TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); |
2270 | else |
2271 | TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); |
2272 | |
2273 | context->stream_mask = get_stream_mask(dc, context); |
2274 | |
2275 | if (context->stream_mask != dc->current_state->stream_mask) |
2276 | dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); |
2277 | |
2278 | for (i = 0; i < context->stream_count; i++) |
2279 | context->streams[i]->mode_changed = false; |
2280 | |
2281 | /* Clear update flags that were set earlier to avoid redundant programming */ |
2282 | for (i = 0; i < context->stream_count; i++) { |
2283 | context->streams[i]->update_flags.raw = 0x0; |
2284 | } |
2285 | |
2286 | old_state = dc->current_state; |
2287 | dc->current_state = context; |
2288 | |
2289 | dc_state_release(old_state); |
2290 | |
2291 | dc_state_retain(dc->current_state); |
2292 | |
2293 | return result; |
2294 | } |
2295 | |
2296 | static bool commit_minimal_transition_state(struct dc *dc, |
2297 | struct dc_state *transition_base_context); |
2298 | |
2299 | /** |
2300 | * dc_commit_streams - Commit current stream state |
2301 | * |
2302 | * @dc: DC object with the commit state to be configured in the hardware |
2303 | * @params: Parameters for the commit, including the streams to be committed |
2304 | * |
2305 | * Function responsible for committing stream changes to the hardware. |
2306 | * |
2307 | * Return: |
2308 | * DC_OK if everything works as expected; otherwise, a dc_status error |
2309 | * code. |
2310 | */ |
2311 | enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) |
2312 | { |
2313 | int i, j; |
2314 | struct dc_state *context; |
2315 | enum dc_status res = DC_OK; |
2316 | struct dc_validation_set set[MAX_STREAMS] = {0}; |
2317 | struct pipe_ctx *pipe; |
2318 | bool handle_exit_odm2to1 = false; |
2319 | |
2320 | if (!params) |
2321 | return DC_ERROR_UNEXPECTED; |
2322 | |
2323 | if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) |
2324 | return res; |
2325 | |
2326 | if (!streams_changed(dc, params->streams, params->stream_count) && |
2327 | dc->current_state->power_source == params->power_source) |
2328 | return res; |
2329 | |
2330 | dc_exit_ips_for_hw_access(dc); |
2331 | |
2332 | DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); |
2333 | |
2334 | for (i = 0; i < params->stream_count; i++) { |
2335 | struct dc_stream_state *stream = params->streams[i]; |
2336 | struct dc_stream_status *status = dc_stream_get_status(stream); |
2337 | struct dc_sink *sink = stream->sink; |
2338 | |
2339 | /* revalidate streams */ |
2340 | if (!dc_is_virtual_signal(sink->sink_signal)) { |
2341 | res = dc_validate_stream(dc, stream); |
2342 | if (res != DC_OK) |
2343 | return res; |
2344 | } |
2345 | |
2346 | |
2347 | dc_stream_log(dc, stream); |
2348 | |
2349 | set[i].stream = stream; |
2350 | |
2351 | if (status) { |
2352 | set[i].plane_count = status->plane_count; |
2353 | for (j = 0; j < status->plane_count; j++) |
2354 | set[i].plane_states[j] = status->plane_states[j]; |
2355 | } |
2356 | } |
2357 | |
2358 | /* ODM Combine 2:1 power optimization is only applied in the single-stream |
2359 | * scenario; it uses more pipes than needed to reduce power consumption. |
2360 | * We need to switch off this feature to make room for new streams. |
2361 | */ |
2362 | if (params->stream_count > dc->current_state->stream_count && |
2363 | dc->current_state->stream_count == 1) { |
2364 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2365 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
2366 | if (pipe->next_odm_pipe) |
2367 | handle_exit_odm2to1 = true; |
2368 | } |
2369 | } |
2370 | |
2371 | if (handle_exit_odm2to1) |
2372 | res = commit_minimal_transition_state(dc, dc->current_state); |
2373 | |
2374 | context = dc_state_create_current_copy(dc); |
2375 | if (!context) |
2376 | goto context_alloc_fail; |
2377 | |
2378 | context->power_source = params->power_source; |
2379 | |
2380 | res = dc_validate_with_context(dc, set, params->stream_count, context, false); |
2381 | |
2382 | /* |
2383 | * Only update link encoder to stream assignment after bandwidth validation passed. |
2384 | */ |
2385 | if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment) |
2386 | dc->res_pool->funcs->link_encs_assign( |
2387 | dc, context, context->streams, context->stream_count); |
2388 | |
2389 | if (res != DC_OK) { |
2390 | BREAK_TO_DEBUGGER(); |
2391 | goto fail; |
2392 | } |
2393 | |
2394 | res = dc_commit_state_no_check(dc, context); |
2395 | |
2396 | for (i = 0; i < params->stream_count; i++) { |
2397 | for (j = 0; j < context->stream_count; j++) { |
2398 | if (params->streams[i]->stream_id == context->streams[j]->stream_id) |
2399 | params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; |
2400 | |
2401 | if (dc_is_embedded_signal(params->streams[i]->signal)) { |
2402 | struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); |
2403 | |
2404 | if (!status) |
2405 | continue; |
2406 | |
2407 | if (dc->hwss.is_abm_supported) |
2408 | status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); |
2409 | else |
2410 | status->is_abm_supported = true; |
2411 | } |
2412 | } |
2413 | } |
2414 | |
2415 | fail: |
2416 | dc_state_release(context); |
2417 | |
2418 | context_alloc_fail: |
2419 | |
2420 | DC_LOG_DC("%s Finished.\n", __func__); |
2421 | |
2422 | return res; |
2423 | } |
2424 | |
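/**
 * dc_acquire_release_mpc_3dlut - Acquire or release a post-blend MPC 3D LUT
 *
 * @dc: DC object
 * @acquire: true to acquire, false to release
 * @stream: Stream whose pipe the LUT is acquired for (not needed on release)
 * @lut: 3D LUT being acquired or released
 * @shaper: Shaper transfer function being acquired or released
 *
 * On acquire, the MPCC instance is looked up from the pipe currently
 * assigned to @stream before the LUT and shaper are requested from the
 * resource pool.
 *
 * Return: true on success, false otherwise.
 */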
2425 | bool dc_acquire_release_mpc_3dlut( |
2426 | struct dc *dc, bool acquire, |
2427 | struct dc_stream_state *stream, |
2428 | struct dc_3dlut **lut, |
2429 | struct dc_transfer_func **shaper) |
2430 | { |
2431 | int pipe_idx; |
2432 | bool ret = false; |
2433 | bool found_pipe_idx = false; |
2434 | const struct resource_pool *pool = dc->res_pool; |
2435 | struct resource_context *res_ctx = &dc->current_state->res_ctx; |
2436 | int mpcc_id = 0; |
2437 | |
2438 | if (pool && res_ctx) { |
2439 | if (acquire) { |
2440 | /*find pipe idx for the given stream*/ |
2441 | for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { |
2442 | if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { |
2443 | found_pipe_idx = true; |
2444 | mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; |
2445 | break; |
2446 | } |
2447 | } |
2448 | } else |
2449 | found_pipe_idx = true;/*for release pipe_idx is not required*/ |
2450 | |
2451 | if (found_pipe_idx) { |
2452 | if (acquire && pool->funcs->acquire_post_bldn_3dlut) |
2453 | ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); |
2454 | else if (!acquire && pool->funcs->release_post_bldn_3dlut) |
2455 | ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); |
2456 | } |
2457 | } |
2458 | return ret; |
2459 | } |
2460 | |
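/* Return true if any non-phantom pipe with a plane still has a flip pending;
 * used to postpone bandwidth optimization until all flips have completed.
 */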
2461 | static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) |
2462 | { |
2463 | int i; |
2464 | struct pipe_ctx *pipe; |
2465 | |
2466 | for (i = 0; i < MAX_PIPES; i++) { |
2467 | pipe = &context->res_ctx.pipe_ctx[i]; |
2468 | |
2469 | // Don't check flip pending on phantom pipes |
2470 | if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) |
2471 | continue; |
2472 | |
2473 | /* Must set to false to start with, due to OR in update function */ |
2474 | pipe->plane_state->status.is_flip_pending = false; |
2475 | dc->hwss.update_pending_status(pipe); |
2476 | if (pipe->plane_state->status.is_flip_pending) |
2477 | return true; |
2478 | } |
2479 | return false; |
2480 | } |
2481 | |
2482 | /* Perform updates here which need to be deferred until next vupdate |
2483 | * |
2484 | * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered |
2485 | * but forcing lut memory to shutdown state is immediate. This causes |
2486 | * single frame corruption as lut gets disabled mid-frame unless shutdown |
2487 | * is deferred until after entering bypass. |
2488 | */ |
2489 | static void process_deferred_updates(struct dc *dc) |
2490 | { |
2491 | int i = 0; |
2492 | |
2493 | if (dc->debug.enable_mem_low_power.bits.cm) { |
2494 | ASSERT(dc->dcn_ip->max_num_dpp); |
2495 | for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) |
2496 | if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) |
2497 | dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); |
2498 | } |
2499 | } |
2500 | |
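/**
 * dc_post_update_surfaces_to_stream - Apply deferred post-update optimizations
 *
 * @dc: DC object with the current state to optimize
 *
 * Once surface updates have taken effect and no flips are pending, disable
 * pipes that no longer have a stream or plane, process deferred DPP updates
 * and optimize bandwidth. Skipped while seamless boot streams are still
 * active or when no optimization is required.
 */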
2501 | void dc_post_update_surfaces_to_stream(struct dc *dc) |
2502 | { |
2503 | int i; |
2504 | struct dc_state *context = dc->current_state; |
2505 | |
2506 | if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) |
2507 | return; |
2508 | |
2509 | post_surface_trace(dc); |
2510 | |
2511 | /* |
2512 | * Only relevant for DCN behavior where we can guarantee the optimization |
2513 | * is safe to apply - retain the legacy behavior for DCE. |
2514 | */ |
2515 | |
2516 | if (dc->ctx->dce_version < DCE_VERSION_MAX) |
2517 | TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); |
2518 | else { |
2519 | TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); |
2520 | |
2521 | if (is_flip_pending_in_pipes(dc, context)) |
2522 | return; |
2523 | |
2524 | for (i = 0; i < dc->res_pool->pipe_count; i++) |
2525 | if (context->res_ctx.pipe_ctx[i].stream == NULL || |
2526 | context->res_ctx.pipe_ctx[i].plane_state == NULL) { |
2527 | context->res_ctx.pipe_ctx[i].pipe_idx = i; |
2528 | dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); |
2529 | } |
2530 | |
2531 | process_deferred_updates(dc); |
2532 | |
2533 | dc->hwss.optimize_bandwidth(dc, context); |
2534 | |
2535 | if (dc->hwss.update_dsc_pg) |
2536 | dc->hwss.update_dsc_pg(dc, context, true); |
2537 | } |
2538 | |
2539 | dc->optimized_required = false; |
2540 | dc->wm_optimized_required = false; |
2541 | } |
2542 | |
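/**
 * dc_set_generic_gpio_for_stereo - Route the stereo sync signal through the generic GPIO mux
 *
 * @enable: true to drive the stereo sync signal from the mux, false to stop
 * @gpio_service: GPIO service used to look up and program the generic pin
 *
 * Return: true if the mux was configured successfully, false otherwise.
 */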
2543 | bool dc_set_generic_gpio_for_stereo(bool enable, |
2544 | struct gpio_service *gpio_service) |
2545 | { |
2546 | enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; |
2547 | struct gpio_pin_info pin_info; |
2548 | struct gpio *generic; |
2549 | struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), |
2550 | GFP_KERNEL); |
2551 | |
2552 | if (!config) |
2553 | return false; |
2554 | pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); |
2555 | |
2556 | if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { |
2557 | kfree(config); |
2558 | return false; |
2559 | } else { |
2560 | generic = dal_gpio_service_create_generic_mux( |
2561 | gpio_service, |
2562 | pin_info.offset, |
2563 | pin_info.mask); |
2564 | } |
2565 | |
2566 | if (!generic) { |
2567 | kfree(config); |
2568 | return false; |
2569 | } |
2570 | |
2571 | gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); |
2572 | |
2573 | config->enable_output_from_mux = enable; |
2574 | config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; |
2575 | |
2576 | if (gpio_result == GPIO_RESULT_OK) |
2577 | gpio_result = dal_mux_setup_config(generic, config); |
2578 | |
2579 | if (gpio_result == GPIO_RESULT_OK) { |
2580 | dal_gpio_close(generic); |
2581 | dal_gpio_destroy_generic_mux(&generic); |
2582 | kfree(config); |
2583 | return true; |
2584 | } else { |
2585 | dal_gpio_close(generic); |
2586 | dal_gpio_destroy_generic_mux(&generic); |
2587 | kfree(config); |
2588 | return false; |
2589 | } |
2590 | } |
2591 | |
2592 | static bool is_surface_in_context( |
2593 | const struct dc_state *context, |
2594 | const struct dc_plane_state *plane_state) |
2595 | { |
2596 | int j; |
2597 | |
2598 | for (j = 0; j < MAX_PIPES; j++) { |
2599 | const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
2600 | |
2601 | if (plane_state == pipe_ctx->plane_state) { |
2602 | return true; |
2603 | } |
2604 | } |
2605 | |
2606 | return false; |
2607 | } |
2608 | |
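/* Classify a plane_info update and set the matching surface update flags.
 * Changes that affect bandwidth or DML calculations (rotation, pixel format,
 * DCC, swizzle, ...) force a FULL update; the rest are MED or FAST.
 */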
2609 | static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) |
2610 | { |
2611 | union surface_update_flags *update_flags = &u->surface->update_flags; |
2612 | enum surface_update_type update_type = UPDATE_TYPE_FAST; |
2613 | |
2614 | if (!u->plane_info) |
2615 | return UPDATE_TYPE_FAST; |
2616 | |
2617 | if (u->plane_info->color_space != u->surface->color_space) { |
2618 | update_flags->bits.color_space_change = 1; |
2619 | elevate_update_type(&update_type, UPDATE_TYPE_MED); |
2620 | } |
2621 | |
2622 | if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { |
2623 | update_flags->bits.horizontal_mirror_change = 1; |
2624 | elevate_update_type(&update_type, UPDATE_TYPE_MED); |
2625 | } |
2626 | |
2627 | if (u->plane_info->rotation != u->surface->rotation) { |
2628 | update_flags->bits.rotation_change = 1; |
2629 | elevate_update_type(&update_type, UPDATE_TYPE_FULL); |
2630 | } |
2631 | |
2632 | if (u->plane_info->format != u->surface->format) { |
2633 | update_flags->bits.pixel_format_change = 1; |
2634 | elevate_update_type(&update_type, UPDATE_TYPE_FULL); |
2635 | } |
2636 | |
2637 | if (u->plane_info->stereo_format != u->surface->stereo_format) { |
2638 | update_flags->bits.stereo_format_change = 1; |
2639 | elevate_update_type(&update_type, UPDATE_TYPE_FULL); |
2640 | } |
2641 | |
2642 | if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { |
2643 | update_flags->bits.per_pixel_alpha_change = 1; |
2644 | elevate_update_type(&update_type, UPDATE_TYPE_MED); |
2645 | } |
2646 | |
2647 | if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { |
2648 | update_flags->bits.global_alpha_change = 1; |
2649 | elevate_update_type(&update_type, UPDATE_TYPE_MED); |
2650 | } |
2651 | |
2652 | if (u->plane_info->dcc.enable != u->surface->dcc.enable |
2653 | || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk |
2654 | || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { |
2655 | /* During DCC on/off, stutter period is calculated before |
2656 | * DCC has fully transitioned. This results in incorrect |
2657 | * stutter period calculation. Triggering a full update will |
2658 | * recalculate stutter period. |
2659 | */ |
2660 | update_flags->bits.dcc_change = 1; |
2661 | elevate_update_type(&update_type, UPDATE_TYPE_FULL); |
2662 | } |
2663 | |
2664 | if (resource_pixel_format_to_bpp(u->plane_info->format) != |
2665 | resource_pixel_format_to_bpp(u->surface->format)) { |
2666 | /* different bytes per element will require full bandwidth |
2667 | * and DML calculation |
2668 | */ |
2669 | update_flags->bits.bpp_change = 1; |
2670 | elevate_update_type(&update_type, UPDATE_TYPE_FULL); |
2671 | } |
2672 | |
2673 | if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch |
2674 | || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { |
2675 | update_flags->bits.plane_size_change = 1; |
2676 | elevate_update_type(&update_type, UPDATE_TYPE_MED); |
2677 | } |
2678 | |
2679 | |
2680 | if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, |
2681 | sizeof(struct dc_tiling_info)) != 0) { |
2682 | update_flags->bits.swizzle_change = 1; |
2683 | elevate_update_type(&update_type, UPDATE_TYPE_MED); |
2684 | |
2685 | /* todo: the checks below are HW dependent; we should add a hook to |
2686 | * DCE/N resource and validate them there. |
2687 | */ |
2688 | if (!dc->debug.skip_full_updated_if_possible) { |
2689 | /* swizzled mode requires RQ to be setup properly, |
2690 | * thus need to run DML to calculate RQ settings |
2691 | */ |
2692 | update_flags->bits.bandwidth_change = 1; |
2693 | elevate_update_type(&update_type, UPDATE_TYPE_FULL); |
2694 | } |
2695 | } |
2696 | |
2697 | /* This should be UPDATE_TYPE_FAST if nothing has changed. */ |
2698 | return update_type; |
2699 | } |
2700 | |
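/* Classify a scaling_info update: scaling, clock or bandwidth affecting
 * changes result in a FULL update, a pure position change is MED and
 * anything else is FAST.
 */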
2701 | static enum surface_update_type get_scaling_info_update_type( |
2702 | const struct dc *dc, |
2703 | const struct dc_surface_update *u) |
2704 | { |
2705 | union surface_update_flags *update_flags = &u->surface->update_flags; |
2706 | |
2707 | if (!u->scaling_info) |
2708 | return UPDATE_TYPE_FAST; |
2709 | |
2710 | if (u->scaling_info->src_rect.width != u->surface->src_rect.width |
2711 | || u->scaling_info->src_rect.height != u->surface->src_rect.height |
2712 | || u->scaling_info->dst_rect.width != u->surface->dst_rect.width |
2713 | || u->scaling_info->dst_rect.height != u->surface->dst_rect.height |
2714 | || u->scaling_info->clip_rect.width != u->surface->clip_rect.width |
2715 | || u->scaling_info->clip_rect.height != u->surface->clip_rect.height |
2716 | || u->scaling_info->scaling_quality.integer_scaling != |
2717 | u->surface->scaling_quality.integer_scaling) { |
2718 | update_flags->bits.scaling_change = 1; |
2719 | |
2720 | if (u->scaling_info->src_rect.width > u->surface->src_rect.width |
2721 | || u->scaling_info->src_rect.height > u->surface->src_rect.height) |
2722 | /* Making src rect bigger requires a bandwidth change */ |
2723 | update_flags->bits.clock_change = 1; |
2724 | |
2725 | if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width |
2726 | || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) |
2727 | && (u->scaling_info->dst_rect.width < u->surface->src_rect.width |
2728 | || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) |
2729 | /* Making dst rect smaller requires a bandwidth change */ |
2730 | update_flags->bits.bandwidth_change = 1; |
2731 | |
2732 | if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && |
2733 | (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || |
2734 | u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) |
2735 | /* Changing clip size of a large surface may result in MPC slice count change */ |
2736 | update_flags->bits.bandwidth_change = 1; |
2737 | } |
2738 | |
2739 | if (u->scaling_info->src_rect.x != u->surface->src_rect.x |
2740 | || u->scaling_info->src_rect.y != u->surface->src_rect.y |
2741 | || u->scaling_info->clip_rect.x != u->surface->clip_rect.x |
2742 | || u->scaling_info->clip_rect.y != u->surface->clip_rect.y |
2743 | || u->scaling_info->dst_rect.x != u->surface->dst_rect.x |
2744 | || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) |
2745 | update_flags->bits.position_change = 1; |
2746 | |
2747 | /* process every update flag before returning */ |
2748 | if (update_flags->bits.clock_change |
2749 | || update_flags->bits.bandwidth_change |
2750 | || update_flags->bits.scaling_change) |
2751 | return UPDATE_TYPE_FULL; |
2752 | |
2753 | if (update_flags->bits.position_change) |
2754 | return UPDATE_TYPE_MED; |
2755 | |
2756 | return UPDATE_TYPE_FAST; |
2757 | } |
2758 | |
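/* Determine the update type for a single surface update by combining the
 * plane info, scaling and color management checks. A surface that is not
 * part of the current context (or has force_full_update set) always gets a
 * FULL update.
 */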
2759 | static enum surface_update_type det_surface_update(const struct dc *dc, |
2760 | const struct dc_surface_update *u) |
2761 | { |
2762 | const struct dc_state *context = dc->current_state; |
2763 | enum surface_update_type type; |
2764 | enum surface_update_type overall_type = UPDATE_TYPE_FAST; |
2765 | union surface_update_flags *update_flags = &u->surface->update_flags; |
2766 | |
2767 | if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { |
2768 | update_flags->raw = 0xFFFFFFFF; |
2769 | return UPDATE_TYPE_FULL; |
2770 | } |
2771 | |
2772 | update_flags->raw = 0; // Reset all flags |
2773 | |
2774 | type = get_plane_info_update_type(dc, u); |
2775 | elevate_update_type(&overall_type, type); |
2776 | |
2777 | type = get_scaling_info_update_type(dc, u); |
2778 | elevate_update_type(&overall_type, type); |
2779 | |
2780 | if (u->flip_addr) { |
2781 | update_flags->bits.addr_update = 1; |
2782 | if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { |
2783 | update_flags->bits.tmz_changed = 1; |
2784 | elevate_update_type(&overall_type, UPDATE_TYPE_FULL); |
2785 | } |
2786 | } |
2787 | if (u->in_transfer_func) |
2788 | update_flags->bits.in_transfer_func_change = 1; |
2789 | |
2790 | if (u->input_csc_color_matrix) |
2791 | update_flags->bits.input_csc_change = 1; |
2792 | |
2793 | if (u->coeff_reduction_factor) |
2794 | update_flags->bits.coeff_reduction_change = 1; |
2795 | |
2796 | if (u->gamut_remap_matrix) |
2797 | update_flags->bits.gamut_remap_change = 1; |
2798 | |
2799 | if (u->blend_tf) |
2800 | update_flags->bits.gamma_change = 1; |
2801 | |
2802 | if (u->gamma) { |
2803 | enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; |
2804 | |
2805 | if (u->plane_info) |
2806 | format = u->plane_info->format; |
2807 | else |
2808 | format = u->surface->format; |
2809 | |
2810 | if (dce_use_lut(format)) |
2811 | update_flags->bits.gamma_change = 1; |
2812 | } |
2813 | |
2814 | if (u->lut3d_func || u->func_shaper) |
2815 | update_flags->bits.lut_3d = 1; |
2816 | |
2817 | if (u->hdr_mult.value) |
2818 | if (u->hdr_mult.value != u->surface->hdr_mult.value) { |
2819 | update_flags->bits.hdr_mult = 1; |
2820 | elevate_update_type(&overall_type, UPDATE_TYPE_MED); |
2821 | } |
2822 | |
2823 | if (u->sdr_white_level_nits) |
2824 | if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { |
2825 | update_flags->bits.sdr_white_level_nits = 1; |
2826 | elevate_update_type(&overall_type, UPDATE_TYPE_FULL); |
2827 | } |
2828 | |
2829 | if (u->cm2_params) { |
2830 | if ((u->cm2_params->component_settings.shaper_3dlut_setting |
2831 | != u->surface->mcm_shaper_3dlut_setting) |
2832 | || (u->cm2_params->component_settings.lut1d_enable |
2833 | != u->surface->mcm_lut1d_enable)) |
2834 | update_flags->bits.mcm_transfer_function_enable_change = 1; |
2835 | if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src |
2836 | != u->surface->mcm_luts.lut3d_data.lut3d_src) |
2837 | update_flags->bits.mcm_transfer_function_enable_change = 1; |
2838 | } |
2839 | if (update_flags->bits.in_transfer_func_change) { |
2840 | type = UPDATE_TYPE_MED; |
2841 | elevate_update_type(&overall_type, type); |
2842 | } |
2843 | |
2844 | if (update_flags->bits.lut_3d && |
2845 | u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { |
2846 | type = UPDATE_TYPE_FULL; |
2847 | elevate_update_type(&overall_type, type); |
2848 | } |
2849 | if (update_flags->bits.mcm_transfer_function_enable_change) { |
2850 | type = UPDATE_TYPE_FULL; |
2851 | elevate_update_type(&overall_type, type); |
2852 | } |
2853 | |
2854 | if (dc->debug.enable_legacy_fast_update && |
2855 | (update_flags->bits.gamma_change || |
2856 | update_flags->bits.gamut_remap_change || |
2857 | update_flags->bits.input_csc_change || |
2858 | update_flags->bits.coeff_reduction_change)) { |
2859 | type = UPDATE_TYPE_FULL; |
2860 | elevate_update_type(&overall_type, type); |
2861 | } |
2862 | return overall_type; |
2863 | } |
2864 | |
2865 | /* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't |
2866 | * while both planes are flip_immediate |
2867 | */ |
2868 | static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) |
2869 | { |
2870 | bool has_flip_immediate_plane = false; |
2871 | int i; |
2872 | |
2873 | for (i = 0; i < surface_count; i++) { |
2874 | if (updates[i].surface->flip_immediate) { |
2875 | has_flip_immediate_plane = true; |
2876 | break; |
2877 | } |
2878 | } |
2879 | |
2880 | if (has_flip_immediate_plane && surface_count > 1) { |
2881 | for (i = 0; i < surface_count; i++) { |
2882 | if (updates[i].surface->flip_immediate) |
2883 | updates[i].surface->update_flags.bits.addr_update = 1; |
2884 | } |
2885 | } |
2886 | } |
2887 | |
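/* Combine the per-surface update types with the stream-level update flags.
 * Any stream-level flag, a change in plane count or a pending test pattern
 * promotes the overall update to FULL.
 */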
2888 | static enum surface_update_type check_update_surfaces_for_stream( |
2889 | struct dc *dc, |
2890 | struct dc_surface_update *updates, |
2891 | int surface_count, |
2892 | struct dc_stream_update *stream_update, |
2893 | const struct dc_stream_status *stream_status) |
2894 | { |
2895 | int i; |
2896 | enum surface_update_type overall_type = UPDATE_TYPE_FAST; |
2897 | |
2898 | if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) |
2899 | overall_type = UPDATE_TYPE_FULL; |
2900 | |
2901 | if (stream_status == NULL || stream_status->plane_count != surface_count) |
2902 | overall_type = UPDATE_TYPE_FULL; |
2903 | |
2904 | if (stream_update && stream_update->pending_test_pattern) { |
2905 | overall_type = UPDATE_TYPE_FULL; |
2906 | } |
2907 | |
2908 | if (stream_update && stream_update->hw_cursor_req) { |
2909 | overall_type = UPDATE_TYPE_FULL; |
2910 | } |
2911 | |
2912 | /* some stream updates require passive update */ |
2913 | if (stream_update) { |
2914 | union stream_update_flags *su_flags = &stream_update->stream->update_flags; |
2915 | |
2916 | if ((stream_update->src.height != 0 && stream_update->src.width != 0) || |
2917 | (stream_update->dst.height != 0 && stream_update->dst.width != 0) || |
2918 | stream_update->integer_scaling_update) |
2919 | su_flags->bits.scaling = 1; |
2920 | |
2921 | if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) |
2922 | su_flags->bits.out_tf = 1; |
2923 | |
2924 | if (stream_update->abm_level) |
2925 | su_flags->bits.abm_level = 1; |
2926 | |
2927 | if (stream_update->dpms_off) |
2928 | su_flags->bits.dpms_off = 1; |
2929 | |
2930 | if (stream_update->gamut_remap) |
2931 | su_flags->bits.gamut_remap = 1; |
2932 | |
2933 | if (stream_update->wb_update) |
2934 | su_flags->bits.wb_update = 1; |
2935 | |
2936 | if (stream_update->dsc_config) |
2937 | su_flags->bits.dsc_changed = 1; |
2938 | |
2939 | if (stream_update->mst_bw_update) |
2940 | su_flags->bits.mst_bw = 1; |
2941 | |
2942 | if (stream_update->stream->freesync_on_desktop && |
2943 | (stream_update->vrr_infopacket || stream_update->allow_freesync || |
2944 | stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) |
2945 | su_flags->bits.fams_changed = 1; |
2946 | |
2947 | if (stream_update->scaler_sharpener_update) |
2948 | su_flags->bits.scaler_sharpener = 1; |
2949 | |
2950 | if (stream_update->sharpening_required) |
2951 | su_flags->bits.sharpening_required = 1; |
2952 | |
2953 | if (stream_update->output_color_space) |
2954 | su_flags->bits.out_csc = 1; |
2955 | |
2956 | if (su_flags->raw != 0) |
2957 | overall_type = UPDATE_TYPE_FULL; |
2958 | |
2959 | if (stream_update->output_csc_transform) |
2960 | su_flags->bits.out_csc = 1; |
2961 | |
2962 | /* Output transfer function changes do not require bandwidth recalculation, |
2963 | * so don't trigger a full update |
2964 | */ |
2965 | if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) |
2966 | su_flags->bits.out_tf = 1; |
2967 | } |
2968 | |
2969 | for (i = 0 ; i < surface_count; i++) { |
2970 | enum surface_update_type type = |
2971 | det_surface_update(dc, &updates[i]); |
2972 | |
2973 | elevate_update_type(&overall_type, type); |
2974 | } |
2975 | |
2976 | return overall_type; |
2977 | } |
2978 | |
2979 | /* |
2980 | * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) |
2981 | * |
2982 | * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types |
2983 | */ |
2984 | enum surface_update_type dc_check_update_surfaces_for_stream( |
2985 | struct dc *dc, |
2986 | struct dc_surface_update *updates, |
2987 | int surface_count, |
2988 | struct dc_stream_update *stream_update, |
2989 | const struct dc_stream_status *stream_status) |
2990 | { |
2991 | int i; |
2992 | enum surface_update_type type; |
2993 | |
2994 | if (stream_update) |
2995 | stream_update->stream->update_flags.raw = 0; |
2996 | for (i = 0; i < surface_count; i++) |
2997 | updates[i].surface->update_flags.raw = 0; |
2998 | |
2999 | type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); |
3000 | if (type == UPDATE_TYPE_FULL) { |
3001 | if (stream_update) { |
3002 | uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; |
3003 | stream_update->stream->update_flags.raw = 0xFFFFFFFF; |
3004 | stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; |
3005 | } |
3006 | for (i = 0; i < surface_count; i++) |
3007 | updates[i].surface->update_flags.raw = 0xFFFFFFFF; |
3008 | } |
3009 | |
3010 | if (type == UPDATE_TYPE_FAST) { |
3011 | // If there's an available clock comparator, we use that. |
3012 | if (dc->clk_mgr->funcs->are_clock_states_equal) { |
3013 | if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) |
3014 | dc->optimized_required = true; |
3015 | // Else we fallback to mem compare. |
3016 | } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { |
3017 | dc->optimized_required = true; |
3018 | } |
3019 | |
3020 | dc->optimized_required |= dc->wm_optimized_required; |
3021 | } |
3022 | |
3023 | return type; |
3024 | } |
3025 | |
3026 | static struct dc_stream_status *stream_get_status( |
3027 | struct dc_state *ctx, |
3028 | struct dc_stream_state *stream) |
3029 | { |
3030 | uint8_t i; |
3031 | |
3032 | for (i = 0; i < ctx->stream_count; i++) { |
3033 | if (stream == ctx->streams[i]) { |
3034 | return &ctx->stream_status[i]; |
3035 | } |
3036 | } |
3037 | |
3038 | return NULL; |
3039 | } |
3040 | |
3041 | static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; |
3042 | |
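/* Copy every field that is present (non-NULL) in the surface update into the
 * plane state; fields not provided by the update are left untouched.
 */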
3043 | static void copy_surface_update_to_plane( |
3044 | struct dc_plane_state *surface, |
3045 | struct dc_surface_update *srf_update) |
3046 | { |
3047 | if (srf_update->flip_addr) { |
3048 | surface->address = srf_update->flip_addr->address; |
3049 | surface->flip_immediate = |
3050 | srf_update->flip_addr->flip_immediate; |
3051 | surface->time.time_elapsed_in_us[surface->time.index] = |
3052 | srf_update->flip_addr->flip_timestamp_in_us - |
3053 | surface->time.prev_update_time_in_us; |
3054 | surface->time.prev_update_time_in_us = |
3055 | srf_update->flip_addr->flip_timestamp_in_us; |
3056 | surface->time.index++; |
3057 | if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) |
3058 | surface->time.index = 0; |
3059 | |
3060 | surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; |
3061 | } |
3062 | |
3063 | if (srf_update->scaling_info) { |
3064 | surface->scaling_quality = |
3065 | srf_update->scaling_info->scaling_quality; |
3066 | surface->dst_rect = |
3067 | srf_update->scaling_info->dst_rect; |
3068 | surface->src_rect = |
3069 | srf_update->scaling_info->src_rect; |
3070 | surface->clip_rect = |
3071 | srf_update->scaling_info->clip_rect; |
3072 | } |
3073 | |
3074 | if (srf_update->plane_info) { |
3075 | surface->color_space = |
3076 | srf_update->plane_info->color_space; |
3077 | surface->format = |
3078 | srf_update->plane_info->format; |
3079 | surface->plane_size = |
3080 | srf_update->plane_info->plane_size; |
3081 | surface->rotation = |
3082 | srf_update->plane_info->rotation; |
3083 | surface->horizontal_mirror = |
3084 | srf_update->plane_info->horizontal_mirror; |
3085 | surface->stereo_format = |
3086 | srf_update->plane_info->stereo_format; |
3087 | surface->tiling_info = |
3088 | srf_update->plane_info->tiling_info; |
3089 | surface->visible = |
3090 | srf_update->plane_info->visible; |
3091 | surface->per_pixel_alpha = |
3092 | srf_update->plane_info->per_pixel_alpha; |
3093 | surface->global_alpha = |
3094 | srf_update->plane_info->global_alpha; |
3095 | surface->global_alpha_value = |
3096 | srf_update->plane_info->global_alpha_value; |
3097 | surface->dcc = |
3098 | srf_update->plane_info->dcc; |
3099 | surface->layer_index = |
3100 | srf_update->plane_info->layer_index; |
3101 | } |
3102 | |
3103 | if (srf_update->gamma) { |
3104 | memcpy(&surface->gamma_correction.entries, |
3105 | &srf_update->gamma->entries, |
3106 | sizeof(struct dc_gamma_entries)); |
3107 | surface->gamma_correction.is_identity = |
3108 | srf_update->gamma->is_identity; |
3109 | surface->gamma_correction.num_entries = |
3110 | srf_update->gamma->num_entries; |
3111 | surface->gamma_correction.type = |
3112 | srf_update->gamma->type; |
3113 | } |
3114 | |
3115 | if (srf_update->in_transfer_func) { |
3116 | surface->in_transfer_func.sdr_ref_white_level = |
3117 | srf_update->in_transfer_func->sdr_ref_white_level; |
3118 | surface->in_transfer_func.tf = |
3119 | srf_update->in_transfer_func->tf; |
3120 | surface->in_transfer_func.type = |
3121 | srf_update->in_transfer_func->type; |
3122 | memcpy(&surface->in_transfer_func.tf_pts, |
3123 | &srf_update->in_transfer_func->tf_pts, |
3124 | sizeof(struct dc_transfer_func_distributed_points)); |
3125 | } |
3126 | |
3127 | if (srf_update->cm2_params) { |
3128 | surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; |
3129 | surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; |
3130 | surface->mcm_luts = srf_update->cm2_params->cm2_luts; |
3131 | } |
3132 | |
3133 | if (srf_update->func_shaper) { |
3134 | memcpy(&surface->in_shaper_func, srf_update->func_shaper, |
3135 | sizeof(surface->in_shaper_func)); |
3136 | |
3137 | if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER) |
3138 | surface->mcm_luts.shaper = &surface->in_shaper_func; |
3139 | } |
3140 | |
3141 | if (srf_update->lut3d_func) |
3142 | memcpy(&surface->lut3d_func, srf_update->lut3d_func, |
3143 | sizeof(surface->lut3d_func)); |
3144 | |
3145 | if (srf_update->hdr_mult.value) |
3146 | surface->hdr_mult = |
3147 | srf_update->hdr_mult; |
3148 | |
3149 | if (srf_update->sdr_white_level_nits) |
3150 | surface->sdr_white_level_nits = |
3151 | srf_update->sdr_white_level_nits; |
3152 | |
3153 | if (srf_update->blend_tf) { |
3154 | memcpy(&surface->blend_tf, srf_update->blend_tf, |
3155 | sizeof(surface->blend_tf)); |
3156 | |
3157 | if (surface->mcm_lut1d_enable) |
3158 | surface->mcm_luts.lut1d_func = &surface->blend_tf; |
3159 | } |
3160 | |
3161 | if (srf_update->cm2_params || srf_update->blend_tf) |
3162 | surface->lut_bank_a = !surface->lut_bank_a; |
3163 | |
3164 | if (srf_update->input_csc_color_matrix) |
3165 | surface->input_csc_color_matrix = |
3166 | *srf_update->input_csc_color_matrix; |
3167 | |
3168 | if (srf_update->coeff_reduction_factor) |
3169 | surface->coeff_reduction_factor = |
3170 | *srf_update->coeff_reduction_factor; |
3171 | |
3172 | if (srf_update->gamut_remap_matrix) |
3173 | surface->gamut_remap_matrix = |
3174 | *srf_update->gamut_remap_matrix; |
3175 | |
3176 | if (srf_update->cursor_csc_color_matrix) |
3177 | surface->cursor_csc_color_matrix = |
3178 | *srf_update->cursor_csc_color_matrix; |
3179 | |
3180 | if (srf_update->bias_and_scale.bias_and_scale_valid) |
3181 | surface->bias_and_scale = |
3182 | srf_update->bias_and_scale; |
3183 | } |
3184 | |
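/*
 * Apply every populated field of a stream update to the stream itself.
 * A DSC config change is validated against a temporary copy of the current
 * state first and is dropped from the update if validation fails.
 */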
3185 | static void copy_stream_update_to_stream(struct dc *dc, |
3186 | struct dc_state *context, |
3187 | struct dc_stream_state *stream, |
3188 | struct dc_stream_update *update) |
3189 | { |
3190 | struct dc_context *dc_ctx = dc->ctx; |
3191 | |
3192 | if (update == NULL || stream == NULL) |
3193 | return; |
3194 | |
3195 | if (update->src.height && update->src.width) |
3196 | stream->src = update->src; |
3197 | |
3198 | if (update->dst.height && update->dst.width) |
3199 | stream->dst = update->dst; |
3200 | |
3201 | if (update->out_transfer_func) { |
3202 | stream->out_transfer_func.sdr_ref_white_level = |
3203 | update->out_transfer_func->sdr_ref_white_level; |
3204 | stream->out_transfer_func.tf = update->out_transfer_func->tf; |
3205 | stream->out_transfer_func.type = |
3206 | update->out_transfer_func->type; |
3207 | memcpy(&stream->out_transfer_func.tf_pts, |
3208 | &update->out_transfer_func->tf_pts, |
3209 | sizeof(struct dc_transfer_func_distributed_points)); |
3210 | } |
3211 | |
3212 | if (update->hdr_static_metadata) |
3213 | stream->hdr_static_metadata = *update->hdr_static_metadata; |
3214 | |
3215 | if (update->abm_level) |
3216 | stream->abm_level = *update->abm_level; |
3217 | |
3218 | if (update->periodic_interrupt) |
3219 | stream->periodic_interrupt = *update->periodic_interrupt; |
3220 | |
3221 | if (update->gamut_remap) |
3222 | stream->gamut_remap_matrix = *update->gamut_remap; |
3223 | |
3224 | 	/* Note: updating this after mode set is currently not a use case;
3225 | 	 * however, if it arises, OCSC would need to be reprogrammed at a
3226 | 	 * minimum.
3227 | 	 */
3228 | if (update->output_color_space) |
3229 | stream->output_color_space = *update->output_color_space; |
3230 | |
3231 | if (update->output_csc_transform) |
3232 | stream->csc_color_matrix = *update->output_csc_transform; |
3233 | |
3234 | if (update->vrr_infopacket) |
3235 | stream->vrr_infopacket = *update->vrr_infopacket; |
3236 | |
3237 | if (update->hw_cursor_req) |
3238 | stream->hw_cursor_req = *update->hw_cursor_req; |
3239 | |
3240 | if (update->allow_freesync) |
3241 | stream->allow_freesync = *update->allow_freesync; |
3242 | |
3243 | if (update->vrr_active_variable) |
3244 | stream->vrr_active_variable = *update->vrr_active_variable; |
3245 | |
3246 | if (update->vrr_active_fixed) |
3247 | stream->vrr_active_fixed = *update->vrr_active_fixed; |
3248 | |
3249 | if (update->crtc_timing_adjust) { |
3250 | if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || |
3251 | stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || |
3252 | stream->adjust.timing_adjust_pending) |
3253 | update->crtc_timing_adjust->timing_adjust_pending = true; |
3254 | stream->adjust = *update->crtc_timing_adjust; |
3255 | update->crtc_timing_adjust->timing_adjust_pending = false; |
3256 | } |
3257 | |
3258 | if (update->dpms_off) |
3259 | stream->dpms_off = *update->dpms_off; |
3260 | |
3261 | if (update->hfvsif_infopacket) |
3262 | stream->hfvsif_infopacket = *update->hfvsif_infopacket; |
3263 | |
3264 | if (update->vtem_infopacket) |
3265 | stream->vtem_infopacket = *update->vtem_infopacket; |
3266 | |
3267 | if (update->vsc_infopacket) |
3268 | stream->vsc_infopacket = *update->vsc_infopacket; |
3269 | |
3270 | if (update->vsp_infopacket) |
3271 | stream->vsp_infopacket = *update->vsp_infopacket; |
3272 | |
3273 | if (update->adaptive_sync_infopacket) |
3274 | stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; |
3275 | |
3276 | if (update->dither_option) |
3277 | stream->dither_option = *update->dither_option; |
3278 | |
3279 | if (update->pending_test_pattern) |
3280 | stream->test_pattern = *update->pending_test_pattern; |
3281 | /* update current stream with writeback info */ |
3282 | if (update->wb_update) { |
3283 | int i; |
3284 | |
3285 | stream->num_wb_info = update->wb_update->num_wb_info; |
3286 | ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); |
3287 | for (i = 0; i < stream->num_wb_info; i++) |
3288 | stream->writeback_info[i] = |
3289 | update->wb_update->writeback_info[i]; |
3290 | } |
3291 | if (update->dsc_config) { |
3292 | struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; |
3293 | uint32_t old_dsc_enabled = stream->timing.flags.DSC; |
3294 | uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && |
3295 | update->dsc_config->num_slices_v != 0); |
3296 | |
3297 | /* Use temporary context for validating new DSC config */
3298 | struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
3299 | |
3300 | if (dsc_validate_context) { |
3301 | stream->timing.dsc_cfg = *update->dsc_config; |
3302 | stream->timing.flags.DSC = enable_dsc; |
3303 | if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) { |
3304 | stream->timing.dsc_cfg = old_dsc_cfg; |
3305 | stream->timing.flags.DSC = old_dsc_enabled; |
3306 | update->dsc_config = NULL; |
3307 | } |
3308 | |
3309 | dc_state_release(dsc_validate_context);
3310 | } else { |
3311 | DC_ERROR("Failed to allocate new validate context for DSC change\n"); |
3312 | update->dsc_config = NULL; |
3313 | } |
3314 | } |
3315 | if (update->scaler_sharpener_update) |
3316 | stream->scaler_sharpener_update = *update->scaler_sharpener_update; |
3317 | if (update->sharpening_required) |
3318 | stream->sharpening_required = *update->sharpening_required; |
3319 | } |
3320 | |
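/*
 * Save the stream and its plane configurations into scratch space so they
 * can be restored if the update has to be rolled back.
 */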
3321 | static void backup_planes_and_stream_state( |
3322 | struct dc_scratch_space *scratch, |
3323 | struct dc_stream_state *stream) |
3324 | { |
3325 | int i; |
3326 | struct dc_stream_status *status = dc_stream_get_status(stream);
3327 | |
3328 | if (!status) |
3329 | return; |
3330 | |
3331 | for (i = 0; i < status->plane_count; i++) { |
3332 | dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
3333 | } |
3334 | scratch->stream_state = *stream; |
3335 | } |
3336 | |
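/* Restore the stream and plane configurations saved by backup_planes_and_stream_state(). */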
3337 | static void restore_planes_and_stream_state( |
3338 | struct dc_scratch_space *scratch, |
3339 | struct dc_stream_state *stream) |
3340 | { |
3341 | int i; |
3342 | struct dc_stream_status *status = dc_stream_get_status(stream);
3343 | |
3344 | if (!status) |
3345 | return; |
3346 | |
3347 | for (i = 0; i < status->plane_count; i++) { |
3348 | dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
3349 | } |
3350 | *stream = scratch->stream_state; |
3351 | } |
3352 | |
3353 | /** |
3354 | * update_seamless_boot_flags() - Helper function for updating seamless boot flags |
3355 | * |
3356 | * @dc: Current DC state |
3357 | * @context: New DC state to be programmed |
3358 |  * @surface_count: Number of surfaces that have been updated
3359 |  * @stream: Corresponding stream to be updated in the current flip
3360 |  *
3361 |  * Updating seamless boot flags does not need to be part of the commit sequence. This
3362 | * helper function will update the seamless boot flags on each flip (if required) |
3363 | * outside of the HW commit sequence (fast or slow). |
3364 | * |
3365 | * Return: void |
3366 | */ |
3367 | static void update_seamless_boot_flags(struct dc *dc, |
3368 | struct dc_state *context, |
3369 | int surface_count, |
3370 | struct dc_stream_state *stream) |
3371 | { |
3372 | if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3373 | /* Optimize seamless boot flag keeps clocks and watermarks high until |
3374 | * first flip. After first flip, optimization is required to lower |
3375 | * bandwidth. Important to note that it is expected UEFI will |
3376 | * only light up a single display on POST, therefore we only expect |
3377 | * one stream with seamless boot flag set. |
3378 | */ |
3379 | if (stream->apply_seamless_boot_optimization) { |
3380 | stream->apply_seamless_boot_optimization = false; |
3381 | |
3382 | if (get_seamless_boot_stream_count(context) == 0)
3383 | dc->optimized_required = true; |
3384 | } |
3385 | } |
3386 | } |
3387 | |
3388 | /** |
3389 | * update_planes_and_stream_state() - The function takes planes and stream |
3390 | * updates as inputs and determines the appropriate update type. If update type |
3391 | * is FULL, the function allocates a new context, populates and validates it. |
3392 | * Otherwise, it updates current dc context. The function will return both |
3393 | * new_context and new_update_type back to the caller. The function also backs |
3394 | * up both current and new contexts into corresponding dc state scratch memory. |
3395 | * TODO: The function does too many things, and even conditionally allocates dc |
3396 |  * context memory implicitly. We should consider breaking it down.
3397 | * |
3398 | * @dc: Current DC state |
3399 | * @srf_updates: an array of surface updates |
3400 | * @surface_count: surface update count |
3401 | * @stream: Corresponding stream to be updated |
3402 | * @stream_update: stream update |
3403 | * @new_update_type: [out] determined update type by the function |
3404 | * @new_context: [out] new context allocated and validated if update type is |
3405 | * FULL, reference to current context if update type is less than FULL. |
3406 | * |
3407 | * Return: true if a valid update is populated into new_context, false |
3408 | * otherwise. |
3409 | */ |
3410 | static bool update_planes_and_stream_state(struct dc *dc, |
3411 | struct dc_surface_update *srf_updates, int surface_count, |
3412 | struct dc_stream_state *stream, |
3413 | struct dc_stream_update *stream_update, |
3414 | enum surface_update_type *new_update_type, |
3415 | struct dc_state **new_context) |
3416 | { |
3417 | struct dc_state *context; |
3418 | int i, j; |
3419 | enum surface_update_type update_type; |
3420 | const struct dc_stream_status *stream_status; |
3421 | struct dc_context *dc_ctx = dc->ctx; |
3422 | |
3423 | stream_status = dc_stream_get_status(stream);
3424 | |
3425 | if (!stream_status) { |
3426 | if (surface_count) /* Only an error condition if surf_count non-zero*/ |
3427 | ASSERT(false); |
3428 | |
3429 | return false; /* Cannot commit surface to stream that is not committed */ |
3430 | } |
3431 | |
3432 | context = dc->current_state; |
3433 | update_type = dc_check_update_surfaces_for_stream( |
3434 | 		dc, srf_updates, surface_count, stream_update, stream_status);
3435 | /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. |
3436 | * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip |
3437 | * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. |
3438 | */ |
3439 | force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
3440 | if (update_type == UPDATE_TYPE_FULL) |
3441 | backup_planes_and_stream_state(&dc->scratch.current_state, stream);
3442 | |
3443 | /* update current stream with the new updates */ |
3444 | copy_stream_update_to_stream(dc, context, stream, stream_update);
3445 | |
3446 | /* do not perform surface update if surface has invalid dimensions |
3447 | * (all zero) and no scaling_info is provided |
3448 | */ |
3449 | if (surface_count > 0) { |
3450 | for (i = 0; i < surface_count; i++) { |
3451 | if ((srf_updates[i].surface->src_rect.width == 0 || |
3452 | srf_updates[i].surface->src_rect.height == 0 || |
3453 | srf_updates[i].surface->dst_rect.width == 0 || |
3454 | srf_updates[i].surface->dst_rect.height == 0) && |
3455 | (!srf_updates[i].scaling_info || |
3456 | srf_updates[i].scaling_info->src_rect.width == 0 || |
3457 | srf_updates[i].scaling_info->src_rect.height == 0 || |
3458 | srf_updates[i].scaling_info->dst_rect.width == 0 || |
3459 | srf_updates[i].scaling_info->dst_rect.height == 0)) { |
3460 | DC_ERROR("Invalid src/dst rects in surface update!\n"); |
3461 | return false; |
3462 | } |
3463 | } |
3464 | } |
3465 | |
3466 | if (update_type >= update_surface_trace_level) |
3467 | update_surface_trace(dc, srf_updates, surface_count);
3468 | |
3469 | for (i = 0; i < surface_count; i++) |
3470 | copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3471 | |
3472 | if (update_type >= UPDATE_TYPE_FULL) { |
3473 | struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; |
3474 | |
3475 | for (i = 0; i < surface_count; i++) |
3476 | new_planes[i] = srf_updates[i].surface; |
3477 | |
3478 | /* initialize scratch memory for building context */ |
3479 | context = dc_state_create_copy(dc->current_state);
3480 | if (context == NULL) { |
3481 | DC_ERROR("Failed to allocate new validate context!\n"); |
3482 | return false; |
3483 | } |
3484 | |
3485 | /* For each full update, remove all existing phantom pipes first. |
3486 | * Ensures that we have enough pipes for newly added MPO planes |
3487 | */ |
3488 | dc_state_remove_phantom_streams_and_planes(dc, context);
3489 | dc_state_release_phantom_streams_and_planes(dc, context);
3490 | |
3491 | /* remove old surfaces from context */
3492 | if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3493 | |
3494 | BREAK_TO_DEBUGGER(); |
3495 | goto fail; |
3496 | } |
3497 | |
3498 | /* add surface to context */ |
3499 | if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3500 | |
3501 | BREAK_TO_DEBUGGER(); |
3502 | goto fail; |
3503 | } |
3504 | } |
3505 | |
3506 | /* save update parameters into surface */ |
3507 | for (i = 0; i < surface_count; i++) { |
3508 | struct dc_plane_state *surface = srf_updates[i].surface; |
3509 | |
3510 | if (update_type != UPDATE_TYPE_MED) |
3511 | continue; |
3512 | if (surface->update_flags.bits.position_change) { |
3513 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
3514 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
3515 | |
3516 | if (pipe_ctx->plane_state != surface) |
3517 | continue; |
3518 | |
3519 | resource_build_scaling_params(pipe_ctx); |
3520 | } |
3521 | } |
3522 | } |
3523 | |
3524 | if (update_type == UPDATE_TYPE_FULL) { |
3525 | if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { |
3526 | BREAK_TO_DEBUGGER(); |
3527 | goto fail; |
3528 | } |
3529 | } |
3530 | update_seamless_boot_flags(dc, context, surface_count, stream); |
3531 | |
3532 | *new_context = context; |
3533 | *new_update_type = update_type; |
3534 | if (update_type == UPDATE_TYPE_FULL) |
3535 | backup_planes_and_stream_state(&dc->scratch.new_state, stream);
3536 | |
3537 | return true; |
3538 | |
3539 | fail: |
3540 | dc_state_release(context);
3541 | |
3542 | return false; |
3543 | |
3544 | } |
3545 | |
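/*
 * Program stream-level updates (info frames, gamut remap, output CSC,
 * dither, cursor, DSC, MST bandwidth, test pattern, DPMS and ABM) on the
 * OTG master pipe of @stream. Anything beyond the fast-update set is
 * skipped when @update_type is UPDATE_TYPE_FAST.
 */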
3546 | static void commit_planes_do_stream_update(struct dc *dc, |
3547 | struct dc_stream_state *stream, |
3548 | struct dc_stream_update *stream_update, |
3549 | enum surface_update_type update_type, |
3550 | struct dc_state *context) |
3551 | { |
3552 | int j; |
3553 | |
3554 | // Stream updates |
3555 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
3556 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
3557 | |
3558 | if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3559 | |
3560 | if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) |
3561 | dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); |
3562 | |
3563 | if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || |
3564 | stream_update->vrr_infopacket || |
3565 | stream_update->vsc_infopacket || |
3566 | stream_update->vsp_infopacket || |
3567 | stream_update->hfvsif_infopacket || |
3568 | stream_update->adaptive_sync_infopacket || |
3569 | stream_update->vtem_infopacket) { |
3570 | resource_build_info_frame(pipe_ctx); |
3571 | dc->hwss.update_info_frame(pipe_ctx); |
3572 | |
3573 | if (dc_is_dp_signal(pipe_ctx->stream->signal))
3574 | dc->link_srv->dp_trace_source_sequence( |
3575 | pipe_ctx->stream->link, |
3576 | DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); |
3577 | } |
3578 | |
3579 | if (stream_update->hdr_static_metadata && |
3580 | stream->use_dynamic_meta && |
3581 | dc->hwss.set_dmdata_attributes && |
3582 | pipe_ctx->stream->dmdata_address.quad_part != 0) |
3583 | dc->hwss.set_dmdata_attributes(pipe_ctx); |
3584 | |
3585 | if (stream_update->gamut_remap) |
3586 | dc_stream_set_gamut_remap(dc, stream); |
3587 | |
3588 | if (stream_update->output_csc_transform) |
3589 | dc_stream_program_csc_matrix(dc, stream); |
3590 | |
3591 | if (stream_update->dither_option) { |
3592 | struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; |
3593 | resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3594 | 		&pipe_ctx->stream->bit_depth_params);
3595 | pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, |
3596 | &stream->bit_depth_params, |
3597 | &stream->clamping); |
3598 | while (odm_pipe) { |
3599 | odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, |
3600 | &stream->bit_depth_params, |
3601 | &stream->clamping); |
3602 | odm_pipe = odm_pipe->next_odm_pipe; |
3603 | } |
3604 | } |
3605 | |
3606 | if (stream_update->cursor_attributes) |
3607 | program_cursor_attributes(dc, stream); |
3608 | |
3609 | if (stream_update->cursor_position) |
3610 | program_cursor_position(dc, stream); |
3611 | |
3612 | /* Full fe update*/ |
3613 | if (update_type == UPDATE_TYPE_FAST) |
3614 | continue; |
3615 | |
3616 | if (stream_update->dsc_config) |
3617 | dc->link_srv->update_dsc_config(pipe_ctx); |
3618 | |
3619 | if (stream_update->mst_bw_update) { |
3620 | if (stream_update->mst_bw_update->is_increase) |
3621 | dc->link_srv->increase_mst_payload(pipe_ctx, |
3622 | stream_update->mst_bw_update->mst_stream_bw); |
3623 | else |
3624 | dc->link_srv->reduce_mst_payload(pipe_ctx, |
3625 | stream_update->mst_bw_update->mst_stream_bw); |
3626 | } |
3627 | |
3628 | if (stream_update->pending_test_pattern) { |
3629 | /* |
3630 |  * Test pattern params depend on the ODM topology
3631 |  * changes that we may be applying to the front
3632 |  * end. Since front end changes are not yet
3633 |  * applied at this stage, we can only apply the
3634 |  * test pattern in HW based on the current state
3635 |  * and populate the final test pattern params in
3636 |  * the new state. If the current and new test
3637 |  * pattern params differ as a result of different
3638 |  * ODM topologies being used, this will be
3639 |  * detected and handled during the front end
3640 |  * programming update.
3641 | */ |
3642 | dc->link_srv->dp_set_test_pattern(stream->link, |
3643 | stream->test_pattern.type, |
3644 | stream->test_pattern.color_space, |
3645 | stream->test_pattern.p_link_settings, |
3646 | stream->test_pattern.p_custom_pattern, |
3647 | stream->test_pattern.cust_pattern_size); |
3648 | resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
3649 | } |
3650 | |
3651 | if (stream_update->dpms_off) { |
3652 | if (*stream_update->dpms_off) { |
3653 | dc->link_srv->set_dpms_off(pipe_ctx); |
3654 | /* for dpms, keep acquired resources*/ |
3655 | if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) |
3656 | pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); |
3657 | |
3658 | dc->optimized_required = true; |
3659 | |
3660 | } else { |
3661 | if (get_seamless_boot_stream_count(context) == 0)
3662 | dc->hwss.prepare_bandwidth(dc, dc->current_state); |
3663 | dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); |
3664 | } |
3665 | } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space |
3666 | && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3667 | /* |
3668 | * Workaround for firmware issue in some receivers where they don't pick up |
3669 | * correct output color space unless DP link is disabled/re-enabled |
3670 | */ |
3671 | dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); |
3672 | } |
3673 | |
3674 | if (stream_update->abm_level && pipe_ctx->stream_res.abm) { |
3675 | bool should_program_abm = true; |
3676 | |
3677 | // if otg funcs defined check if blanked before programming |
3678 | if (pipe_ctx->stream_res.tg->funcs->is_blanked) |
3679 | if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) |
3680 | should_program_abm = false; |
3681 | |
3682 | if (should_program_abm) { |
3683 | if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { |
3684 | dc->hwss.set_abm_immediate_disable(pipe_ctx); |
3685 | } else { |
3686 | pipe_ctx->stream_res.abm->funcs->set_abm_level( |
3687 | pipe_ctx->stream_res.abm, stream->abm_level); |
3688 | } |
3689 | } |
3690 | } |
3691 | } |
3692 | } |
3693 | } |
3694 | |
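/*
 * Dirty rectangle commands are only sent when the panel uses PSR-SU/PSR1
 * on DCN3.1+, supports Panel Replay, or uses ABM on DCN3.5+.
 */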
3695 | static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) |
3696 | { |
3697 | if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 |
3698 | || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) |
3699 | && stream->ctx->dce_version >= DCN_VERSION_3_1) |
3700 | return true; |
3701 | |
3702 | if (stream->link->replay_settings.config.replay_supported) |
3703 | return true; |
3704 | |
3705 | if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level) |
3706 | return true; |
3707 | |
3708 | return false; |
3709 | } |
3710 | |
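/*
 * Build and immediately send a DMUB dirty rectangle command for every
 * non-immediate flip in @srf_updates, once per matching pipe.
 */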
3711 | void dc_dmub_update_dirty_rect(struct dc *dc, |
3712 | int surface_count, |
3713 | struct dc_stream_state *stream, |
3714 | struct dc_surface_update *srf_updates, |
3715 | struct dc_state *context) |
3716 | { |
3717 | union dmub_rb_cmd cmd; |
3718 | struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; |
3719 | unsigned int i, j; |
3720 | unsigned int panel_inst = 0; |
3721 | |
3722 | if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) |
3723 | return; |
3724 | |
3725 | if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3726 | return; |
3727 | |
3728 | memset(&cmd, 0x0, sizeof(cmd)); |
3729 | cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; |
3730 | cmd.update_dirty_rect.header.sub_type = 0; |
3731 | cmd.update_dirty_rect.header.payload_bytes = |
3732 | sizeof(cmd.update_dirty_rect) - |
3733 | sizeof(cmd.update_dirty_rect.header); |
3734 | update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; |
3735 | for (i = 0; i < surface_count; i++) { |
3736 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
3737 | const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; |
3738 | |
3739 | if (!srf_updates[i].surface || !flip_addr) |
3740 | continue; |
3741 | /* Do not send in immediate flip mode */ |
3742 | if (srf_updates[i].surface->flip_immediate) |
3743 | continue; |
3744 | |
3745 | update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; |
3746 | update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; |
3747 | memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, |
3748 | sizeof(flip_addr->dirty_rects)); |
3749 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
3750 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
3751 | |
3752 | if (pipe_ctx->stream != stream) |
3753 | continue; |
3754 | if (pipe_ctx->plane_state != plane_state) |
3755 | continue; |
3756 | |
3757 | update_dirty_rect->panel_inst = panel_inst; |
3758 | update_dirty_rect->pipe_idx = j; |
3759 | dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3760 | } |
3761 | } |
3762 | } |
3763 | |
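/*
 * Same as dc_dmub_update_dirty_rect(), except the commands are appended to
 * @dc_dmub_cmd instead of being sent right away, so they can be submitted
 * later while the OTG lock is held.
 */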
3764 | static void build_dmub_update_dirty_rect( |
3765 | struct dc *dc, |
3766 | int surface_count, |
3767 | struct dc_stream_state *stream, |
3768 | struct dc_surface_update *srf_updates, |
3769 | struct dc_state *context, |
3770 | struct dc_dmub_cmd dc_dmub_cmd[], |
3771 | unsigned int *dmub_cmd_count) |
3772 | { |
3773 | union dmub_rb_cmd cmd; |
3774 | struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; |
3775 | unsigned int i, j; |
3776 | unsigned int panel_inst = 0; |
3777 | |
3778 | if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) |
3779 | return; |
3780 | |
3781 | if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3782 | return; |
3783 | |
3784 | memset(&cmd, 0x0, sizeof(cmd)); |
3785 | cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; |
3786 | cmd.update_dirty_rect.header.sub_type = 0; |
3787 | cmd.update_dirty_rect.header.payload_bytes = |
3788 | sizeof(cmd.update_dirty_rect) - |
3789 | sizeof(cmd.update_dirty_rect.header); |
3790 | update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; |
3791 | for (i = 0; i < surface_count; i++) { |
3792 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
3793 | const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; |
3794 | |
3795 | if (!srf_updates[i].surface || !flip_addr) |
3796 | continue; |
3797 | /* Do not send in immediate flip mode */ |
3798 | if (srf_updates[i].surface->flip_immediate) |
3799 | continue; |
3800 | update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; |
3801 | update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; |
3802 | memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, |
3803 | sizeof(flip_addr->dirty_rects)); |
3804 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
3805 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
3806 | |
3807 | if (pipe_ctx->stream != stream) |
3808 | continue; |
3809 | if (pipe_ctx->plane_state != plane_state) |
3810 | continue; |
3811 | update_dirty_rect->panel_inst = panel_inst; |
3812 | update_dirty_rect->pipe_idx = j; |
3813 | dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; |
3814 | dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; |
3815 | (*dmub_cmd_count)++; |
3816 | } |
3817 | } |
3818 | } |
3819 | |
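/* Return true if addr_update is the only update flag that is set. */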
3820 | static bool check_address_only_update(union surface_update_flags update_flags) |
3821 | { |
3822 | union surface_update_flags addr_only_update_flags; |
3823 | addr_only_update_flags.raw = 0; |
3824 | addr_only_update_flags.bits.addr_update = 1; |
3825 | |
3826 | return update_flags.bits.addr_update && |
3827 | !(update_flags.raw & ~addr_only_update_flags.raw); |
3828 | } |
3829 | |
3830 | /** |
3831 | * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB |
3832 | * |
3833 | * @dc: Current DC state |
3834 | * @srf_updates: Array of surface updates |
3835 |  * @surface_count: Number of surfaces that have been updated
3836 | * @stream: Corresponding stream to be updated in the current flip |
3837 | * @context: New DC state to be programmed |
3838 | * |
3839 | * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB |
3840 | * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array |
3841 | * |
3842 | * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required |
3843 | * to build an array of commands and have them sent while the OTG lock is acquired. |
3844 | * |
3845 | * Return: void |
3846 | */ |
3847 | static void build_dmub_cmd_list(struct dc *dc, |
3848 | struct dc_surface_update *srf_updates, |
3849 | int surface_count, |
3850 | struct dc_stream_state *stream, |
3851 | struct dc_state *context, |
3852 | struct dc_dmub_cmd dc_dmub_cmd[], |
3853 | unsigned int *dmub_cmd_count) |
3854 | { |
3855 | // Initialize cmd count to 0 |
3856 | *dmub_cmd_count = 0; |
3857 | build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); |
3858 | } |
3859 | |
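/*
 * Fast path used when the flip can be offloaded to FAMS2: update dirty
 * rects, program the new plane addresses and pass the flip through to DMCUB.
 */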
3860 | static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc, |
3861 | struct dc_surface_update *srf_updates, |
3862 | int surface_count, |
3863 | struct dc_stream_state *stream, |
3864 | struct dc_state *context) |
3865 | { |
3866 | int i, j; |
3867 | |
3868 | /* update dirty rect for PSR */ |
3869 | dc_dmub_update_dirty_rect(dc, surface_count, stream, |
3870 | srf_updates, context); |
3871 | |
3872 | /* Perform requested Updates */ |
3873 | for (i = 0; i < surface_count; i++) { |
3874 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
3875 | |
3876 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
3877 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
3878 | |
3879 | if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) |
3880 | continue; |
3881 | |
3882 | if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) |
3883 | continue; |
3884 | |
3885 | /* update pipe context for plane */ |
3886 | if (pipe_ctx->plane_state->update_flags.bits.addr_update) |
3887 | dc->hwss.update_plane_addr(dc, pipe_ctx); |
3888 | } |
3889 | } |
3890 | |
3891 | /* Send commands to DMCUB */ |
3892 | dc_dmub_srv_fams2_passthrough_flip(dc, |
3893 | 		context,
3894 | stream, |
3895 | srf_updates, |
3896 | surface_count); |
3897 | } |
3898 | |
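/*
 * Commit surface/stream updates on the fast path: either offload an
 * address-only flip to FAMS2 or build a block sequence of HW programming
 * steps and DMCUB commands and execute it.
 */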
3899 | static void commit_planes_for_stream_fast(struct dc *dc, |
3900 | struct dc_surface_update *srf_updates, |
3901 | int surface_count, |
3902 | struct dc_stream_state *stream, |
3903 | struct dc_stream_update *stream_update, |
3904 | enum surface_update_type update_type, |
3905 | struct dc_state *context) |
3906 | { |
3907 | int i, j; |
3908 | struct pipe_ctx *top_pipe_to_program = NULL; |
3909 | struct dc_stream_status *stream_status = NULL; |
3910 | bool should_offload_fams2_flip = false; |
3911 | bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); |
3912 | |
3913 | if (should_lock_all_pipes) |
3914 | determine_pipe_unlock_order(dc, context); |
3915 | |
3916 | if (dc->debug.fams2_config.bits.enable && |
3917 | dc->debug.fams2_config.bits.enable_offload_flip && |
3918 | dc_state_is_fams2_in_use(dc, context)) {
3919 | /* if not offloading to HWFQ, offload to FAMS2 if needed */ |
3920 | should_offload_fams2_flip = true; |
3921 | for (i = 0; i < surface_count; i++) { |
3922 | if (srf_updates[i].surface && |
3923 | srf_updates[i].surface->update_flags.raw && |
3924 | !check_address_only_update(srf_updates[i].surface->update_flags)) {
3925 | /* more than address update, need to acquire FAMS2 lock */ |
3926 | should_offload_fams2_flip = false; |
3927 | break; |
3928 | } |
3929 | } |
3930 | if (stream_update) { |
3931 | /* more than address update, need to acquire FAMS2 lock */ |
3932 | should_offload_fams2_flip = false; |
3933 | } |
3934 | } |
3935 | |
3936 | dc_exit_ips_for_hw_access(dc); |
3937 | |
3938 | dc_z10_restore(dc); |
3939 | |
3940 | top_pipe_to_program = resource_get_otg_master_for_stream( |
3941 | 		&context->res_ctx,
3942 | stream); |
3943 | |
3944 | if (!top_pipe_to_program) |
3945 | return; |
3946 | |
3947 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
3948 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
3949 | |
3950 | if (pipe->stream && pipe->plane_state) { |
3951 | if (!dc->debug.using_dml2) |
3952 | set_p_state_switch_method(dc, context, pipe);
3953 | 
3954 | if (dc->debug.visual_confirm)
3955 | 	dc_update_visual_confirm_color(dc, context, pipe);
3956 | } |
3957 | } |
3958 | |
3959 | for (i = 0; i < surface_count; i++) { |
3960 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
3961 | /*set logical flag for lock/unlock use*/ |
3962 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
3963 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
3964 | |
3965 | if (!pipe_ctx->plane_state) |
3966 | continue; |
3967 | if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) |
3968 | continue; |
3969 | |
3970 | pipe_ctx->plane_state->triplebuffer_flips = false; |
3971 | if (update_type == UPDATE_TYPE_FAST && |
3972 | dc->hwss.program_triplebuffer != NULL && |
3973 | !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { |
3974 | /*triple buffer for VUpdate only*/ |
3975 | pipe_ctx->plane_state->triplebuffer_flips = true; |
3976 | } |
3977 | } |
3978 | } |
3979 | |
3980 | stream_status = dc_state_get_stream_status(context, stream);
3981 | |
3982 | if (should_offload_fams2_flip) { |
3983 | commit_plane_for_stream_offload_fams2_flip(dc, |
3984 | srf_updates, |
3985 | surface_count, |
3986 | stream, |
3987 | context); |
3988 | } else if (stream_status) { |
3989 | build_dmub_cmd_list(dc, |
3990 | srf_updates, |
3991 | surface_count, |
3992 | stream, |
3993 | context, |
3994 | 		context->dc_dmub_cmd,
3995 | 		&(context->dmub_cmd_count));
3996 | hwss_build_fast_sequence(dc,
3997 | 		context->dc_dmub_cmd,
3998 | 		context->dmub_cmd_count,
3999 | 		context->block_sequence,
4000 | 		&(context->block_sequence_steps),
4001 | 		top_pipe_to_program,
4002 | 		stream_status,
4003 | 		context);
4004 | hwss_execute_sequence(dc,
4005 | 		context->block_sequence,
4006 | 		context->block_sequence_steps);
4007 | } |
4008 | |
4009 | /* Clear update flags so next flip doesn't have redundant programming |
4010 | * (if there's no stream update, the update flags are not cleared). |
4011 | * Surface updates are cleared unconditionally at the beginning of each flip, |
4012 | * so no need to clear here. |
4013 | */ |
4014 | if (top_pipe_to_program->stream) |
4015 | top_pipe_to_program->stream->update_flags.raw = 0; |
4016 | } |
4017 | |
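/*
 * General commit path for a stream: take the required locks, program
 * stream and surface updates (fast or full), handle SubVP phantom pipes
 * and DSC double-buffer locking, then unlock and notify DMCUB of the
 * resulting stream mask.
 */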
4018 | static void commit_planes_for_stream(struct dc *dc, |
4019 | struct dc_surface_update *srf_updates, |
4020 | int surface_count, |
4021 | struct dc_stream_state *stream, |
4022 | struct dc_stream_update *stream_update, |
4023 | enum surface_update_type update_type, |
4024 | struct dc_state *context) |
4025 | { |
4026 | int i, j; |
4027 | struct pipe_ctx *top_pipe_to_program = NULL; |
4028 | bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); |
4029 | bool subvp_prev_use = false; |
4030 | bool subvp_curr_use = false; |
4031 | uint8_t current_stream_mask = 0; |
4032 | |
4033 | if (should_lock_all_pipes) |
4034 | determine_pipe_unlock_order(dc, context); |
4035 | // Once we apply the new subvp context to hardware it won't be in the |
4036 | // dc->current_state anymore, so we have to cache it before we apply |
4037 | // the new SubVP context |
4038 | subvp_prev_use = false; |
4039 | dc_exit_ips_for_hw_access(dc); |
4040 | |
4041 | dc_z10_restore(dc); |
4042 | if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) |
4043 | hwss_process_outstanding_hw_updates(dc, dc->current_state);
4044 | |
4045 | if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming) |
4046 | dc->res_pool->funcs->prepare_mcache_programming(dc, context); |
4047 | |
4048 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4049 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
4050 | |
4051 | if (pipe->stream && pipe->plane_state) { |
4052 | if (!dc->debug.using_dml2) |
4053 | set_p_state_switch_method(dc, context, pipe);
4054 | 
4055 | if (dc->debug.visual_confirm)
4056 | 	dc_update_visual_confirm_color(dc, context, pipe);
4057 | } |
4058 | } |
4059 | |
4060 | if (update_type == UPDATE_TYPE_FULL) { |
4061 | dc_allow_idle_optimizations(dc, false); |
4062 | |
4063 | if (get_seamless_boot_stream_count(context) == 0)
4064 | dc->hwss.prepare_bandwidth(dc, context); |
4065 | |
4066 | if (dc->hwss.update_dsc_pg) |
4067 | dc->hwss.update_dsc_pg(dc, context, false); |
4068 | |
4069 | context_clock_trace(dc, context); |
4070 | } |
4071 | |
4072 | if (update_type == UPDATE_TYPE_FULL) |
4073 | hwss_wait_for_outstanding_hw_updates(dc, dc->current_state);
4074 | |
4075 | top_pipe_to_program = resource_get_otg_master_for_stream( |
4076 | 		&context->res_ctx,
4077 | stream); |
4078 | ASSERT(top_pipe_to_program != NULL); |
4079 | |
4080 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4081 | struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
4082 | |
4083 | // Check old context for SubVP |
4084 | subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
4085 | if (subvp_prev_use) |
4086 | break; |
4087 | } |
4088 | |
4089 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4090 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
4091 | |
4092 | if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
4093 | subvp_curr_use = true; |
4094 | break; |
4095 | } |
4096 | } |
4097 | |
4098 | if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { |
4099 | struct pipe_ctx *mpcc_pipe; |
4100 | struct pipe_ctx *odm_pipe; |
4101 | |
4102 | for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) |
4103 | for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) |
4104 | odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; |
4105 | } |
4106 | |
4107 | if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) |
4108 | if (top_pipe_to_program && |
4109 | top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { |
4110 | if (should_use_dmub_lock(stream->link)) {
4111 | union dmub_hw_lock_flags hw_locks = { 0 }; |
4112 | struct dmub_hw_lock_inst_flags inst_flags = { 0 }; |
4113 | |
4114 | hw_locks.bits.lock_dig = 1; |
4115 | inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; |
4116 | |
4117 | dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
4118 | 		true,
4119 | 		&hw_locks,
4120 | 		&inst_flags);
4121 | } else |
4122 | top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( |
4123 | top_pipe_to_program->stream_res.tg); |
4124 | } |
4125 | |
4126 | if (dc->hwss.wait_for_dcc_meta_propagation) { |
4127 | dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program); |
4128 | } |
4129 | |
4130 | if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) |
4131 | dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST); |
4132 | |
4133 | if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { |
4134 | if (dc->hwss.subvp_pipe_control_lock) |
4135 | dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); |
4136 | |
4137 | if (dc->hwss.fams2_global_control_lock) |
4138 | dc->hwss.fams2_global_control_lock(dc, context, true); |
4139 | |
4140 | dc->hwss.interdependent_update_lock(dc, context, true); |
4141 | } else { |
4142 | if (dc->hwss.subvp_pipe_control_lock) |
4143 | dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); |
4144 | |
4145 | if (dc->hwss.fams2_global_control_lock) |
4146 | dc->hwss.fams2_global_control_lock(dc, context, true); |
4147 | |
4148 | /* Lock the top pipe while updating plane addrs, since freesync requires |
4149 | * plane addr update event triggers to be synchronized. |
4150 | * top_pipe_to_program is expected to never be NULL |
4151 | */ |
4152 | dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); |
4153 | } |
4154 | |
4155 | dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); |
4156 | |
4157 | // Stream updates |
4158 | if (stream_update) |
4159 | commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); |
4160 | |
4161 | if (surface_count == 0) { |
4162 | /* |
4163 | * In case of turning off screen, no need to program front end a second time. |
4164 | * just return after program blank. |
4165 | */ |
4166 | if (dc->hwss.apply_ctx_for_surface) |
4167 | dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); |
4168 | if (dc->hwss.program_front_end_for_ctx) |
4169 | dc->hwss.program_front_end_for_ctx(dc, context); |
4170 | |
4171 | if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { |
4172 | dc->hwss.interdependent_update_lock(dc, context, false); |
4173 | } else { |
4174 | dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); |
4175 | } |
4176 | dc->hwss.post_unlock_program_front_end(dc, context); |
4177 | |
4178 | if (update_type != UPDATE_TYPE_FAST) |
4179 | if (dc->hwss.commit_subvp_config) |
4180 | dc->hwss.commit_subvp_config(dc, context); |
4181 | |
4182 | /* Since phantom pipe programming is moved to post_unlock_program_front_end, |
4183 | * move the SubVP lock to after the phantom pipes have been setup |
4184 | */ |
4185 | if (dc->hwss.subvp_pipe_control_lock) |
4186 | dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, |
4187 | NULL, subvp_prev_use); |
4188 | |
4189 | if (dc->hwss.fams2_global_control_lock) |
4190 | dc->hwss.fams2_global_control_lock(dc, context, false); |
4191 | |
4192 | return; |
4193 | } |
4194 | |
4195 | if (update_type != UPDATE_TYPE_FAST) { |
4196 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4197 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4198 | |
4199 | if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || |
4200 | dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && |
4201 | pipe_ctx->stream && pipe_ctx->plane_state) { |
4202 | /* Only update visual confirm for SUBVP and Mclk switching here. |
4203 | * The bar appears on all pipes, so we need to update the bar on all displays, |
4204 | * so the information doesn't get stale. |
4205 | */ |
4206 | dc->hwss.update_visual_confirm_color(dc, pipe_ctx, |
4207 | pipe_ctx->plane_res.hubp->inst); |
4208 | } |
4209 | } |
4210 | } |
4211 | |
4212 | for (i = 0; i < surface_count; i++) { |
4213 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
4214 | |
4215 | /*set logical flag for lock/unlock use*/ |
4216 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4217 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4218 | if (!pipe_ctx->plane_state) |
4219 | continue; |
4220 | if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) |
4221 | continue; |
4222 | pipe_ctx->plane_state->triplebuffer_flips = false; |
4223 | if (update_type == UPDATE_TYPE_FAST && |
4224 | dc->hwss.program_triplebuffer != NULL && |
4225 | !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { |
4226 | /*triple buffer for VUpdate only*/ |
4227 | pipe_ctx->plane_state->triplebuffer_flips = true; |
4228 | } |
4229 | } |
4230 | if (update_type == UPDATE_TYPE_FULL) { |
4231 | /* force vsync flip when reconfiguring pipes to prevent underflow */ |
4232 | plane_state->flip_immediate = false; |
4233 | plane_state->triplebuffer_flips = false; |
4234 | } |
4235 | } |
4236 | |
4237 | // Update Type FULL, Surface updates |
4238 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4239 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4240 | |
4241 | if (!pipe_ctx->top_pipe && |
4242 | !pipe_ctx->prev_odm_pipe && |
4243 | should_update_pipe_for_stream(context, pipe_ctx, stream)) { |
4244 | struct dc_stream_status *stream_status = NULL; |
4245 | |
4246 | if (!pipe_ctx->plane_state) |
4247 | continue; |
4248 | |
4249 | /* Full fe update*/ |
4250 | if (update_type == UPDATE_TYPE_FAST) |
4251 | continue; |
4252 | |
4253 | stream_status = |
4254 | stream_get_status(context, pipe_ctx->stream);
4255 | |
4256 | if (dc->hwss.apply_ctx_for_surface && stream_status) |
4257 | dc->hwss.apply_ctx_for_surface( |
4258 | dc, pipe_ctx->stream, stream_status->plane_count, context); |
4259 | } |
4260 | } |
4261 | |
4262 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4263 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4264 | |
4265 | if (!pipe_ctx->plane_state) |
4266 | continue; |
4267 | |
4268 | /* Full fe update*/ |
4269 | if (update_type == UPDATE_TYPE_FAST) |
4270 | continue; |
4271 | |
4272 | ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); |
4273 | if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { |
4274 | /*turn off triple buffer for full update*/ |
4275 | dc->hwss.program_triplebuffer( |
4276 | dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); |
4277 | } |
4278 | } |
4279 | |
4280 | if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { |
4281 | dc->hwss.program_front_end_for_ctx(dc, context); |
4282 | |
4283 | //Pipe busy until some frame and line # |
4284 | if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) { |
4285 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4286 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4287 | |
4288 | dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx); |
4289 | } |
4290 | } |
4291 | |
4292 | if (dc->debug.validate_dml_output) { |
4293 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4294 | struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; |
4295 | if (cur_pipe->stream == NULL) |
4296 | continue; |
4297 | |
4298 | cur_pipe->plane_res.hubp->funcs->validate_dml_output( |
4299 | cur_pipe->plane_res.hubp, dc->ctx, |
4300 | &context->res_ctx.pipe_ctx[i].rq_regs, |
4301 | &context->res_ctx.pipe_ctx[i].dlg_regs, |
4302 | &context->res_ctx.pipe_ctx[i].ttu_regs); |
4303 | } |
4304 | } |
4305 | } |
4306 | |
4307 | // Update Type FAST, Surface updates |
4308 | if (update_type == UPDATE_TYPE_FAST) { |
4309 | if (dc->hwss.set_flip_control_gsl) |
4310 | for (i = 0; i < surface_count; i++) { |
4311 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
4312 | |
4313 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4314 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4315 | |
4316 | if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) |
4317 | continue; |
4318 | |
4319 | if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) |
4320 | continue; |
4321 | |
4322 | // GSL has to be used for flip immediate |
4323 | dc->hwss.set_flip_control_gsl(pipe_ctx, |
4324 | pipe_ctx->plane_state->flip_immediate); |
4325 | } |
4326 | } |
4327 | |
4328 | /* Perform requested Updates */ |
4329 | for (i = 0; i < surface_count; i++) { |
4330 | struct dc_plane_state *plane_state = srf_updates[i].surface; |
4331 | |
4332 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4333 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4334 | |
4335 | if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) |
4336 | continue; |
4337 | |
4338 | if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) |
4339 | continue; |
4340 | |
4341 | if (srf_updates[i].cm2_params && |
4342 | srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src == |
4343 | DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM && |
4344 | srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting == |
4345 | DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT && |
4346 | dc->hwss.trigger_3dlut_dma_load) |
4347 | dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx); |
4348 | |
4349 | /*program triple buffer after lock based on flip type*/ |
4350 | if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { |
4351 | /*only enable triplebuffer for fast_update*/ |
4352 | dc->hwss.program_triplebuffer( |
4353 | dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); |
4354 | } |
4355 | if (pipe_ctx->plane_state->update_flags.bits.addr_update) |
4356 | dc->hwss.update_plane_addr(dc, pipe_ctx); |
4357 | } |
4358 | } |
4359 | } |
4360 | |
4361 | if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { |
4362 | dc->hwss.interdependent_update_lock(dc, context, false); |
4363 | } else { |
4364 | dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); |
4365 | } |
4366 | |
4367 | if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) |
4368 | if (top_pipe_to_program && |
4369 | top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { |
4370 | top_pipe_to_program->stream_res.tg->funcs->wait_for_state( |
4371 | top_pipe_to_program->stream_res.tg, |
4372 | CRTC_STATE_VACTIVE); |
4373 | top_pipe_to_program->stream_res.tg->funcs->wait_for_state( |
4374 | top_pipe_to_program->stream_res.tg, |
4375 | CRTC_STATE_VBLANK); |
4376 | top_pipe_to_program->stream_res.tg->funcs->wait_for_state( |
4377 | top_pipe_to_program->stream_res.tg, |
4378 | CRTC_STATE_VACTIVE); |
4379 | |
4380 | if (should_use_dmub_lock(stream->link)) {
4381 | union dmub_hw_lock_flags hw_locks = { 0 }; |
4382 | struct dmub_hw_lock_inst_flags inst_flags = { 0 }; |
4383 | |
4384 | hw_locks.bits.lock_dig = 1; |
4385 | inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; |
4386 | |
4387 | dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
4388 | 		false,
4389 | 		&hw_locks,
4390 | 		&inst_flags);
4391 | } else |
4392 | top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( |
4393 | top_pipe_to_program->stream_res.tg); |
4394 | } |
4395 | |
4396 | if (subvp_curr_use) { |
4397 | /* If enabling subvp or transitioning from subvp->subvp, enable the |
4398 | * phantom streams before we program front end for the phantom pipes. |
4399 | */ |
4400 | if (update_type != UPDATE_TYPE_FAST) { |
4401 | if (dc->hwss.enable_phantom_streams) |
4402 | dc->hwss.enable_phantom_streams(dc, context); |
4403 | } |
4404 | } |
4405 | |
4406 | if (update_type != UPDATE_TYPE_FAST) |
4407 | dc->hwss.post_unlock_program_front_end(dc, context); |
4408 | |
4409 | if (subvp_prev_use && !subvp_curr_use) { |
4410 | /* If disabling subvp, disable phantom streams after front end |
4411 | * programming has completed (we turn on phantom OTG in order |
4412 | * to complete the plane disable for phantom pipes). |
4413 | */ |
4414 | |
4415 | if (dc->hwss.disable_phantom_streams) |
4416 | dc->hwss.disable_phantom_streams(dc, context); |
4417 | } |
4418 | |
4419 | if (update_type != UPDATE_TYPE_FAST) |
4420 | if (dc->hwss.commit_subvp_config) |
4421 | dc->hwss.commit_subvp_config(dc, context); |
4422 | /* Since phantom pipe programming is moved to post_unlock_program_front_end, |
4423 | * move the SubVP lock to after the phantom pipes have been setup |
4424 | */ |
4425 | if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { |
4426 | if (dc->hwss.subvp_pipe_control_lock) |
4427 | dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); |
4428 | if (dc->hwss.fams2_global_control_lock) |
4429 | dc->hwss.fams2_global_control_lock(dc, context, false); |
4430 | } else { |
4431 | if (dc->hwss.subvp_pipe_control_lock) |
4432 | dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); |
4433 | if (dc->hwss.fams2_global_control_lock) |
4434 | dc->hwss.fams2_global_control_lock(dc, context, false); |
4435 | } |
4436 | |
4437 | // Fire manual trigger only when bottom plane is flipped |
4438 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
4439 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; |
4440 | |
4441 | if (!pipe_ctx->plane_state) |
4442 | continue; |
4443 | |
4444 | if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || |
4445 | !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || |
4446 | !pipe_ctx->plane_state->update_flags.bits.addr_update || |
4447 | pipe_ctx->plane_state->skip_manual_trigger) |
4448 | continue; |
4449 | |
4450 | if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) |
4451 | pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); |
4452 | } |
4453 | |
4454 | current_stream_mask = get_stream_mask(dc, context); |
4455 | if (current_stream_mask != context->stream_mask) { |
4456 | context->stream_mask = current_stream_mask; |
4457 | dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
4458 | } |
4459 | } |
4460 | |
4461 | /** |
4462 | * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change |
4463 | * |
4464 | * @dc: Used to get the current state status |
4465 |  * @stream: Target stream, from which we want to remove the attached planes
4466 |  * @srf_updates: Array of surface updates
4467 |  * @surface_count: Number of surface updates
4468 |  * @is_plane_addition: [out] Set to true if it is a plane addition case
4469 | * |
4470 | * DCN32x and newer support a feature named Dynamic ODM which can conflict with |
4471 | * the MPO if used simultaneously in some specific configurations (e.g., |
4472 | * 4k@144). This function checks if the incoming context requires applying a |
4473 | * transition state with unnecessary pipe splitting and ODM disabled to |
4474 | * circumvent our hardware limitations to prevent this edge case. If the OPP |
4475 | * associated with an MPCC might change due to plane additions, this function |
4476 | * returns true. |
4477 | * |
4478 | * Return: |
4479 | * Return true if OPP and MPCC might change, otherwise, return false. |
4480 | */ |
4481 | static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, |
4482 | struct dc_stream_state *stream, |
4483 | struct dc_surface_update *srf_updates, |
4484 | int surface_count, |
4485 | bool *is_plane_addition) |
4486 | { |
4487 | |
4488 | struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4489 | bool force_minimal_pipe_splitting = false; |
4490 | bool subvp_active = false; |
4491 | uint32_t i; |
4492 | |
4493 | *is_plane_addition = false; |
4494 | |
4495 | if (cur_stream_status && |
4496 | dc->current_state->stream_count > 0 && |
4497 | dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { |
4498 | /* determine if minimal transition is required due to MPC*/ |
4499 | if (surface_count > 0) { |
4500 | if (cur_stream_status->plane_count > surface_count) { |
4501 | force_minimal_pipe_splitting = true; |
4502 | } else if (cur_stream_status->plane_count < surface_count) { |
4503 | force_minimal_pipe_splitting = true; |
4504 | *is_plane_addition = true; |
4505 | } |
4506 | } |
4507 | } |
4508 | |
4509 | if (cur_stream_status && |
4510 | dc->current_state->stream_count == 1 && |
4511 | dc->debug.enable_single_display_2to1_odm_policy) { |
4512 | /* determine if minimal transition is required due to dynamic ODM*/ |
4513 | if (surface_count > 0) { |
4514 | if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { |
4515 | force_minimal_pipe_splitting = true; |
4516 | } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { |
4517 | force_minimal_pipe_splitting = true; |
4518 | *is_plane_addition = true; |
4519 | } |
4520 | } |
4521 | } |
4522 | |
4523 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4524 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
4525 | |
4526 | if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4527 | subvp_active = true; |
4528 | break; |
4529 | } |
4530 | } |
4531 | |
4532 | /* For SubVP when adding or removing planes we need to add a minimal transition |
4533 | * (even when disabling all planes). Whenever disabling a phantom pipe, we |
4534 | * must use the minimal transition path to disable the pipe correctly. |
4535 | * |
4536 | * We want to use the minimal transition whenever subvp is active, not only if |
4537 | * a plane is being added / removed from a subvp stream (MPO plane can be added |
4538 | * to a DRR pipe of SubVP + DRR config, in which case we still want to run through |
4539 | * a min transition to disable subvp).
4540 | */ |
4541 | if (cur_stream_status && subvp_active) { |
4542 | /* determine if minimal transition is required due to SubVP*/ |
4543 | if (cur_stream_status->plane_count > surface_count) { |
4544 | force_minimal_pipe_splitting = true; |
4545 | } else if (cur_stream_status->plane_count < surface_count) { |
4546 | force_minimal_pipe_splitting = true; |
4547 | *is_plane_addition = true; |
4548 | } |
4549 | } |
4550 | |
4551 | return force_minimal_pipe_splitting; |
4552 | } |
4553 | |
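/* Snapshot of the pipe split / ODM / SubVP debug policies that are overridden
 * while building a minimal transition state, so they can be restored later.
 */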
4554 | struct pipe_split_policy_backup { |
4555 | bool dynamic_odm_policy; |
4556 | bool subvp_policy; |
4557 | enum pipe_split_policy mpc_policy; |
4558 | char force_odm[MAX_PIPES]; |
4559 | }; |
4560 | |
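/* Save the current MPC split, dynamic ODM, SubVP and forced-ODM policies into
 * @policy, then force the most conservative settings so that validation of the
 * transition state uses the minimum pipe topology.
 */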
4561 | static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, |
4562 | struct dc_state *context, |
4563 | struct pipe_split_policy_backup *policy) |
4564 | { |
4565 | int i; |
4566 | |
4567 | if (!dc->config.is_vmin_only_asic) { |
4568 | policy->mpc_policy = dc->debug.pipe_split_policy; |
4569 | dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; |
4570 | } |
4571 | policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; |
4572 | dc->debug.enable_single_display_2to1_odm_policy = false; |
4573 | policy->subvp_policy = dc->debug.force_disable_subvp; |
4574 | dc->debug.force_disable_subvp = true; |
4575 | for (i = 0; i < context->stream_count; i++) { |
4576 | policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; |
4577 | if (context->streams[i]->debug.allow_transition_for_forced_odm) |
4578 | context->streams[i]->debug.force_odm_combine_segments = 0; |
4579 | } |
4580 | } |
4581 | |
4582 | static void restore_minimal_pipe_split_policy(struct dc *dc, |
4583 | struct dc_state *context, |
4584 | struct pipe_split_policy_backup *policy) |
4585 | { |
4586 | uint8_t i; |
4587 | |
4588 | if (!dc->config.is_vmin_only_asic) |
4589 | dc->debug.pipe_split_policy = policy->mpc_policy; |
4590 | dc->debug.enable_single_display_2to1_odm_policy = |
4591 | policy->dynamic_odm_policy; |
4592 | dc->debug.force_disable_subvp = policy->subvp_policy; |
4593 | for (i = 0; i < context->stream_count; i++) |
4594 | context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; |
4595 | } |
4596 | |
4597 | static void release_minimal_transition_state(struct dc *dc, |
4598 | struct dc_state *minimal_transition_context, |
4599 | struct dc_state *base_context, |
4600 | struct pipe_split_policy_backup *policy) |
4601 | { |
4602 | restore_minimal_pipe_split_policy(dc, base_context, policy);
4603 | dc_state_release(minimal_transition_context);
4604 | } |
4605 | |
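/* Clear flip_immediate on every plane in the transition context so all flips
 * land on vsync, preventing underflow and corruption while pipes are
 * reconfigured.
 */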
4606 | static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) |
4607 | { |
4608 | uint8_t i; |
4609 | int j; |
4610 | struct dc_stream_status *stream_status; |
4611 | |
4612 | for (i = 0; i < context->stream_count; i++) { |
4613 | stream_status = &context->stream_status[i]; |
4614 | |
4615 | for (j = 0; j < stream_status->plane_count; j++) |
4616 | stream_status->plane_states[j]->flip_immediate = false; |
4617 | } |
4618 | } |
4619 | |
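/* Create a copy of @base_context and re-validate it with pipe split, dynamic
 * ODM and SubVP forced off. Returns NULL on failure; otherwise the caller must
 * release the copy with release_minimal_transition_state().
 */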
4620 | static struct dc_state *create_minimal_transition_state(struct dc *dc, |
4621 | struct dc_state *base_context, struct pipe_split_policy_backup *policy) |
4622 | { |
4623 | struct dc_state *minimal_transition_context = NULL; |
4624 | |
4625 | minimal_transition_context = dc_state_create_copy(base_context);
4626 | if (!minimal_transition_context) |
4627 | return NULL; |
4628 | |
4629 | backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
4630 | /* commit minimal state */ |
4631 | if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) { |
4632 | /* prevent underflow and corruption when reconfiguring pipes */ |
4633 | force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
4634 | } else { |
4635 | /* |
4636 | * This should never happen; the minimal transition state should
4637 | * always be validated before pipe split features are added.
4638 | */ |
4639 | release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); |
4640 | BREAK_TO_DEBUGGER(); |
4641 | minimal_transition_context = NULL; |
4642 | } |
4643 | return minimal_transition_context; |
4644 | } |
4645 | |
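/* True only if both hops of the transition, initial -> intermediate and
 * intermediate -> final, are seamless pipe topology changes.
 */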
4646 | static bool is_pipe_topology_transition_seamless_with_intermediate_step( |
4647 | struct dc *dc, |
4648 | struct dc_state *initial_state, |
4649 | struct dc_state *intermediate_state, |
4650 | struct dc_state *final_state) |
4651 | { |
4652 | return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, |
4653 | intermediate_state) && |
4654 | dc->hwss.is_pipe_topology_transition_seamless(dc, |
4655 | intermediate_state, final_state); |
4656 | } |
4657 | |
4658 | static void swap_and_release_current_context(struct dc *dc, |
4659 | struct dc_state *new_context, struct dc_stream_state *stream) |
4660 | { |
4661 | |
4662 | int i; |
4663 | struct dc_state *old = dc->current_state; |
4664 | struct pipe_ctx *pipe_ctx; |
4665 | |
4666 | /* Since memory free requires elevated IRQ, an interrupt |
4667 | * request is generated by mem free. If this happens |
4668 | * between freeing and reassigning the context, our vsync |
4669 | * interrupt will call into dc and cause a memory |
4670 | * corruption. Hence, we first reassign the context, |
4671 | * then free the old context. |
4672 | */ |
4673 | dc->current_state = new_context; |
4674 | dc_state_release(old);
4675 | |
4676 | // clear any forced full updates |
4677 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4678 | pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; |
4679 | |
4680 | if (pipe_ctx->plane_state && pipe_ctx->stream == stream) |
4681 | pipe_ctx->plane_state->force_full_update = false; |
4682 | } |
4683 | } |
4684 | |
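/* Fill @srf_updates with the stream's existing planes (no changes) and return
 * the number of planes populated.
 */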
4685 | static int initialize_empty_surface_updates( |
4686 | struct dc_stream_state *stream, |
4687 | struct dc_surface_update *srf_updates) |
4688 | { |
4689 | struct dc_stream_status *status = dc_stream_get_status(stream);
4690 | int i; |
4691 | |
4692 | if (!status) |
4693 | return 0; |
4694 | |
4695 | for (i = 0; i < status->plane_count; i++) |
4696 | srf_updates[i].surface = status->plane_states[i]; |
4697 | |
4698 | return status->plane_count; |
4699 | } |
4700 | |
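/* Try committing a minimal transition state derived from the new context.
 * Returns true if the intermediate state made the topology change seamless
 * and was committed.
 */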
4701 | static bool commit_minimal_transition_based_on_new_context(struct dc *dc, |
4702 | struct dc_state *new_context, |
4703 | struct dc_stream_state *stream, |
4704 | struct dc_surface_update *srf_updates, |
4705 | int surface_count) |
4706 | { |
4707 | bool success = false; |
4708 | struct pipe_split_policy_backup policy; |
4709 | struct dc_state *intermediate_context = |
4710 | create_minimal_transition_state(dc, new_context,
4711 | &policy);
4712 | |
4713 | if (intermediate_context) { |
4714 | if (is_pipe_topology_transition_seamless_with_intermediate_step( |
4715 | dc, |
4716 | dc->current_state,
4717 | intermediate_context,
4718 | new_context)) {
4719 | DC_LOG_DC("commit minimal transition state: base = new state\n"); |
4720 | commit_planes_for_stream(dc, srf_updates, |
4721 | surface_count, stream, NULL, |
4722 | UPDATE_TYPE_FULL, intermediate_context);
4723 | swap_and_release_current_context( |
4724 | dc, intermediate_context, stream);
4725 | dc_state_retain(dc->current_state);
4726 | success = true; |
4727 | } |
4728 | release_minimal_transition_state( |
4729 | dc, intermediate_context, new_context, &policy);
4730 | } |
4731 | return success; |
4732 | } |
4733 | |
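/* Fallback: derive the minimal transition state from the current context.
 * Stream and plane states are temporarily restored from the scratch backup of
 * the current state and switched back to the new state's values on return.
 */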
4734 | static bool commit_minimal_transition_based_on_current_context(struct dc *dc, |
4735 | struct dc_state *new_context, struct dc_stream_state *stream) |
4736 | { |
4737 | bool success = false; |
4738 | struct pipe_split_policy_backup policy; |
4739 | struct dc_state *intermediate_context; |
4740 | struct dc_state *old_current_state = dc->current_state; |
4741 | struct dc_surface_update srf_updates[MAX_SURFACES] = {0}; |
4742 | int surface_count; |
4743 | |
4744 | /* |
4745 | * Both current and new contexts share the same stream and plane state |
4746 | * pointers. When new context is validated, stream and planes get |
4747 | * populated with new updates such as new plane addresses. This makes |
4748 | * the current context no longer valid because stream and planes are |
4749 | * modified from the original. We backup current stream and plane states |
4750 | * into scratch space whenever we are populating new context. So we can |
4751 | * restore the original values back by calling the restore function now. |
4752 | * This restores back the original stream and plane states associated |
4753 | * with the current state. |
4754 | */ |
4755 | restore_planes_and_stream_state(&dc->scratch.current_state, stream);
4756 | dc_state_retain(old_current_state);
4757 | intermediate_context = create_minimal_transition_state(dc,
4758 | old_current_state, &policy);
4759 | |
4760 | if (intermediate_context) { |
4761 | if (is_pipe_topology_transition_seamless_with_intermediate_step( |
4762 | dc, |
4763 | dc->current_state,
4764 | intermediate_context,
4765 | new_context)) {
4766 | DC_LOG_DC("commit minimal transition state: base = current state\n"); |
4767 | surface_count = initialize_empty_surface_updates( |
4768 | stream, srf_updates); |
4769 | commit_planes_for_stream(dc, srf_updates, |
4770 | surface_count, stream, NULL, |
4771 | UPDATE_TYPE_FULL, intermediate_context);
4772 | swap_and_release_current_context( |
4773 | dc, intermediate_context, stream);
4774 | dc_state_retain(dc->current_state);
4775 | success = true; |
4776 | } |
4777 | release_minimal_transition_state(dc, intermediate_context,
4778 | old_current_state, &policy);
4779 | } |
4780 | dc_state_release(old_current_state);
4781 | /* |
4782 | * Restore stream and plane states back to the values associated with |
4783 | * new context. |
4784 | */ |
4785 | restore_planes_and_stream_state(&dc->scratch.new_state, stream);
4786 | return success; |
4787 | } |
4788 | |
4789 | /** |
4790 | * commit_minimal_transition_state_in_dc_update - Commit a minimal state based |
4791 | * on current or new context |
4792 | * |
4793 | * @dc: DC structure, used to get the current state |
4794 | * @new_context: New context |
4795 | * @stream: Stream getting the update for the flip |
4796 | * @srf_updates: Surface updates |
4797 | * @surface_count: Number of surfaces |
4798 | * |
4799 | * The function takes in the current state and the new state and determines a
4800 | * minimal transition state as the intermediate step which could make the
4801 | * transition between the current and new states seamless. If found, it commits
4802 | * the minimal transition state, updates the current state to this minimal
4803 | * transition state and returns true; if not, it returns false.
4804 | * |
4805 | * Return: |
4806 | * Return True if the minimal transition succeeded, false otherwise |
4807 | */ |
4808 | static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, |
4809 | struct dc_state *new_context, |
4810 | struct dc_stream_state *stream, |
4811 | struct dc_surface_update *srf_updates, |
4812 | int surface_count) |
4813 | { |
4814 | bool success = commit_minimal_transition_based_on_new_context( |
4815 | dc, new_context, stream, srf_updates, |
4816 | surface_count); |
4817 | if (!success) |
4818 | success = commit_minimal_transition_based_on_current_context(dc, |
4819 | new_context, stream); |
4820 | if (!success) |
4821 | DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); |
4822 | return success; |
4823 | } |
4824 | |
4825 | /** |
4826 | * commit_minimal_transition_state - Create a transition pipe split state |
4827 | * |
4828 | * @dc: Used to get the current state status |
4829 | * @transition_base_context: New transition state |
4830 | * |
4831 | * In some specific configurations, such as pipe split on multi-display with |
4832 | * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe |
4833 | * programming when moving to new planes. To mitigate those types of problems, |
4834 | * this function adds a transition state that minimizes pipe usage before |
4835 | * programming the new configuration. When adding a new plane, the current |
4836 | * state requires the least pipes, so it is applied without splitting. When |
4837 | * removing a plane, the new state requires the least pipes, so it is applied |
4838 | * without splitting. |
4839 | * |
4840 | * Return: |
4841 | * Return false if something is wrong in the transition state. |
4842 | */ |
4843 | static bool commit_minimal_transition_state(struct dc *dc, |
4844 | struct dc_state *transition_base_context) |
4845 | { |
4846 | struct dc_state *transition_context; |
4847 | struct pipe_split_policy_backup policy; |
4848 | enum dc_status ret = DC_ERROR_UNEXPECTED; |
4849 | unsigned int i, j; |
4850 | unsigned int pipe_in_use = 0; |
4851 | bool subvp_in_use = false; |
4852 | bool odm_in_use = false; |
4853 | |
4854 | /* check current pipes in use*/ |
4855 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4856 | struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; |
4857 | |
4858 | if (pipe->plane_state) |
4859 | pipe_in_use++; |
4860 | } |
4861 | |
4862 | /* If SubVP is enabled and we are adding or removing planes from any main subvp |
4863 | * pipe, we must use the minimal transition. |
4864 | */ |
4865 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4866 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
4867 | |
4868 | if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4869 | subvp_in_use = true; |
4870 | break; |
4871 | } |
4872 | } |
4873 | |
4874 | /* If ODM is enabled and we are adding or removing planes from any ODM |
4875 | * pipe, we must use the minimal transition. |
4876 | */ |
4877 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
4878 | struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; |
4879 | |
4880 | if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4881 | odm_in_use = resource_get_odm_slice_count(pipe) > 1; |
4882 | break; |
4883 | } |
4884 | } |
4885 | |
4886 | /* When the OS adds a new surface while all pipes are already used for ODM combine
4887 | * and MPC split, we must use commit_minimal_transition_state to transition safely.
4888 | * After the OS exits MPO and goes back to using ODM and MPC split on all pipes, we
4889 | * need to call it again. Otherwise, return true to skip.
4890 | *
4891 | * This reduces the scenarios where dc_commit_state_no_check is used during flips,
4892 | * especially entering/exiting MPO while DCN still has enough resources.
4893 | */
4894 | if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) |
4895 | return true; |
4896 | |
4897 | DC_LOG_DC("%s base = %s state, reason = %s\n", __func__, |
4898 | dc->current_state == transition_base_context ? "current": "new", |
4899 | subvp_in_use ? "Subvp In Use": |
4900 | odm_in_use ? "ODM in Use": |
4901 | dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use": |
4902 | "Unknown"); |
4903 | |
4904 | dc_state_retain(transition_base_context);
4905 | transition_context = create_minimal_transition_state(dc,
4906 | transition_base_context, &policy);
4907 | if (transition_context) {
4908 | ret = dc_commit_state_no_check(dc, transition_context);
4909 | release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
4910 | }
4911 | dc_state_release(transition_base_context);
4912 | |
4913 | if (ret != DC_OK) { |
4914 | /* this should never happen */ |
4915 | BREAK_TO_DEBUGGER(); |
4916 | return false; |
4917 | } |
4918 | |
4919 | /* force full surface update */ |
4920 | for (i = 0; i < dc->current_state->stream_count; i++) { |
4921 | for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { |
4922 | dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; |
4923 | } |
4924 | } |
4925 | |
4926 | return true; |
4927 | } |
4928 | |
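/* Gather the update fields that can be programmed without a full pipe
 * reconfiguration (flip address, gamma, CSC matrices, etc.) into @fast_update
 * for the fast-update checks below.
 */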
4929 | void populate_fast_updates(struct dc_fast_update *fast_update, |
4930 | struct dc_surface_update *srf_updates, |
4931 | int surface_count, |
4932 | struct dc_stream_update *stream_update) |
4933 | { |
4934 | int i = 0; |
4935 | |
4936 | if (stream_update) { |
4937 | fast_update[0].out_transfer_func = stream_update->out_transfer_func; |
4938 | fast_update[0].output_csc_transform = stream_update->output_csc_transform; |
4939 | } else { |
4940 | fast_update[0].out_transfer_func = NULL; |
4941 | fast_update[0].output_csc_transform = NULL; |
4942 | } |
4943 | |
4944 | for (i = 0; i < surface_count; i++) { |
4945 | fast_update[i].flip_addr = srf_updates[i].flip_addr; |
4946 | fast_update[i].gamma = srf_updates[i].gamma; |
4947 | fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; |
4948 | fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; |
4949 | fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; |
4950 | fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix; |
4951 | } |
4952 | } |
4953 | |
4954 | static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) |
4955 | { |
4956 | int i; |
4957 | |
4958 | if (fast_update[0].out_transfer_func || |
4959 | fast_update[0].output_csc_transform) |
4960 | return true; |
4961 | |
4962 | for (i = 0; i < surface_count; i++) { |
4963 | if (fast_update[i].flip_addr || |
4964 | fast_update[i].gamma || |
4965 | fast_update[i].gamut_remap_matrix || |
4966 | fast_update[i].input_csc_color_matrix || |
4967 | fast_update[i].cursor_csc_color_matrix || |
4968 | fast_update[i].coeff_reduction_factor) |
4969 | return true; |
4970 | } |
4971 | |
4972 | return false; |
4973 | } |
4974 | |
4975 | bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count) |
4976 | { |
4977 | int i; |
4978 | |
4979 | if (fast_update[0].out_transfer_func || |
4980 | fast_update[0].output_csc_transform) |
4981 | return true; |
4982 | |
4983 | for (i = 0; i < surface_count; i++) { |
4984 | if (fast_update[i].input_csc_color_matrix || |
4985 | fast_update[i].gamma || |
4986 | fast_update[i].gamut_remap_matrix || |
4987 | fast_update[i].coeff_reduction_factor || |
4988 | fast_update[i].cursor_csc_color_matrix) |
4989 | return true; |
4990 | } |
4991 | |
4992 | return false; |
4993 | } |
4994 | |
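/* Return true if any surface or stream update touches state that cannot be
 * applied as a fast update and therefore requires a full update.
 */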
4995 | static bool full_update_required(struct dc *dc, |
4996 | struct dc_surface_update *srf_updates, |
4997 | int surface_count, |
4998 | struct dc_stream_update *stream_update, |
4999 | struct dc_stream_state *stream) |
5000 | { |
5001 | |
5002 | int i; |
5003 | struct dc_stream_status *stream_status; |
5004 | const struct dc_state *context = dc->current_state; |
5005 | |
5006 | for (i = 0; i < surface_count; i++) { |
5007 | if (srf_updates && |
5008 | (srf_updates[i].plane_info || |
5009 | srf_updates[i].scaling_info || |
5010 | (srf_updates[i].hdr_mult.value && |
5011 | srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || |
5012 | (srf_updates[i].sdr_white_level_nits && |
5013 | srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || |
5014 | srf_updates[i].in_transfer_func || |
5015 | srf_updates[i].func_shaper || |
5016 | srf_updates[i].lut3d_func || |
5017 | srf_updates[i].surface->force_full_update || |
5018 | (srf_updates[i].flip_addr && |
5019 | srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || |
5020 | (srf_updates[i].cm2_params && |
5021 | (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || |
5022 | srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || |
5023 | !is_surface_in_context(context, srf_updates[i].surface)))
5024 | return true; |
5025 | } |
5026 | |
5027 | if (stream_update && |
5028 | (((stream_update->src.height != 0 && stream_update->src.width != 0) || |
5029 | (stream_update->dst.height != 0 && stream_update->dst.width != 0) || |
5030 | stream_update->integer_scaling_update) || |
5031 | stream_update->hdr_static_metadata || |
5032 | stream_update->abm_level || |
5033 | stream_update->periodic_interrupt || |
5034 | stream_update->vrr_infopacket || |
5035 | stream_update->vsc_infopacket || |
5036 | stream_update->vsp_infopacket || |
5037 | stream_update->hfvsif_infopacket || |
5038 | stream_update->vtem_infopacket || |
5039 | stream_update->adaptive_sync_infopacket || |
5040 | stream_update->dpms_off || |
5041 | stream_update->allow_freesync || |
5042 | stream_update->vrr_active_variable || |
5043 | stream_update->vrr_active_fixed || |
5044 | stream_update->gamut_remap || |
5045 | stream_update->output_color_space || |
5046 | stream_update->dither_option || |
5047 | stream_update->wb_update || |
5048 | stream_update->dsc_config || |
5049 | stream_update->mst_bw_update || |
5050 | stream_update->func_shaper || |
5051 | stream_update->lut3d_func || |
5052 | stream_update->pending_test_pattern || |
5053 | stream_update->crtc_timing_adjust || |
5054 | stream_update->scaler_sharpener_update || |
5055 | stream_update->hw_cursor_req)) |
5056 | return true; |
5057 | |
5058 | if (stream) { |
5059 | stream_status = dc_stream_get_status(stream);
5060 | if (stream_status == NULL || stream_status->plane_count != surface_count) |
5061 | return true; |
5062 | } |
5063 | if (dc->idle_optimizations_allowed) |
5064 | return true; |
5065 | |
5066 | if (dc_can_clear_cursor_limit(dc)) |
5067 | return true; |
5068 | |
5069 | return false; |
5070 | } |
5071 | |
5072 | static bool fast_update_only(struct dc *dc, |
5073 | struct dc_fast_update *fast_update, |
5074 | struct dc_surface_update *srf_updates, |
5075 | int surface_count, |
5076 | struct dc_stream_update *stream_update, |
5077 | struct dc_stream_state *stream) |
5078 | { |
5079 | return fast_updates_exist(fast_update, surface_count) |
5080 | && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); |
5081 | } |
5082 | |
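/* Original update sequence, still used by dc_commit_updates_for_stream() for
 * ASICs older than DCN 3.2. Builds a new context from the caller-provided
 * state for full updates.
 */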
5083 | static bool update_planes_and_stream_v1(struct dc *dc, |
5084 | struct dc_surface_update *srf_updates, int surface_count, |
5085 | struct dc_stream_state *stream, |
5086 | struct dc_stream_update *stream_update, |
5087 | struct dc_state *state) |
5088 | { |
5089 | const struct dc_stream_status *stream_status; |
5090 | enum surface_update_type update_type; |
5091 | struct dc_state *context; |
5092 | struct dc_context *dc_ctx = dc->ctx; |
5093 | int i, j; |
5094 | struct dc_fast_update fast_update[MAX_SURFACES] = {0}; |
5095 | |
5096 | dc_exit_ips_for_hw_access(dc); |
5097 | |
5098 | populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); |
5099 | stream_status = dc_stream_get_status(stream);
5100 | context = dc->current_state; |
5101 | |
5102 | update_type = dc_check_update_surfaces_for_stream( |
5103 | dc, srf_updates, surface_count, stream_update, stream_status);
5104 | /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. |
5105 | * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip |
5106 | * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. |
5107 | */ |
5108 | force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
5109 | |
5110 | if (update_type >= UPDATE_TYPE_FULL) { |
5111 | |
5112 | /* initialize scratch memory for building context */ |
5113 | context = dc_state_create_copy(state);
5114 | if (context == NULL) { |
5115 | DC_ERROR("Failed to allocate new validate context!\n"); |
5116 | return false; |
5117 | } |
5118 | |
5119 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
5120 | struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; |
5121 | struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
5122 | |
5123 | if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) |
5124 | new_pipe->plane_state->force_full_update = true; |
5125 | } |
5126 | } else if (update_type == UPDATE_TYPE_FAST) { |
5127 | /* |
5128 | * Previous frame finished and HW is ready for optimization. |
5129 | */ |
5130 | dc_post_update_surfaces_to_stream(dc); |
5131 | } |
5132 | |
5133 | for (i = 0; i < surface_count; i++) { |
5134 | struct dc_plane_state *surface = srf_updates[i].surface; |
5135 | |
5136 | copy_surface_update_to_plane(surface, &srf_updates[i]);
5137 | |
5138 | if (update_type >= UPDATE_TYPE_MED) { |
5139 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
5140 | struct pipe_ctx *pipe_ctx = |
5141 | &context->res_ctx.pipe_ctx[j]; |
5142 | |
5143 | if (pipe_ctx->plane_state != surface) |
5144 | continue; |
5145 | |
5146 | resource_build_scaling_params(pipe_ctx); |
5147 | } |
5148 | } |
5149 | } |
5150 | |
5151 | copy_stream_update_to_stream(dc, context, stream, stream_update);
5152 | |
5153 | if (update_type >= UPDATE_TYPE_FULL) { |
5154 | if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { |
5155 | DC_ERROR("Mode validation failed for stream update!\n"); |
5156 | dc_state_release(context);
5157 | return false; |
5158 | } |
5159 | } |
5160 | |
5161 | TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); |
5162 | |
5163 | if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && |
5164 | !dc->debug.enable_legacy_fast_update) { |
5165 | commit_planes_for_stream_fast(dc, |
5166 | srf_updates, |
5167 | surface_count, |
5168 | stream, |
5169 | stream_update, |
5170 | update_type, |
5171 | context); |
5172 | } else { |
5173 | commit_planes_for_stream( |
5174 | dc, |
5175 | srf_updates, |
5176 | surface_count, |
5177 | stream, |
5178 | stream_update, |
5179 | update_type, |
5180 | context); |
5181 | } |
5182 | /* update current_state */
5183 | if (dc->current_state != context) { |
5184 | |
5185 | struct dc_state *old = dc->current_state; |
5186 | |
5187 | dc->current_state = context; |
5188 | dc_state_release(old);
5189 | |
5190 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
5191 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
5192 | |
5193 | if (pipe_ctx->plane_state && pipe_ctx->stream == stream) |
5194 | pipe_ctx->plane_state->force_full_update = false; |
5195 | } |
5196 | } |
5197 | |
5198 | /* Legacy optimization path for DCE. */ |
5199 | if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { |
5200 | dc_post_update_surfaces_to_stream(dc); |
5201 | TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); |
5202 | } |
5203 | return true; |
5204 | } |
5205 | |
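/* Update sequence used for ASICs older than DCN 4.01. Predicts the need for a
 * minimal transition with could_mpcc_tree_change_for_active_pipes() before
 * committing planes and stream.
 */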
5206 | static bool update_planes_and_stream_v2(struct dc *dc, |
5207 | struct dc_surface_update *srf_updates, int surface_count, |
5208 | struct dc_stream_state *stream, |
5209 | struct dc_stream_update *stream_update) |
5210 | { |
5211 | struct dc_state *context; |
5212 | enum surface_update_type update_type; |
5213 | struct dc_fast_update fast_update[MAX_SURFACES] = {0}; |
5214 | |
5215 | /* In cases where MPO and split or ODM are used, transitions can
5216 | * cause underflow. Apply stream configuration with minimal pipe |
5217 | * split first to avoid unsupported transitions for active pipes. |
5218 | */ |
5219 | bool force_minimal_pipe_splitting = 0; |
5220 | bool is_plane_addition = 0; |
5221 | bool is_fast_update_only; |
5222 | |
5223 | populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); |
5224 | is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, |
5225 | surface_count, stream_update, stream); |
5226 | force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( |
5227 | dc, |
5228 | stream, |
5229 | srf_updates, |
5230 | surface_count, |
5231 | &is_plane_addition);
5232 | |
5233 | /* on plane addition, minimal state is the current one */ |
5234 | if (force_minimal_pipe_splitting && is_plane_addition && |
5235 | !commit_minimal_transition_state(dc, dc->current_state))
5236 | return false; |
5237 | |
5238 | if (!update_planes_and_stream_state( |
5239 | dc, |
5240 | srf_updates, |
5241 | surface_count, |
5242 | stream, |
5243 | stream_update, |
5244 | &update_type,
5245 | &context))
5246 | return false; |
5247 | |
5248 | /* on plane removal, minimal state is the new one */ |
5249 | if (force_minimal_pipe_splitting && !is_plane_addition) { |
5250 | if (!commit_minimal_transition_state(dc, context)) {
5251 | dc_state_release(context);
5252 | return false; |
5253 | } |
5254 | update_type = UPDATE_TYPE_FULL; |
5255 | } |
5256 | |
5257 | if (dc->hwss.is_pipe_topology_transition_seamless && |
5258 | !dc->hwss.is_pipe_topology_transition_seamless( |
5259 | dc, dc->current_state, context)) |
5260 | commit_minimal_transition_state_in_dc_update(dc, context, stream,
5261 | srf_updates, surface_count); |
5262 | |
5263 | if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { |
5264 | commit_planes_for_stream_fast(dc, |
5265 | srf_updates, |
5266 | surface_count, |
5267 | stream, |
5268 | stream_update, |
5269 | update_type, |
5270 | context); |
5271 | } else { |
5272 | if (!stream_update && |
5273 | dc->hwss.is_pipe_topology_transition_seamless && |
5274 | !dc->hwss.is_pipe_topology_transition_seamless( |
5275 | dc, dc->current_state, context)) { |
5276 | DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); |
5277 | BREAK_TO_DEBUGGER(); |
5278 | } |
5279 | commit_planes_for_stream( |
5280 | dc, |
5281 | srf_updates, |
5282 | surface_count, |
5283 | stream, |
5284 | stream_update, |
5285 | update_type, |
5286 | context); |
5287 | } |
5288 | if (dc->current_state != context) |
5289 | swap_and_release_current_context(dc, context, stream);
5290 | return true; |
5291 | } |
5292 | |
5293 | static void commit_planes_and_stream_update_on_current_context(struct dc *dc, |
5294 | struct dc_surface_update *srf_updates, int surface_count, |
5295 | struct dc_stream_state *stream, |
5296 | struct dc_stream_update *stream_update, |
5297 | enum surface_update_type update_type) |
5298 | { |
5299 | struct dc_fast_update fast_update[MAX_SURFACES] = {0}; |
5300 | |
5301 | ASSERT(update_type < UPDATE_TYPE_FULL); |
5302 | populate_fast_updates(fast_update, srf_updates, surface_count, |
5303 | stream_update); |
5304 | if (fast_update_only(dc, fast_update, srf_updates, surface_count, |
5305 | stream_update, stream) && |
5306 | !dc->debug.enable_legacy_fast_update) |
5307 | commit_planes_for_stream_fast(dc, |
5308 | srf_updates, |
5309 | surface_count, |
5310 | stream, |
5311 | stream_update, |
5312 | update_type, |
5313 | dc->current_state);
5314 | else |
5315 | commit_planes_for_stream( |
5316 | dc, |
5317 | srf_updates, |
5318 | surface_count, |
5319 | stream, |
5320 | stream_update, |
5321 | update_type, |
5322 | dc->current_state);
5323 | } |
5324 | |
5325 | static void commit_planes_and_stream_update_with_new_context(struct dc *dc, |
5326 | struct dc_surface_update *srf_updates, int surface_count, |
5327 | struct dc_stream_state *stream, |
5328 | struct dc_stream_update *stream_update, |
5329 | enum surface_update_type update_type, |
5330 | struct dc_state *new_context) |
5331 | { |
5332 | ASSERT(update_type >= UPDATE_TYPE_FULL); |
5333 | if (!dc->hwss.is_pipe_topology_transition_seamless(dc, |
5334 | dc->current_state, new_context)) |
5335 | /* |
5336 | * It is required by the feature design that all pipe topologies |
5337 | * using extra free pipes for power saving purposes such as |
5338 | * dynamic ODM or SubVp shall only be enabled when it can be |
5339 | * transitioned seamlessly to AND from its minimal transition |
5340 | * state. A minimal transition state is defined as the same dc |
5341 | * state but with all power saving features disabled. So it uses |
5342 | * the minimum pipe topology. When we can't seamlessly |
5343 | * transition from state A to state B, we will insert the |
5344 | * minimal transition state A' or B' in between so seamless |
5345 | * transition between A and B can be made possible. |
5346 | */ |
5347 | commit_minimal_transition_state_in_dc_update(dc, new_context, |
5348 | stream, srf_updates, surface_count); |
5349 | |
5350 | commit_planes_for_stream( |
5351 | dc, |
5352 | srf_updates, |
5353 | surface_count, |
5354 | stream, |
5355 | stream_update, |
5356 | update_type, |
5357 | new_context);
5358 | } |
5359 | |
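/* Update sequence for DCN 4.01 and newer. Splits fast and full update paths
 * and inserts a minimal transition only when the actual current -> new
 * topology change is not seamless.
 */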
5360 | static bool update_planes_and_stream_v3(struct dc *dc, |
5361 | struct dc_surface_update *srf_updates, int surface_count, |
5362 | struct dc_stream_state *stream, |
5363 | struct dc_stream_update *stream_update) |
5364 | { |
5365 | struct dc_state *new_context; |
5366 | enum surface_update_type update_type; |
5367 | |
5368 | /* |
5369 | * When this function returns true and new_context is not equal to |
5370 | * current state, the function allocates and validates a new dc state |
5371 | * and assigns it to new_context. The function expects that the caller |
5372 | * is responsible to free this memory when new_context is no longer |
5373 | * used. We swap current with new context and free current instead. So |
5374 | * new_context's memory will live until the next full update after it is |
5375 | * replaced by a newer context. Refer to the use of |
5376 | * swap_and_release_current_context below.
5377 | */ |
5378 | if (!update_planes_and_stream_state(dc, srf_updates, surface_count, |
5379 | stream, stream_update, &update_type,
5380 | &new_context))
5381 | return false; |
5382 | |
5383 | if (new_context == dc->current_state) { |
5384 | commit_planes_and_stream_update_on_current_context(dc, |
5385 | srf_updates, surface_count, stream, |
5386 | stream_update, update_type); |
5387 | } else { |
5388 | commit_planes_and_stream_update_with_new_context(dc, |
5389 | srf_updates, surface_count, stream, |
5390 | stream_update, update_type, new_context); |
5391 | swap_and_release_current_context(dc, new_context, stream); |
5392 | } |
5393 | |
5394 | return true; |
5395 | } |
5396 | |
5397 | static void clear_update_flags(struct dc_surface_update *srf_updates, |
5398 | int surface_count, struct dc_stream_state *stream) |
5399 | { |
5400 | int i; |
5401 | |
5402 | if (stream) |
5403 | stream->update_flags.raw = 0; |
5404 | |
5405 | for (i = 0; i < surface_count; i++) |
5406 | if (srf_updates[i].surface) |
5407 | srf_updates[i].surface->update_flags.raw = 0; |
5408 | } |
5409 | |
5410 | bool dc_update_planes_and_stream(struct dc *dc, |
5411 | struct dc_surface_update *srf_updates, int surface_count, |
5412 | struct dc_stream_state *stream, |
5413 | struct dc_stream_update *stream_update) |
5414 | { |
5415 | bool ret = false; |
5416 | |
5417 | dc_exit_ips_for_hw_access(dc); |
5418 | /* |
5419 | * update planes and stream version 3 separates FULL and FAST updates |
5420 | * to their own sequences. It aims to clean up frequent checks for |
5421 | * update type resulting unnecessary branching in logic flow. It also |
5422 | * adds a new commit minimal transition sequence, which detects the need |
5423 | * for minimal transition based on the actual comparison of current and |
5424 | * new states instead of "predicting" it based on per feature software |
5425 | * policy, i.e. could_mpcc_tree_change_for_active_pipes. The new commit
5426 | * minimal transition sequence is made universal to any power saving |
5427 | * features that would use extra free pipes such as Dynamic ODM/MPC |
5428 | * Combine, MPO or SubVp. Therefore there is no longer a need to |
5429 | * specially handle compatibility problems with transitions among those |
5430 | * features as they are now transparent to the new sequence. |
5431 | */ |
5432 | if (dc->ctx->dce_version >= DCN_VERSION_4_01) |
5433 | ret = update_planes_and_stream_v3(dc, srf_updates, |
5434 | surface_count, stream, stream_update); |
5435 | else |
5436 | ret = update_planes_and_stream_v2(dc, srf_updates, |
5437 | surface_count, stream, stream_update); |
5438 | |
5439 | if (ret) |
5440 | clear_update_flags(srf_updates, surface_count, stream); |
5441 | |
5442 | return ret; |
5443 | } |
5444 | |
5445 | void dc_commit_updates_for_stream(struct dc *dc, |
5446 | struct dc_surface_update *srf_updates, |
5447 | int surface_count, |
5448 | struct dc_stream_state *stream, |
5449 | struct dc_stream_update *stream_update, |
5450 | struct dc_state *state) |
5451 | { |
5452 | bool ret = false; |
5453 | |
5454 | dc_exit_ips_for_hw_access(dc); |
5455 | /* TODO: Since change commit sequence can have a huge impact, |
5456 | * we decided to only enable it for DCN3x. However, as soon as |
5457 | * we get more confident about this change we'll need to enable |
5458 | * the new sequence for all ASICs. |
5459 | */ |
5460 | if (dc->ctx->dce_version >= DCN_VERSION_4_01) { |
5461 | ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, |
5462 | stream, stream_update); |
5463 | } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) { |
5464 | ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, |
5465 | stream, stream_update); |
5466 | } else |
5467 | ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, |
5468 | stream_update, state); |
5469 | |
5470 | if (ret) |
5471 | clear_update_flags(srf_updates, surface_count, stream); |
5472 | } |
5473 | |
5474 | uint8_t dc_get_current_stream_count(struct dc *dc) |
5475 | { |
5476 | return dc->current_state->stream_count; |
5477 | } |
5478 | |
5479 | struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) |
5480 | { |
5481 | if (i < dc->current_state->stream_count) |
5482 | return dc->current_state->streams[i]; |
5483 | return NULL; |
5484 | } |
5485 | |
5486 | enum dc_irq_source dc_interrupt_to_irq_source( |
5487 | struct dc *dc, |
5488 | uint32_t src_id, |
5489 | uint32_t ext_id) |
5490 | { |
5491 | return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
5492 | } |
5493 | |
5494 | /* |
5495 | * dc_interrupt_set() - Enable/disable an AMD hw interrupt source |
5496 | */ |
5497 | bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) |
5498 | { |
5499 | |
5500 | if (dc == NULL) |
5501 | return false; |
5502 | |
5503 | return dal_irq_service_set(dc->res_pool->irqs, src, enable);
5504 | } |
5505 | |
5506 | void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) |
5507 | { |
5508 | dal_irq_service_ack(dc->res_pool->irqs, src);
5509 | } |
5510 | |
5511 | void dc_power_down_on_boot(struct dc *dc) |
5512 | { |
5513 | if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && |
5514 | dc->hwss.power_down_on_boot) { |
5515 | if (dc->caps.ips_support) |
5516 | dc_exit_ips_for_hw_access(dc); |
5517 | dc->hwss.power_down_on_boot(dc); |
5518 | } |
5519 | } |
5520 | |
5521 | void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) |
5522 | { |
5523 | if (!dc->current_state) |
5524 | return; |
5525 | |
5526 | switch (power_state) { |
5527 | case DC_ACPI_CM_POWER_STATE_D0: |
5528 | dc_state_construct(dc, dc->current_state);
5529 | |
5530 | dc_exit_ips_for_hw_access(dc); |
5531 | |
5532 | dc_z10_restore(dc); |
5533 | |
5534 | dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
5535 | |
5536 | dc->hwss.init_hw(dc); |
5537 | |
5538 | if (dc->hwss.init_sys_ctx != NULL && |
5539 | dc->vm_pa_config.valid) { |
5540 | dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); |
5541 | } |
5542 | break; |
5543 | default: |
5544 | ASSERT(dc->current_state->stream_count == 0); |
5545 | dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
5546 | |
5547 | dc_state_destruct(dc->current_state);
5548 | |
5549 | break; |
5550 | } |
5551 | } |
5552 | |
5553 | void dc_resume(struct dc *dc) |
5554 | { |
5555 | uint32_t i; |
5556 | |
5557 | for (i = 0; i < dc->link_count; i++) |
5558 | dc->link_srv->resume(dc->links[i]); |
5559 | } |
5560 | |
5561 | bool dc_is_dmcu_initialized(struct dc *dc) |
5562 | { |
5563 | struct dmcu *dmcu = dc->res_pool->dmcu; |
5564 | |
5565 | if (dmcu) |
5566 | return dmcu->funcs->is_dmcu_initialized(dmcu); |
5567 | return false; |
5568 | } |
5569 | |
5570 | enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) |
5571 | { |
5572 | if (dc->hwss.set_clock) |
5573 | return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); |
5574 | return DC_ERROR_UNEXPECTED; |
5575 | } |
5576 | void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) |
5577 | { |
5578 | if (dc->hwss.get_clock) |
5579 | dc->hwss.get_clock(dc, clock_type, clock_cfg); |
5580 | } |
5581 | |
5582 | /* enable/disable eDP PSR without specifying a stream for eDP */
5583 | bool dc_set_psr_allow_active(struct dc *dc, bool enable) |
5584 | { |
5585 | int i; |
5586 | bool allow_active; |
5587 | |
5588 | for (i = 0; i < dc->current_state->stream_count ; i++) { |
5589 | struct dc_link *link; |
5590 | struct dc_stream_state *stream = dc->current_state->streams[i]; |
5591 | |
5592 | link = stream->link; |
5593 | if (!link) |
5594 | continue; |
5595 | |
5596 | if (link->psr_settings.psr_feature_enabled) { |
5597 | if (enable && !link->psr_settings.psr_allow_active) { |
5598 | allow_active = true; |
5599 | if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
5600 | return false; |
5601 | } else if (!enable && link->psr_settings.psr_allow_active) { |
5602 | allow_active = false; |
5603 | if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
5604 | return false; |
5605 | } |
5606 | } |
5607 | } |
5608 | |
5609 | return true; |
5610 | } |
5611 | |
5612 | /* enable/disable eDP Replay without specifying a stream for eDP */
5613 | bool dc_set_replay_allow_active(struct dc *dc, bool active) |
5614 | { |
5615 | int i; |
5616 | bool allow_active; |
5617 | |
5618 | for (i = 0; i < dc->current_state->stream_count; i++) { |
5619 | struct dc_link *link; |
5620 | struct dc_stream_state *stream = dc->current_state->streams[i]; |
5621 | |
5622 | link = stream->link; |
5623 | if (!link) |
5624 | continue; |
5625 | |
5626 | if (link->replay_settings.replay_feature_enabled) { |
5627 | if (active && !link->replay_settings.replay_allow_active) { |
5628 | allow_active = true; |
5629 | if (!dc_link_set_replay_allow_active(link, &allow_active,
5630 | false, false, NULL))
5631 | return false; |
5632 | } else if (!active && link->replay_settings.replay_allow_active) { |
5633 | allow_active = false; |
5634 | if (!dc_link_set_replay_allow_active(link, &allow_active,
5635 | true, false, NULL))
5636 | return false; |
5637 | } |
5638 | } |
5639 | } |
5640 | |
5641 | return true; |
5642 | } |
5643 | |
5644 | /* set IPS disable state */ |
5645 | bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) |
5646 | { |
5647 | dc_exit_ips_for_hw_access(dc); |
5648 | |
5649 | dc->config.disable_ips = disable_ips; |
5650 | |
5651 | return true; |
5652 | } |
5653 | |
5654 | void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) |
5655 | { |
5656 | int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0; |
5657 | enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0}; |
5658 | struct pipe_ctx *pipe = NULL; |
5659 | struct dc_state *context = dc->current_state; |
5660 | |
5661 | if (dc->debug.disable_idle_power_optimizations) { |
5662 | DC_LOG_DEBUG("%s: disabled\n", __func__); |
5663 | return; |
5664 | } |
5665 | |
5666 | if (allow != dc->idle_optimizations_allowed) |
5667 | DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, |
5668 | dc->idle_optimizations_allowed, allow, caller_name); |
5669 | |
5670 | if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) |
5671 | return; |
5672 | |
5673 | if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) |
5674 | if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) |
5675 | return; |
5676 | |
5677 | if (allow == dc->idle_optimizations_allowed) |
5678 | return; |
5679 | |
5680 | if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && |
5681 | dc->hwss.apply_idle_power_optimizations(dc, allow)) { |
5682 | dc->idle_optimizations_allowed = allow; |
5683 | DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled": "disabled"); |
5684 | } |
5685 | |
5686 | // log idle clocks and sub vp pipe types at idle optimization time |
5687 | if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk) |
5688 | idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr); |
5689 | |
5690 | if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk) |
5691 | idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr); |
5692 | |
5693 | if (dc->res_pool && context) { |
5694 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
5695 | pipe = &context->res_ctx.pipe_ctx[i]; |
5696 | subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
5697 | } |
5698 | } |
5699 | |
5700 | DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", |
5701 | __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2], |
5702 | subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name); |
5703 | |
5704 | } |
5705 | |
5706 | void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) |
5707 | { |
5708 | if (dc->caps.ips_support) |
5709 | dc_allow_idle_optimizations_internal(dc, false, caller_name);
5710 | } |
5711 | |
5712 | bool dc_dmub_is_ips_idle_state(struct dc *dc) |
5713 | { |
5714 | if (dc->debug.disable_idle_power_optimizations) |
5715 | return false; |
5716 | |
5717 | if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) |
5718 | return false; |
5719 | |
5720 | if (!dc->ctx->dmub_srv) |
5721 | return false; |
5722 | |
5723 | return dc->ctx->dmub_srv->idle_allowed; |
5724 | } |
5725 | |
5726 | /* set min and max memory clock to lowest and highest DPM level, respectively */ |
5727 | void dc_unlock_memory_clock_frequency(struct dc *dc) |
5728 | { |
5729 | if (dc->clk_mgr->funcs->set_hard_min_memclk) |
5730 | dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); |
5731 | |
5732 | if (dc->clk_mgr->funcs->set_hard_max_memclk) |
5733 | dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); |
5734 | } |
5735 | |
5736 | /* set min memory clock to the min required for current mode, max to maxDPM */ |
5737 | void dc_lock_memory_clock_frequency(struct dc *dc) |
5738 | { |
5739 | if (dc->clk_mgr->funcs->get_memclk_states_from_smu) |
5740 | dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); |
5741 | |
5742 | if (dc->clk_mgr->funcs->set_hard_min_memclk) |
5743 | dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); |
5744 | |
5745 | if (dc->clk_mgr->funcs->set_hard_max_memclk) |
5746 | dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); |
5747 | } |
5748 | |
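/* Blank all active pipes, force the memory clock to @memclk_mhz, then
 * unblank. Used by dc_enable_dcmode_clk_limit() when p-state change is not
 * supported and the limit must be applied disruptively.
 */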
5749 | static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) |
5750 | { |
5751 | struct dc_state *context = dc->current_state; |
5752 | struct hubp *hubp; |
5753 | struct pipe_ctx *pipe; |
5754 | int i; |
5755 | |
5756 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
5757 | pipe = &context->res_ctx.pipe_ctx[i]; |
5758 | |
5759 | if (pipe->stream != NULL) { |
5760 | dc->hwss.disable_pixel_data(dc, pipe, true); |
5761 | |
5762 | // wait for double buffer |
5763 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); |
5764 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); |
5765 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); |
5766 | |
5767 | hubp = pipe->plane_res.hubp; |
5768 | hubp->funcs->set_blank_regs(hubp, true); |
5769 | } |
5770 | } |
5771 | if (dc->clk_mgr->funcs->set_max_memclk) |
5772 | dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); |
5773 | if (dc->clk_mgr->funcs->set_min_memclk) |
5774 | dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); |
5775 | |
5776 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
5777 | pipe = &context->res_ctx.pipe_ctx[i]; |
5778 | |
5779 | if (pipe->stream != NULL) { |
5780 | dc->hwss.disable_pixel_data(dc, pipe, false); |
5781 | |
5782 | hubp = pipe->plane_res.hubp; |
5783 | hubp->funcs->set_blank_regs(hubp, false); |
5784 | } |
5785 | } |
5786 | } |
5787 | |
5788 | |
5789 | /** |
5790 | * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode |
5791 | * @dc: pointer to dc of the dm calling this |
5792 | * @enable: True = transition to DC mode, false = transition back to AC mode |
5793 | * |
5794 | * Some SoCs define additional clock limits when in DC mode; DM should
5795 | * invoke this function when the platform undergoes a power source transition |
5796 | * so DC can apply/unapply the limit. This interface may be disruptive to |
5797 | * the onscreen content. |
5798 | * |
5799 | * Context: Triggered by OS through DM interface, or manually by escape calls. |
5800 | * Need to hold a dclock when doing so. |
5801 | * |
5802 | * Return: none (void function) |
5803 | * |
5804 | */ |
5805 | void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) |
5806 | { |
5807 | unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; |
5808 | bool p_state_change_support; |
5809 | |
5810 | if (!dc->config.dc_mode_clk_limit_support) |
5811 | return; |
5812 | |
5813 | softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; |
5814 | for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { |
5815 | if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) |
5816 | maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; |
5817 | } |
5818 | funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; |
5819 | p_state_change_support = dc->clk_mgr->clks.p_state_change_support; |
5820 | |
5821 | if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { |
5822 | if (p_state_change_support) { |
5823 | if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) |
5824 | dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); |
5825 | // else: No-Op |
5826 | } else { |
5827 | if (funcMin <= softMax) |
5828 | blank_and_force_memclk(dc, true, softMax);
5829 | // else: No-Op |
5830 | } |
5831 | } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { |
5832 | if (p_state_change_support) { |
5833 | if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) |
5834 | dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); |
5835 | // else: No-Op |
5836 | } else { |
5837 | if (funcMin <= softMax) |
5838 | blank_and_force_memclk(dc, true, maxDPM);
5839 | // else: No-Op |
5840 | } |
5841 | } |
5842 | dc->clk_mgr->dc_mode_softmax_enabled = enable; |
5843 | } |
5844 | bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, |
5845 | unsigned int pitch, |
5846 | unsigned int height, |
5847 | enum surface_pixel_format format, |
5848 | struct dc_cursor_attributes *cursor_attr) |
5849 | { |
5850 | if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr)) |
5851 | return true; |
5852 | return false; |
5853 | } |
5854 | |
5855 | /* cleanup on driver unload */ |
5856 | void dc_hardware_release(struct dc *dc) |
5857 | { |
5858 | dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); |
5859 | |
5860 | if (dc->hwss.hardware_release) |
5861 | dc->hwss.hardware_release(dc); |
5862 | } |
5863 | |
5864 | void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) |
5865 | { |
5866 | if (dc->current_state) |
5867 | dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; |
5868 | } |
5869 | |
5870 | /** |
5871 | * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
5872 | * |
5873 | * @dc: [in] dc structure |
5874 | * |
5875 | * Checks whether DMUB FW supports outbox notifications. If supported, DM
5876 | * should register the outbox interrupt prior to actually enabling interrupts
5877 | * via dc_enable_dmub_outbox.
5878 | * |
5879 | * Return: |
5880 | * True if DMUB FW supports outbox notifications, False otherwise |
5881 | */ |
5882 | bool dc_is_dmub_outbox_supported(struct dc *dc) |
5883 | { |
5884 | if (!dc->caps.dmcub_support) |
5885 | return false; |
5886 | |
5887 | switch (dc->ctx->asic_id.chip_family) { |
5888 | |
5889 | case FAMILY_YELLOW_CARP: |
5890 | /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ |
5891 | if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && |
5892 | !dc->debug.dpia_debug.bits.disable_dpia) |
5893 | return true; |
5894 | break; |
5895 | |
5896 | case AMDGPU_FAMILY_GC_11_0_1: |
5897 | case AMDGPU_FAMILY_GC_11_5_0: |
5898 | if (!dc->debug.dpia_debug.bits.disable_dpia) |
5899 | return true; |
5900 | break; |
5901 | |
5902 | default: |
5903 | break; |
5904 | } |
5905 | |
5906 | /* dmub aux needs dmub notifications to be enabled */ |
5907 | return dc->debug.enable_dmub_aux_for_legacy_ddc; |
5908 | |
5909 | } |
5910 | |
5911 | /** |
5912 | * dc_enable_dmub_notifications - Check if dmub fw supports outbox |
5913 | * |
5914 | * @dc: [in] dc structure |
5915 | * |
5916 | * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox |
5917 | * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This |
5918 | * API shall be removed after switching. |
5919 | * |
5920 | * Return: |
5921 | * True if DMUB FW supports outbox notifications, False otherwise |
5922 | */ |
5923 | bool dc_enable_dmub_notifications(struct dc *dc) |
5924 | { |
5925 | return dc_is_dmub_outbox_supported(dc); |
5926 | } |
5927 | |
5928 | /** |
5929 | * dc_enable_dmub_outbox - Enables DMUB unsolicited notification |
5930 | * |
5931 | * @dc: [in] dc structure |
5932 | * |
5933 | * Enables DMUB unsolicited notifications to x86 via outbox. |
5934 | */ |
5935 | void dc_enable_dmub_outbox(struct dc *dc) |
5936 | { |
5937 | struct dc_context *dc_ctx = dc->ctx; |
5938 | |
5939 | dmub_enable_outbox_notification(dc_ctx->dmub_srv); |
5940 | DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); |
5941 | } |
5942 | |
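/*
 * Usage sketch (assumption): the DM probes outbox support first, hooks up its
 * outbox interrupt handling, and only then turns on unsolicited DMUB
 * notifications. register_dmub_outbox_irq() is a hypothetical DM-side helper,
 * not a DC API.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		register_dmub_outbox_irq();	// hypothetical DM hook
 *		dc_enable_dmub_outbox(dc);	// DMUB may now notify x86 via outbox
 *	}
 */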
5943 | /** |
5944 | * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message |
5945 | * Sets port index appropriately for legacy DDC |
5946 | * @dc: dc structure |
5947 | * @link_index: link index |
5948 | * @payload: aux payload |
5949 | * |
5950 | * Returns: True if successful, False if failure |
5951 | */ |
5952 | bool dc_process_dmub_aux_transfer_async(struct dc *dc, |
5953 | uint32_t link_index, |
5954 | struct aux_payload *payload) |
5955 | { |
5956 | uint8_t action; |
5957 | union dmub_rb_cmd cmd = {0}; |
5958 | |
5959 | ASSERT(payload->length <= 16); |
5960 | |
5961 | cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; |
5962 | cmd.dp_aux_access.header.payload_bytes = 0; |
5963 | /* For dpia, ddc_pin is set to NULL */ |
5964 | if (!dc->links[link_index]->ddc->ddc_pin) |
5965 | cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; |
5966 | else |
5967 | cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; |
5968 | |
5969 | cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; |
5970 | cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; |
5971 | cmd.dp_aux_access.aux_control.timeout = 0; |
5972 | cmd.dp_aux_access.aux_control.dpaux.address = payload->address; |
5973 | cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; |
5974 | cmd.dp_aux_access.aux_control.dpaux.length = payload->length; |
5975 | |
5976 | /* set aux action */ |
5977 | if (payload->i2c_over_aux) { |
5978 | if (payload->write) { |
5979 | if (payload->mot) |
5980 | action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; |
5981 | else |
5982 | action = DP_AUX_REQ_ACTION_I2C_WRITE; |
5983 | } else { |
5984 | if (payload->mot) |
5985 | action = DP_AUX_REQ_ACTION_I2C_READ_MOT; |
5986 | else |
5987 | action = DP_AUX_REQ_ACTION_I2C_READ; |
5988 | } |
5989 | } else { |
5990 | if (payload->write) |
5991 | action = DP_AUX_REQ_ACTION_DPCD_WRITE; |
5992 | else |
5993 | action = DP_AUX_REQ_ACTION_DPCD_READ; |
5994 | } |
5995 | |
5996 | cmd.dp_aux_access.aux_control.dpaux.action = action; |
5997 | |
5998 | if (payload->length && payload->write) { |
5999 | memcpy(cmd.dp_aux_access.aux_control.dpaux.data, |
6000 | payload->data, |
6001 | payload->length |
6002 | ); |
6003 | } |
6004 | |
6005 | dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); |
6006 | |
6007 | return true; |
6008 | } |
6009 | |
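/*
 * Usage sketch (assumption): a 1-byte native DPCD read submitted through DMUB.
 * The call only queues the request; the AUX reply is delivered later as a DMUB
 * outbox notification that the DM must consume.
 *
 *	uint8_t dpcd_rev;
 *	struct aux_payload payload = {
 *		.i2c_over_aux = false,	// native AUX / DPCD access
 *		.write = false,		// read request
 *		.address = 0x0,		// DPCD_REV
 *		.length = 1,
 *		.data = &dpcd_rev,
 *	};
 *
 *	if (!dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		;	// handle submission failure
 */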
6010 | uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, |
6011 | uint8_t dpia_port_index) |
6012 | { |
6013 | uint8_t index, link_index = 0xFF; |
6014 | |
6015 | for (index = 0; index < dc->link_count; index++) { |
6016 | /* ddc_hw_inst has dpia port index for dpia links |
6017 | * and ddc instance for legacy links |
6018 | */ |
6019 | if (!dc->links[index]->ddc->ddc_pin) { |
6020 | if (dc->links[index]->ddc_hw_inst == dpia_port_index) { |
6021 | link_index = index; |
6022 | break; |
6023 | } |
6024 | } |
6025 | } |
6026 | ASSERT(link_index != 0xFF); |
6027 | return link_index; |
6028 | } |
6029 | |
6030 | /** |
6031 | * dc_process_dmub_set_config_async - Submits set_config command |
6032 | * |
6033 | * @dc: [in] dc structure |
6034 | * @link_index: [in] link index |
6035 | * @payload: [in] aux payload |
6036 | * @notify: [out] set_config immediate reply |
6037 | * |
6038 | * Submits set_config command to dmub via inbox message. |
6039 | * |
6040 | * Return: |
6041 | * True if successful, False if failure |
6042 | */ |
6043 | bool dc_process_dmub_set_config_async(struct dc *dc, |
6044 | uint32_t link_index, |
6045 | struct set_config_cmd_payload *payload, |
6046 | struct dmub_notification *notify) |
6047 | { |
6048 | union dmub_rb_cmd cmd = {0}; |
6049 | bool is_cmd_complete = true; |
6050 | |
6051 | /* prepare SET_CONFIG command */ |
6052 | cmd.set_config_access.header.type = DMUB_CMD__DPIA; |
6053 | cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; |
6054 | |
6055 | cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; |
6056 | cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; |
6057 | cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; |
6058 | |
6059 | if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { |
6060 | /* command is not processed by dmub */ |
6061 | notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; |
6062 | return is_cmd_complete; |
6063 | } |
6064 | |
6065 | /* command processed by dmub, if ret_status is 1, it is completed instantly */ |
6066 | if (cmd.set_config_access.header.ret_status == 1) |
6067 | notify->sc_status = cmd.set_config_access.set_config_control.immed_status; |
6068 | else |
6069 | /* cmd pending, will receive notification via outbox */ |
6070 | is_cmd_complete = false; |
6071 | |
6072 | return is_cmd_complete; |
6073 | } |
6074 | |
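/*
 * Usage sketch (assumption): when the call returns true, notify->sc_status
 * already holds the immediate result (including SET_CONFIG_UNKNOWN_ERROR if
 * DMUB did not process the command); when it returns false the request is
 * still pending and the final SET_CONFIG reply arrives as an outbox
 * notification.
 *
 *	struct dmub_notification notify = {0};
 *
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
 *		;	// completed immediately, inspect notify.sc_status
 *	else
 *		;	// pending, wait for the SET_CONFIG outbox notification
 */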
6075 | /** |
6076 | * dc_process_dmub_set_mst_slots - Submits MST slot allocation |
6077 | * |
6078 | * @dc: [in] dc structure |
6079 | * @link_index: [in] link index |
6080 | * @mst_alloc_slots: [in] mst slots to be allotted |
6081 | * @mst_slots_in_use: [out] mst slots in use returned in failure case |
6082 | * |
6083 | * Submits mst slot allocation command to dmub via inbox message |
6084 | * |
6085 | * Return: |
6086 | * DC_OK if successful, DC_ERROR if failure |
6087 | */ |
6088 | enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, |
6089 | uint32_t link_index, |
6090 | uint8_t mst_alloc_slots, |
6091 | uint8_t *mst_slots_in_use) |
6092 | { |
6093 | union dmub_rb_cmd cmd = {0}; |
6094 | |
6095 | /* prepare MST_ALLOC_SLOTS command */ |
6096 | cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; |
6097 | cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; |
6098 | |
6099 | cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; |
6100 | cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; |
6101 | |
6102 | if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) |
6103 | /* command is not processed by dmub */ |
6104 | return DC_ERROR_UNEXPECTED; |
6105 | |
6106 | /* command processed by dmub, if ret_status is 1 */ |
6107 | if (cmd.set_config_access.header.ret_status != 1) |
6108 | /* command processing error */ |
6109 | return DC_ERROR_UNEXPECTED; |
6110 | |
6111 | /* command processed and we have a status of 2, mst not enabled in dpia */ |
6112 | if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) |
6113 | return DC_FAIL_UNSUPPORTED_1; |
6114 | |
6115 | /* previously configured mst alloc and used slots did not match */ |
6116 | if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { |
6117 | *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; |
6118 | return DC_NOT_SUPPORTED; |
6119 | } |
6120 | |
6121 | return DC_OK; |
6122 | } |
6123 | |
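/*
 * Usage sketch (assumption): interpreting the return codes documented above.
 *
 *	uint8_t slots_in_use = 0;
 *	enum dc_status status;
 *
 *	status = dc_process_dmub_set_mst_slots(dc, link_index, req_slots, &slots_in_use);
 *	if (status == DC_FAIL_UNSUPPORTED_1)
 *		;	// MST not enabled on this DPIA
 *	else if (status == DC_NOT_SUPPORTED)
 *		;	// allocation mismatch, slots_in_use reports current usage
 *	else if (status != DC_OK)
 *		;	// DMUB did not process the command
 */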
6124 | /** |
6125 | * dc_process_dmub_dpia_set_tps_notification - Submits tps notification |
6126 | * |
6127 | * @dc: [in] dc structure |
6128 | * @link_index: [in] link index |
6129 | * @tps: [in] requested tps |
6130 | * |
6131 | * Submits set_tps_notification command to dmub via inbox message |
6132 | */ |
6133 | void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps) |
6134 | { |
6135 | union dmub_rb_cmd cmd = {0}; |
6136 | |
6137 | cmd.set_tps_notification.header.type = DMUB_CMD__DPIA; |
6138 | cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION; |
6139 | cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst; |
6140 | cmd.set_tps_notification.tps_notification.tps = tps; |
6141 | |
6142 | dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); |
6143 | } |
6144 | |
6145 | /** |
6146 | * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable |
6147 | * |
6148 | * @dc: [in] dc structure |
6149 | * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable |
6150 | * |
6151 | * Submits dpia hpd int enable command to dmub via inbox message |
6152 | */ |
6153 | void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, |
6154 | uint32_t hpd_int_enable) |
6155 | { |
6156 | union dmub_rb_cmd cmd = {0}; |
6157 | |
6158 | cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; |
6159 | cmd.dpia_hpd_int_enable.enable = hpd_int_enable; |
6160 | |
6161 | dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); |
6162 | |
6163 | DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); |
6164 | } |
6165 | |
6166 | /** |
6167 | * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging |
6168 | * |
6169 | * @dc: [in] dc structure |
6170 | * |
6171 | * |
6172 | */ |
6173 | void dc_print_dmub_diagnostic_data(const struct dc *dc) |
6174 | { |
6175 | dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); |
6176 | } |
6177 | |
6178 | /** |
6179 | * dc_disable_accelerated_mode - disable accelerated mode |
6180 | * @dc: dc structure |
6181 | */ |
6182 | void dc_disable_accelerated_mode(struct dc *dc) |
6183 | { |
6184 | bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); |
6185 | } |
6186 | |
6187 | |
6188 | /** |
6189 | * dc_notify_vsync_int_state - notifies vsync enable/disable state |
6190 | * @dc: dc structure |
6191 | * @stream: stream where vsync int state changed |
6192 | * @enable: whether vsync is enabled or disabled |
6193 | * |
6194 | * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM |
6195 | * interrupts after steady state is reached. |
6196 | */ |
6197 | void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) |
6198 | { |
6199 | int i; |
6200 | int edp_num; |
6201 | struct pipe_ctx *pipe = NULL; |
6202 | struct dc_link *link = stream->sink->link; |
6203 | struct dc_link *edp_links[MAX_NUM_EDP]; |
6204 | |
6205 | |
6206 | if (link->psr_settings.psr_feature_enabled) |
6207 | return; |
6208 | |
6209 | if (link->replay_settings.replay_feature_enabled) |
6210 | return; |
6211 | |
6212 | /*find primary pipe associated with stream*/ |
6213 | for (i = 0; i < MAX_PIPES; i++) { |
6214 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
6215 | |
6216 | if (pipe->stream == stream && pipe->stream_res.tg) |
6217 | break; |
6218 | } |
6219 | |
6220 | if (i == MAX_PIPES) { |
6221 | ASSERT(0); |
6222 | return; |
6223 | } |
6224 | |
6225 | dc_get_edp_links(dc, edp_links, &edp_num); |
6226 | |
6227 | /* Determine panel inst */ |
6228 | for (i = 0; i < edp_num; i++) { |
6229 | if (edp_links[i] == link) |
6230 | break; |
6231 | } |
6232 | |
6233 | if (i == edp_num) { |
6234 | return; |
6235 | } |
6236 | |
6237 | if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) |
6238 | pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); |
6239 | } |
6240 | |
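/*
 * Usage sketch (assumption): the DM forwards its vblank/vsync interrupt state
 * so DMUB can pause ABM interrupts while vsync is disabled. The call returns
 * early when PSR or Panel Replay is enabled on the link.
 *
 *	// DM vblank enable path
 *	dc_notify_vsync_int_state(dc, stream, true);
 *	...
 *	// DM vblank disable path
 *	dc_notify_vsync_int_state(dc, stream, false);
 */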
6241 | /***************************************************************************** |
6242 | * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause |
6243 | * ABM |
6244 | * @dc: dc structure |
6245 | * @stream: stream whose ABM hardware state is saved or restored |
6246 | * @pData: abm hw states |
6247 | * |
6248 | ****************************************************************************/ |
6249 | bool dc_abm_save_restore( |
6250 | struct dc *dc, |
6251 | struct dc_stream_state *stream, |
6252 | struct abm_save_restore *pData) |
6253 | { |
6254 | int i; |
6255 | int edp_num; |
6256 | struct pipe_ctx *pipe = NULL; |
6257 | struct dc_link *link = stream->sink->link; |
6258 | struct dc_link *edp_links[MAX_NUM_EDP]; |
6259 | |
6260 | if (link->replay_settings.replay_feature_enabled) |
6261 | return false; |
6262 | |
6263 | /*find primary pipe associated with stream*/ |
6264 | for (i = 0; i < MAX_PIPES; i++) { |
6265 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
6266 | |
6267 | if (pipe->stream == stream && pipe->stream_res.tg) |
6268 | break; |
6269 | } |
6270 | |
6271 | if (i == MAX_PIPES) { |
6272 | ASSERT(0); |
6273 | return false; |
6274 | } |
6275 | |
6276 | dc_get_edp_links(dc, edp_links, &edp_num); |
6277 | |
6278 | /* Determine panel inst */ |
6279 | for (i = 0; i < edp_num; i++) |
6280 | if (edp_links[i] == link) |
6281 | break; |
6282 | |
6283 | if (i == edp_num) |
6284 | return false; |
6285 | |
6286 | if (pipe->stream_res.abm && |
6287 | pipe->stream_res.abm->funcs->save_restore) |
6288 | return pipe->stream_res.abm->funcs->save_restore( |
6289 | pipe->stream_res.abm, |
6290 | i, |
6291 | pData); |
6292 | return false; |
6293 | } |
6294 | |
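/*
 * Usage sketch (assumption): the same entry point handles both save+pause and
 * restore+un-pause; the direction and data live in the caller-filled
 * struct abm_save_restore, which is passed straight through to the ABM
 * firmware interface.
 *
 *	struct abm_save_restore abm_data = {0};	// filled in by the DM
 *
 *	if (!dc_abm_save_restore(dc, stream, &abm_data))
 *		;	// no eDP/ABM behind this stream, or Panel Replay is active
 */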
6295 | void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) |
6296 | { |
6297 | unsigned int i; |
6298 | unsigned int max_cursor_size = dc->caps.max_cursor_size; |
6299 | unsigned int stream_cursor_size; |
6300 | |
6301 | if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { |
6302 | for (i = 0; i < dc->current_state->stream_count; i++) { |
6303 | stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, |
6304 | dc->current_state, |
6305 | dc->current_state->streams[i]); |
6306 | |
6307 | if (stream_cursor_size < max_cursor_size) { |
6308 | max_cursor_size = stream_cursor_size; |
6309 | } |
6310 | } |
6311 | } |
6312 | |
6313 | properties->cursor_size_limit = max_cursor_size; |
6314 | } |
6315 | |
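/*
 * Usage sketch (assumption): the DM can compare its cursor size against the
 * reported limit to decide between the HW cursor and a SW fallback for the
 * current stream topology.
 *
 *	struct dc_current_properties props = {0};
 *
 *	dc_query_current_properties(dc, &props);
 *	if (cursor_width > props.cursor_size_limit)
 *		;	// fall back to a SW cursor (hypothetical DM policy)
 */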
6316 | /** |
6317 | * dc_set_edp_power() - DM controls eDP power to be ON/OFF |
6318 | * |
6319 | * Called when DM wants to power on/off eDP. |
6320 | * Only works on links that have the skip_implict_edp_power_control flag set. |
6321 | * |
6322 | * @dc: Current DC state |
6323 | * @edp_link: a link with eDP connector signal type |
6324 | * @powerOn: power on/off eDP |
6325 | * |
6326 | * Return: void |
6327 | */ |
6328 | void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, |
6329 | bool powerOn) |
6330 | { |
6331 | if (edp_link->connector_signal != SIGNAL_TYPE_EDP) |
6332 | return; |
6333 | |
6334 | if (edp_link->skip_implict_edp_power_control == false) |
6335 | return; |
6336 | |
6337 | edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); |
6338 | } |
6339 | |
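/*
 * Usage sketch (assumption): explicit panel power control from the DM, which
 * is a no-op unless the link has opted out of implicit eDP power control.
 *
 *	if (edp_link->connector_signal == SIGNAL_TYPE_EDP &&
 *	    edp_link->skip_implict_edp_power_control)
 *		dc_set_edp_power(dc, edp_link, true);	// power the panel on
 */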
6340 | /* |
6341 | ***************************************************************************** |
6342 | * dc_get_power_profile_for_dc_state() - extracts power profile from dc state |
6343 | * |
6344 | * Called when DM wants to make power policy decisions based on dc_state |
6345 | * |
6346 | ***************************************************************************** |
6347 | */ |
6348 | struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) |
6349 | { |
6350 | struct dc_power_profile profile = { 0 }; |
6351 | |
6352 | profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support; |
6353 | if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc) |
6354 | return profile; |
6355 | struct dc *dc = context->clk_mgr->ctx->dc; |
6356 | |
6357 | if (dc->res_pool->funcs->get_power_profile) |
6358 | profile.power_level = dc->res_pool->funcs->get_power_profile(context); |
6359 | return profile; |
6360 | } |
6361 | |
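/*
 * Usage sketch (assumption): the DM reads the profile after validating or
 * committing a state; a non-zero power_level means the state is considered
 * more power hungry (no P-State switching, or a higher level reported by the
 * resource pool's get_power_profile hook).
 *
 *	struct dc_power_profile profile =
 *		dc_get_power_profile_for_dc_state(dc->current_state);
 *
 *	if (profile.power_level)
 *		;	// adjust DM power policy (hypothetical)
 */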
6362 | /* |
6363 | ********************************************************************************** |
6364 | * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state |
6365 | * |
6366 | * Called when DM wants to log detile buffer size from dc_state |
6367 | * |
6368 | ********************************************************************************** |
6369 | */ |
6370 | unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) |
6371 | { |
6372 | struct dc *dc = context->clk_mgr->ctx->dc; |
6373 | |
6374 | if (dc->res_pool->funcs->get_det_buffer_size) |
6375 | return dc->res_pool->funcs->get_det_buffer_size(context); |
6376 | else |
6377 | return 0; |
6378 | } |
6379 | |
6380 | bool dc_is_cursor_limit_pending(struct dc *dc) |
6381 | { |
6382 | uint32_t i; |
6383 | |
6384 | for (i = 0; i < dc->current_state->stream_count; i++) { |
6385 | if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i])) |
6386 | return true; |
6387 | } |
6388 | |
6389 | return false; |
6390 | } |
6391 | |
6392 | bool dc_can_clear_cursor_limit(struct dc *dc) |
6393 | { |
6394 | uint32_t i; |
6395 | |
6396 | for (i = 0; i < dc->current_state->stream_count; i++) { |
6397 | if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state)) |
6398 | return true; |
6399 | } |
6400 | |
6401 | return false; |
6402 | } |
6403 |