1 | /* |
2 | * Copyright © 2008 Intel Corporation |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * Keith Packard <keithp@keithp.com> |
25 | * |
26 | */ |
27 | |
28 | #include <linux/export.h> |
29 | #include <linux/i2c.h> |
30 | #include <linux/notifier.h> |
31 | #include <linux/slab.h> |
32 | #include <linux/string_helpers.h> |
33 | #include <linux/timekeeping.h> |
34 | #include <linux/types.h> |
35 | |
36 | #include <asm/byteorder.h> |
37 | |
38 | #include <drm/display/drm_dp_helper.h> |
39 | #include <drm/display/drm_dp_tunnel.h> |
40 | #include <drm/display/drm_dsc_helper.h> |
41 | #include <drm/display/drm_hdmi_helper.h> |
42 | #include <drm/drm_atomic_helper.h> |
43 | #include <drm/drm_crtc.h> |
44 | #include <drm/drm_edid.h> |
45 | #include <drm/drm_probe_helper.h> |
46 | |
47 | #include "g4x_dp.h" |
48 | #include "i915_drv.h" |
49 | #include "i915_irq.h" |
50 | #include "i915_reg.h" |
51 | #include "intel_atomic.h" |
52 | #include "intel_audio.h" |
53 | #include "intel_backlight.h" |
54 | #include "intel_combo_phy_regs.h" |
55 | #include "intel_connector.h" |
56 | #include "intel_crtc.h" |
57 | #include "intel_cx0_phy.h" |
58 | #include "intel_ddi.h" |
59 | #include "intel_de.h" |
60 | #include "intel_display_driver.h" |
61 | #include "intel_display_types.h" |
62 | #include "intel_dp.h" |
63 | #include "intel_dp_aux.h" |
64 | #include "intel_dp_hdcp.h" |
65 | #include "intel_dp_link_training.h" |
66 | #include "intel_dp_mst.h" |
67 | #include "intel_dp_tunnel.h" |
68 | #include "intel_dpio_phy.h" |
69 | #include "intel_dpll.h" |
70 | #include "intel_drrs.h" |
71 | #include "intel_fifo_underrun.h" |
72 | #include "intel_hdcp.h" |
73 | #include "intel_hdmi.h" |
74 | #include "intel_hotplug.h" |
75 | #include "intel_hotplug_irq.h" |
76 | #include "intel_lspcon.h" |
77 | #include "intel_lvds.h" |
78 | #include "intel_panel.h" |
79 | #include "intel_pch_display.h" |
80 | #include "intel_pps.h" |
81 | #include "intel_psr.h" |
82 | #include "intel_tc.h" |
83 | #include "intel_vdsc.h" |
84 | #include "intel_vrr.h" |
85 | #include "intel_crtc_state_dump.h" |
86 | |
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000

/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)


/*
 * Constants for DP DSC configurations.
 * Known VESA DSC output bpp values, used by intel_dp_dsc_nearest_valid_bpp()
 * on pre-XE_LPD platforms to snap to a supported compressed bpp.
 */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
109 | |
110 | /** |
111 | * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) |
112 | * @intel_dp: DP struct |
113 | * |
114 | * If a CPU or PCH DP output is attached to an eDP panel, this function |
115 | * will return true, and false otherwise. |
116 | * |
117 | * This function is not safe to use prior to encoder type being set. |
118 | */ |
119 | bool intel_dp_is_edp(struct intel_dp *intel_dp) |
120 | { |
121 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
122 | |
123 | return dig_port->base.type == INTEL_OUTPUT_EDP; |
124 | } |
125 | |
/* Forward declaration; defined later in this file. */
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
127 | |
128 | /* Is link rate UHBR and thus 128b/132b? */ |
129 | bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) |
130 | { |
131 | return drm_dp_is_uhbr_rate(link_rate: crtc_state->port_clock); |
132 | } |
133 | |
134 | /** |
135 | * intel_dp_link_symbol_size - get the link symbol size for a given link rate |
136 | * @rate: link rate in 10kbit/s units |
137 | * |
138 | * Returns the link symbol size in bits/symbol units depending on the link |
139 | * rate -> channel coding. |
140 | */ |
141 | int intel_dp_link_symbol_size(int rate) |
142 | { |
143 | return drm_dp_is_uhbr_rate(link_rate: rate) ? 32 : 10; |
144 | } |
145 | |
146 | /** |
147 | * intel_dp_link_symbol_clock - convert link rate to link symbol clock |
148 | * @rate: link rate in 10kbit/s units |
149 | * |
150 | * Returns the link symbol clock frequency in kHz units depending on the |
151 | * link rate and channel coding. |
152 | */ |
153 | int intel_dp_link_symbol_clock(int rate) |
154 | { |
155 | return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); |
156 | } |
157 | |
158 | static int max_dprx_rate(struct intel_dp *intel_dp) |
159 | { |
160 | if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) |
161 | return drm_dp_tunnel_max_dprx_rate(tunnel: intel_dp->tunnel); |
162 | |
163 | return drm_dp_bw_code_to_link_rate(link_bw: intel_dp->dpcd[DP_MAX_LINK_RATE]); |
164 | } |
165 | |
166 | static int max_dprx_lane_count(struct intel_dp *intel_dp) |
167 | { |
168 | if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) |
169 | return drm_dp_tunnel_max_dprx_lane_count(tunnel: intel_dp->tunnel); |
170 | |
171 | return drm_dp_max_lane_count(dpcd: intel_dp->dpcd); |
172 | } |
173 | |
/* Fall back to a single lowest standard DP rate (162000 in 10kbit/s units). */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}
179 | |
180 | /* update sink rates from dpcd */ |
181 | static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) |
182 | { |
183 | static const int dp_rates[] = { |
184 | 162000, 270000, 540000, 810000 |
185 | }; |
186 | int i, max_rate; |
187 | int max_lttpr_rate; |
188 | |
189 | if (drm_dp_has_quirk(desc: &intel_dp->desc, quirk: DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { |
190 | /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ |
191 | static const int quirk_rates[] = { 162000, 270000, 324000 }; |
192 | |
193 | memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); |
194 | intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); |
195 | |
196 | return; |
197 | } |
198 | |
199 | /* |
200 | * Sink rates for 8b/10b. |
201 | */ |
202 | max_rate = max_dprx_rate(intel_dp); |
203 | max_lttpr_rate = drm_dp_lttpr_max_link_rate(caps: intel_dp->lttpr_common_caps); |
204 | if (max_lttpr_rate) |
205 | max_rate = min(max_rate, max_lttpr_rate); |
206 | |
207 | for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { |
208 | if (dp_rates[i] > max_rate) |
209 | break; |
210 | intel_dp->sink_rates[i] = dp_rates[i]; |
211 | } |
212 | |
213 | /* |
214 | * Sink rates for 128b/132b. If set, sink should support all 8b/10b |
215 | * rates and 10 Gbps. |
216 | */ |
217 | if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) { |
218 | u8 uhbr_rates = 0; |
219 | |
220 | BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); |
221 | |
222 | drm_dp_dpcd_readb(aux: &intel_dp->aux, |
223 | DP_128B132B_SUPPORTED_LINK_RATES, valuep: &uhbr_rates); |
224 | |
225 | if (drm_dp_lttpr_count(cap: intel_dp->lttpr_common_caps)) { |
226 | /* We have a repeater */ |
227 | if (intel_dp->lttpr_common_caps[0] >= 0x20 && |
228 | intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - |
229 | DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] & |
230 | DP_PHY_REPEATER_128B132B_SUPPORTED) { |
231 | /* Repeater supports 128b/132b, valid UHBR rates */ |
232 | uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES - |
233 | DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; |
234 | } else { |
235 | /* Does not support 128b/132b */ |
236 | uhbr_rates = 0; |
237 | } |
238 | } |
239 | |
240 | if (uhbr_rates & DP_UHBR10) |
241 | intel_dp->sink_rates[i++] = 1000000; |
242 | if (uhbr_rates & DP_UHBR13_5) |
243 | intel_dp->sink_rates[i++] = 1350000; |
244 | if (uhbr_rates & DP_UHBR20) |
245 | intel_dp->sink_rates[i++] = 2000000; |
246 | } |
247 | |
248 | intel_dp->num_sink_rates = i; |
249 | } |
250 | |
251 | static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) |
252 | { |
253 | struct intel_connector *connector = intel_dp->attached_connector; |
254 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
255 | struct intel_encoder *encoder = &intel_dig_port->base; |
256 | |
257 | intel_dp_set_dpcd_sink_rates(intel_dp); |
258 | |
259 | if (intel_dp->num_sink_rates) |
260 | return; |
261 | |
262 | drm_err(&dp_to_i915(intel_dp)->drm, |
263 | "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n" , |
264 | connector->base.base.id, connector->base.name, |
265 | encoder->base.base.id, encoder->base.name); |
266 | |
267 | intel_dp_set_default_sink_rates(intel_dp); |
268 | } |
269 | |
/* Fall back to the minimum (always valid) lane count of 1. */
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
	intel_dp->max_sink_lane_count = 1;
}
274 | |
275 | static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) |
276 | { |
277 | struct intel_connector *connector = intel_dp->attached_connector; |
278 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
279 | struct intel_encoder *encoder = &intel_dig_port->base; |
280 | |
281 | intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp); |
282 | |
283 | switch (intel_dp->max_sink_lane_count) { |
284 | case 1: |
285 | case 2: |
286 | case 4: |
287 | return; |
288 | } |
289 | |
290 | drm_err(&dp_to_i915(intel_dp)->drm, |
291 | "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n" , |
292 | connector->base.base.id, connector->base.name, |
293 | encoder->base.base.id, encoder->base.name, |
294 | intel_dp->max_sink_lane_count); |
295 | |
296 | intel_dp_set_default_max_sink_lane_count(intel_dp); |
297 | } |
298 | |
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int idx;

	/*
	 * rates[] is sorted ascending; scan down from the top and return
	 * the count up to and including the highest rate <= max_rate.
	 */
	for (idx = len - 1; idx >= 0; idx--) {
		if (rates[idx] <= max_rate)
			return idx + 1;
	}

	return 0;
}
312 | |
313 | /* Get length of common rates array potentially limited by max_rate. */ |
314 | static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, |
315 | int max_rate) |
316 | { |
317 | return intel_dp_rate_limit_len(rates: intel_dp->common_rates, |
318 | len: intel_dp->num_common_rates, max_rate); |
319 | } |
320 | |
321 | static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) |
322 | { |
323 | if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, |
324 | index < 0 || index >= intel_dp->num_common_rates)) |
325 | return 162000; |
326 | |
327 | return intel_dp->common_rates[index]; |
328 | } |
329 | |
330 | /* Theoretical max between source and sink */ |
331 | int intel_dp_max_common_rate(struct intel_dp *intel_dp) |
332 | { |
333 | return intel_dp_common_rate(intel_dp, index: intel_dp->num_common_rates - 1); |
334 | } |
335 | |
336 | static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) |
337 | { |
338 | int vbt_max_lanes = intel_bios_dp_max_lane_count(devdata: dig_port->base.devdata); |
339 | int max_lanes = dig_port->max_lanes; |
340 | |
341 | if (vbt_max_lanes) |
342 | max_lanes = min(max_lanes, vbt_max_lanes); |
343 | |
344 | return max_lanes; |
345 | } |
346 | |
347 | /* Theoretical max between source and sink */ |
348 | int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) |
349 | { |
350 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
351 | int source_max = intel_dp_max_source_lane_count(dig_port); |
352 | int sink_max = intel_dp->max_sink_lane_count; |
353 | int lane_max = intel_tc_port_max_lane_count(dig_port); |
354 | int lttpr_max = drm_dp_lttpr_max_lane_count(caps: intel_dp->lttpr_common_caps); |
355 | |
356 | if (lttpr_max) |
357 | sink_max = min(sink_max, lttpr_max); |
358 | |
359 | return min3(source_max, sink_max, lane_max); |
360 | } |
361 | |
362 | int intel_dp_max_lane_count(struct intel_dp *intel_dp) |
363 | { |
364 | switch (intel_dp->max_link_lane_count) { |
365 | case 1: |
366 | case 2: |
367 | case 4: |
368 | return intel_dp->max_link_lane_count; |
369 | default: |
370 | MISSING_CASE(intel_dp->max_link_lane_count); |
371 | return 1; |
372 | } |
373 | } |
374 | |
375 | /* |
376 | * The required data bandwidth for a mode with given pixel clock and bpp. This |
377 | * is the required net bandwidth independent of the data bandwidth efficiency. |
378 | * |
379 | * TODO: check if callers of this functions should use |
380 | * intel_dp_effective_data_rate() instead. |
381 | */ |
382 | int |
383 | intel_dp_link_required(int pixel_clock, int bpp) |
384 | { |
385 | /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ |
386 | return DIV_ROUND_UP(pixel_clock * bpp, 8); |
387 | } |
388 | |
389 | /** |
390 | * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead |
391 | * @pixel_clock: pixel clock in kHz |
392 | * @bpp_x16: bits per pixel .4 fixed point format |
393 | * @bw_overhead: BW allocation overhead in 1ppm units |
394 | * |
395 | * Return the effective pixel data rate in kB/sec units taking into account |
396 | * the provided SSC, FEC, DSC BW allocation overhead. |
397 | */ |
398 | int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, |
399 | int bw_overhead) |
400 | { |
401 | return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead), |
402 | 1000000 * 16 * 8); |
403 | } |
404 | |
405 | /** |
406 | * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params |
407 | * @intel_dp: Intel DP object |
408 | * @max_dprx_rate: Maximum data rate of the DPRX |
409 | * @max_dprx_lanes: Maximum lane count of the DPRX |
410 | * |
411 | * Calculate the maximum data rate for the provided link parameters taking into |
412 | * account any BW limitations by a DP tunnel attached to @intel_dp. |
413 | * |
414 | * Returns the maximum data rate in kBps units. |
415 | */ |
416 | int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, |
417 | int max_dprx_rate, int max_dprx_lanes) |
418 | { |
419 | int max_rate = drm_dp_max_dprx_data_rate(max_link_rate: max_dprx_rate, max_lanes: max_dprx_lanes); |
420 | |
421 | if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) |
422 | max_rate = min(max_rate, |
423 | drm_dp_tunnel_available_bw(intel_dp->tunnel)); |
424 | |
425 | return max_rate; |
426 | } |
427 | |
428 | bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) |
429 | { |
430 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
431 | struct intel_encoder *encoder = &intel_dig_port->base; |
432 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
433 | |
434 | return DISPLAY_VER(dev_priv) >= 12 || |
435 | (DISPLAY_VER(dev_priv) == 11 && |
436 | encoder->port != PORT_A); |
437 | } |
438 | |
/* DG2 source rate cap: eDP tops out at HBR3, external DP at UHBR13.5. */
static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 810000;

	return 1350000;
}
443 | |
444 | static int icl_max_source_rate(struct intel_dp *intel_dp) |
445 | { |
446 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
447 | struct drm_i915_private *dev_priv = to_i915(dev: dig_port->base.base.dev); |
448 | enum phy phy = intel_port_to_phy(i915: dev_priv, port: dig_port->base.port); |
449 | |
450 | if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) |
451 | return 540000; |
452 | |
453 | return 810000; |
454 | } |
455 | |
/* EHL/JSL source rate cap: eDP limited to HBR2, external DP to HBR3. */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 540000 : 810000;
}
463 | |
464 | static int mtl_max_source_rate(struct intel_dp *intel_dp) |
465 | { |
466 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
467 | struct drm_i915_private *i915 = to_i915(dev: dig_port->base.base.dev); |
468 | enum phy phy = intel_port_to_phy(i915, port: dig_port->base.port); |
469 | |
470 | if (intel_is_c10phy(dev_priv: i915, phy)) |
471 | return 810000; |
472 | |
473 | return 2000000; |
474 | } |
475 | |
476 | static int vbt_max_link_rate(struct intel_dp *intel_dp) |
477 | { |
478 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
479 | int max_rate; |
480 | |
481 | max_rate = intel_bios_dp_max_link_rate(devdata: encoder->devdata); |
482 | |
483 | if (intel_dp_is_edp(intel_dp)) { |
484 | struct intel_connector *connector = intel_dp->attached_connector; |
485 | int edp_max_rate = connector->panel.vbt.edp.max_link_rate; |
486 | |
487 | if (max_rate && edp_max_rate) |
488 | max_rate = min(max_rate, edp_max_rate); |
489 | else if (edp_max_rate) |
490 | max_rate = edp_max_rate; |
491 | } |
492 | |
493 | return max_rate; |
494 | } |
495 | |
496 | static void |
497 | intel_dp_set_source_rates(struct intel_dp *intel_dp) |
498 | { |
499 | /* The values must be in increasing order */ |
500 | static const int mtl_rates[] = { |
501 | 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, |
502 | 810000, 1000000, 2000000, |
503 | }; |
504 | static const int icl_rates[] = { |
505 | 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, |
506 | 1000000, 1350000, |
507 | }; |
508 | static const int bxt_rates[] = { |
509 | 162000, 216000, 243000, 270000, 324000, 432000, 540000 |
510 | }; |
511 | static const int skl_rates[] = { |
512 | 162000, 216000, 270000, 324000, 432000, 540000 |
513 | }; |
514 | static const int hsw_rates[] = { |
515 | 162000, 270000, 540000 |
516 | }; |
517 | static const int g4x_rates[] = { |
518 | 162000, 270000 |
519 | }; |
520 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
521 | struct drm_i915_private *dev_priv = to_i915(dev: dig_port->base.base.dev); |
522 | const int *source_rates; |
523 | int size, max_rate = 0, vbt_max_rate; |
524 | |
525 | /* This should only be done once */ |
526 | drm_WARN_ON(&dev_priv->drm, |
527 | intel_dp->source_rates || intel_dp->num_source_rates); |
528 | |
529 | if (DISPLAY_VER(dev_priv) >= 14) { |
530 | source_rates = mtl_rates; |
531 | size = ARRAY_SIZE(mtl_rates); |
532 | max_rate = mtl_max_source_rate(intel_dp); |
533 | } else if (DISPLAY_VER(dev_priv) >= 11) { |
534 | source_rates = icl_rates; |
535 | size = ARRAY_SIZE(icl_rates); |
536 | if (IS_DG2(dev_priv)) |
537 | max_rate = dg2_max_source_rate(intel_dp); |
538 | else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || |
539 | IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) |
540 | max_rate = 810000; |
541 | else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) |
542 | max_rate = ehl_max_source_rate(intel_dp); |
543 | else |
544 | max_rate = icl_max_source_rate(intel_dp); |
545 | } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { |
546 | source_rates = bxt_rates; |
547 | size = ARRAY_SIZE(bxt_rates); |
548 | } else if (DISPLAY_VER(dev_priv) == 9) { |
549 | source_rates = skl_rates; |
550 | size = ARRAY_SIZE(skl_rates); |
551 | } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) || |
552 | IS_BROADWELL(dev_priv)) { |
553 | source_rates = hsw_rates; |
554 | size = ARRAY_SIZE(hsw_rates); |
555 | } else { |
556 | source_rates = g4x_rates; |
557 | size = ARRAY_SIZE(g4x_rates); |
558 | } |
559 | |
560 | vbt_max_rate = vbt_max_link_rate(intel_dp); |
561 | if (max_rate && vbt_max_rate) |
562 | max_rate = min(max_rate, vbt_max_rate); |
563 | else if (vbt_max_rate) |
564 | max_rate = vbt_max_rate; |
565 | |
566 | if (max_rate) |
567 | size = intel_dp_rate_limit_len(rates: source_rates, len: size, max_rate); |
568 | |
569 | intel_dp->source_rates = source_rates; |
570 | intel_dp->num_source_rates = size; |
571 | } |
572 | |
573 | static int intersect_rates(const int *source_rates, int source_len, |
574 | const int *sink_rates, int sink_len, |
575 | int *common_rates) |
576 | { |
577 | int i = 0, j = 0, k = 0; |
578 | |
579 | while (i < source_len && j < sink_len) { |
580 | if (source_rates[i] == sink_rates[j]) { |
581 | if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) |
582 | return k; |
583 | common_rates[k] = source_rates[i]; |
584 | ++k; |
585 | ++i; |
586 | ++j; |
587 | } else if (source_rates[i] < sink_rates[j]) { |
588 | ++i; |
589 | } else { |
590 | ++j; |
591 | } |
592 | } |
593 | return k; |
594 | } |
595 | |
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
607 | |
608 | static void intel_dp_set_common_rates(struct intel_dp *intel_dp) |
609 | { |
610 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
611 | |
612 | drm_WARN_ON(&i915->drm, |
613 | !intel_dp->num_source_rates || !intel_dp->num_sink_rates); |
614 | |
615 | intel_dp->num_common_rates = intersect_rates(source_rates: intel_dp->source_rates, |
616 | source_len: intel_dp->num_source_rates, |
617 | sink_rates: intel_dp->sink_rates, |
618 | sink_len: intel_dp->num_sink_rates, |
619 | common_rates: intel_dp->common_rates); |
620 | |
621 | /* Paranoia, there should always be something in common. */ |
622 | if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { |
623 | intel_dp->common_rates[0] = 162000; |
624 | intel_dp->num_common_rates = 1; |
625 | } |
626 | } |
627 | |
628 | static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, |
629 | u8 lane_count) |
630 | { |
631 | /* |
632 | * FIXME: we need to synchronize the current link parameters with |
633 | * hardware readout. Currently fast link training doesn't work on |
634 | * boot-up. |
635 | */ |
636 | if (link_rate == 0 || |
637 | link_rate > intel_dp->max_link_rate) |
638 | return false; |
639 | |
640 | if (lane_count == 0 || |
641 | lane_count > intel_dp_max_lane_count(intel_dp)) |
642 | return false; |
643 | |
644 | return true; |
645 | } |
646 | |
647 | static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, |
648 | int link_rate, |
649 | u8 lane_count) |
650 | { |
651 | /* FIXME figure out what we actually want here */ |
652 | const struct drm_display_mode *fixed_mode = |
653 | intel_panel_preferred_fixed_mode(connector: intel_dp->attached_connector); |
654 | int mode_rate, max_rate; |
655 | |
656 | mode_rate = intel_dp_link_required(pixel_clock: fixed_mode->clock, bpp: 18); |
657 | max_rate = intel_dp_max_link_data_rate(intel_dp, max_dprx_rate: link_rate, max_dprx_lanes: lane_count); |
658 | if (mode_rate > max_rate) |
659 | return false; |
660 | |
661 | return true; |
662 | } |
663 | |
664 | int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, |
665 | int link_rate, u8 lane_count) |
666 | { |
667 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
668 | int index; |
669 | |
670 | /* |
671 | * TODO: Enable fallback on MST links once MST link compute can handle |
672 | * the fallback params. |
673 | */ |
674 | if (intel_dp->is_mst) { |
675 | drm_err(&i915->drm, "Link Training Unsuccessful\n" ); |
676 | return -1; |
677 | } |
678 | |
679 | if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) { |
680 | drm_dbg_kms(&i915->drm, |
681 | "Retrying Link training for eDP with max parameters\n" ); |
682 | intel_dp->use_max_params = true; |
683 | return 0; |
684 | } |
685 | |
686 | index = intel_dp_rate_index(rates: intel_dp->common_rates, |
687 | len: intel_dp->num_common_rates, |
688 | rate: link_rate); |
689 | if (index > 0) { |
690 | if (intel_dp_is_edp(intel_dp) && |
691 | !intel_dp_can_link_train_fallback_for_edp(intel_dp, |
692 | link_rate: intel_dp_common_rate(intel_dp, index: index - 1), |
693 | lane_count)) { |
694 | drm_dbg_kms(&i915->drm, |
695 | "Retrying Link training for eDP with same parameters\n" ); |
696 | return 0; |
697 | } |
698 | intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index: index - 1); |
699 | intel_dp->max_link_lane_count = lane_count; |
700 | } else if (lane_count > 1) { |
701 | if (intel_dp_is_edp(intel_dp) && |
702 | !intel_dp_can_link_train_fallback_for_edp(intel_dp, |
703 | link_rate: intel_dp_max_common_rate(intel_dp), |
704 | lane_count: lane_count >> 1)) { |
705 | drm_dbg_kms(&i915->drm, |
706 | "Retrying Link training for eDP with same parameters\n" ); |
707 | return 0; |
708 | } |
709 | intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); |
710 | intel_dp->max_link_lane_count = lane_count >> 1; |
711 | } else { |
712 | drm_err(&i915->drm, "Link Training Unsuccessful\n" ); |
713 | return -1; |
714 | } |
715 | |
716 | return 0; |
717 | } |
718 | |
719 | u32 intel_dp_mode_to_fec_clock(u32 mode_clock) |
720 | { |
721 | return div_u64(dividend: mul_u32_u32(a: mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR), |
722 | divisor: 1000000U); |
723 | } |
724 | |
725 | int intel_dp_bw_fec_overhead(bool fec_enabled) |
726 | { |
727 | /* |
728 | * TODO: Calculate the actual overhead for a given mode. |
729 | * The hard-coded 1/0.972261=2.853% overhead factor |
730 | * corresponds (for instance) to the 8b/10b DP FEC 2.4% + |
731 | * 0.453% DSC overhead. This is enough for a 3840 width mode, |
732 | * which has a DSC overhead of up to ~0.2%, but may not be |
733 | * enough for a 1024 width mode where this is ~0.8% (on a 4 |
734 | * lane DP link, with 2 DSC slices and 8 bpp color depth). |
735 | */ |
736 | return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000; |
737 | } |
738 | |
/* Size of the small joiner RAM in bits, per display generation. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	int ram_bytes;

	if (DISPLAY_VER(i915) >= 13)
		ram_bytes = 17280;
	else if (DISPLAY_VER(i915) >= 11)
		ram_bytes = 7680;
	else
		ram_bytes = 6144;

	return ram_bytes * 8;
}
749 | |
/*
 * Clamp/snap a requested DSC output bpp to a value the platform supports.
 * Returns 0 if no valid bpp is possible for the request.
 */
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
	u32 bits_per_pixel = bpp;
	int i;

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);

		/*
		 * According to BSpec, 27 is the max DSC output bpp,
		 * 8 is the min DSC output bpp.
		 * While we can still clamp higher bpp values to 27, saving bandwidth,
		 * if it is required to compress up to bpp < 8, means we can't do
		 * that and probably means we can't fit the required mode, even with
		 * DSC enabled.
		 */
		if (bits_per_pixel < 8) {
			drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
				    bits_per_pixel);
			return 0;
		}
		bits_per_pixel = min_t(u32, bits_per_pixel, 27);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
			    bits_per_pixel, valid_dsc_bpp[i]);

		bits_per_pixel = valid_dsc_bpp[i];
	}

	return bits_per_pixel;
}
794 | |
/*
 * Max compressed bpp allowed by the joiner configuration: limited by the
 * small joiner RAM per horizontal pixel and, with big joiner, also by the
 * CDCLK-derived interface bandwidth.
 */
static
u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 max_bpp_small_joiner_ram;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay;

	if (bigjoiner) {
		int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
		/* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
		int ppc = 2;
		u32 max_bpp_bigjoiner =
			i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits /
			intel_dp_mode_to_fec_clock(mode_clock);

		/* Two joined pipes mean twice the small joiner RAM budget */
		max_bpp_small_joiner_ram *= 2;

		return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner);
	}

	return max_bpp_small_joiner_ram;
}
820 | |
821 | u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, |
822 | u32 link_clock, u32 lane_count, |
823 | u32 mode_clock, u32 mode_hdisplay, |
824 | bool bigjoiner, |
825 | enum intel_output_format output_format, |
826 | u32 pipe_bpp, |
827 | u32 timeslots) |
828 | { |
829 | u32 bits_per_pixel, joiner_max_bpp; |
830 | |
831 | /* |
832 | * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* |
833 | * (LinkSymbolClock)* 8 * (TimeSlots / 64) |
834 | * for SST -> TimeSlots is 64(i.e all TimeSlots that are available) |
835 | * for MST -> TimeSlots has to be calculated, based on mode requirements |
836 | * |
837 | * Due to FEC overhead, the available bw is reduced to 97.2261%. |
838 | * To support the given mode: |
839 | * Bandwidth required should be <= Available link Bandwidth * FEC Overhead |
840 | * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead |
841 | * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock |
842 | * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) / |
843 | * (ModeClock / FEC Overhead) |
844 | * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) / |
845 | * (ModeClock / FEC Overhead * 8) |
846 | */ |
847 | bits_per_pixel = ((link_clock * lane_count) * timeslots) / |
848 | (intel_dp_mode_to_fec_clock(mode_clock) * 8); |
849 | |
850 | /* Bandwidth required for 420 is half, that of 444 format */ |
851 | if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
852 | bits_per_pixel *= 2; |
853 | |
854 | /* |
855 | * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum |
856 | * supported PPS value can be 63.9375 and with the further |
857 | * mention that for 420, 422 formats, bpp should be programmed double |
858 | * the target bpp restricting our target bpp to be 31.9375 at max. |
859 | */ |
860 | if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
861 | bits_per_pixel = min_t(u32, bits_per_pixel, 31); |
862 | |
863 | drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots " |
864 | "total bw %u pixel clock %u\n" , |
865 | bits_per_pixel, timeslots, |
866 | (link_clock * lane_count * 8), |
867 | intel_dp_mode_to_fec_clock(mode_clock)); |
868 | |
869 | joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock, |
870 | mode_hdisplay, bigjoiner); |
871 | bits_per_pixel = min(bits_per_pixel, joiner_max_bpp); |
872 | |
873 | bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bpp: bits_per_pixel, pipe_bpp); |
874 | |
875 | return bits_per_pixel; |
876 | } |
877 | |
/*
 * Compute a valid DSC slice count for the given mode.
 *
 * Returns the smallest entry of valid_dsc_slicecount[] (doubled when the big
 * joiner is in use) that satisfies the sink's encoder throughput and slice
 * width limits as well as our own engine constraints, or 0 if none fits.
 */
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				bool bigjoiner)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Sink DSC encoders advertise two peak throughputs; pick the one
	 * that applies at this pixel rate to bound the slice count below. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	/*
	 * Due to some DSC engine BW limitations, we need to enable second
	 * slice and VDSC engine, whenever we approach close enough to max CDCLK
	 */
	if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
		min_slice_count = max_t(u8, min_slice_count, 2);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(dsc_dpcd: connector->dp.dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n" ,
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* << bigjoiner doubles the count when two pipes are joined */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(dsc_dpcd: connector->dp.dsc_dpcd, is_edp: false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n" ,
		    min_slice_count);
	return 0;
}
932 | |
933 | static bool source_can_output(struct intel_dp *intel_dp, |
934 | enum intel_output_format format) |
935 | { |
936 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
937 | |
938 | switch (format) { |
939 | case INTEL_OUTPUT_FORMAT_RGB: |
940 | return true; |
941 | |
942 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
943 | /* |
944 | * No YCbCr output support on gmch platforms. |
945 | * Also, ILK doesn't seem capable of DP YCbCr output. |
946 | * The displayed image is severly corrupted. SNB+ is fine. |
947 | */ |
948 | return !HAS_GMCH(i915) && !IS_IRONLAKE(i915); |
949 | |
950 | case INTEL_OUTPUT_FORMAT_YCBCR420: |
951 | /* Platform < Gen 11 cannot output YCbCr420 format */ |
952 | return DISPLAY_VER(i915) >= 11; |
953 | |
954 | default: |
955 | MISSING_CASE(format); |
956 | return false; |
957 | } |
958 | } |
959 | |
960 | static bool |
961 | dfp_can_convert_from_rgb(struct intel_dp *intel_dp, |
962 | enum intel_output_format sink_format) |
963 | { |
964 | if (!drm_dp_is_branch(dpcd: intel_dp->dpcd)) |
965 | return false; |
966 | |
967 | if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) |
968 | return intel_dp->dfp.rgb_to_ycbcr; |
969 | |
970 | if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
971 | return intel_dp->dfp.rgb_to_ycbcr && |
972 | intel_dp->dfp.ycbcr_444_to_420; |
973 | |
974 | return false; |
975 | } |
976 | |
977 | static bool |
978 | dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp, |
979 | enum intel_output_format sink_format) |
980 | { |
981 | if (!drm_dp_is_branch(dpcd: intel_dp->dpcd)) |
982 | return false; |
983 | |
984 | if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
985 | return intel_dp->dfp.ycbcr_444_to_420; |
986 | |
987 | return false; |
988 | } |
989 | |
990 | static bool |
991 | dfp_can_convert(struct intel_dp *intel_dp, |
992 | enum intel_output_format output_format, |
993 | enum intel_output_format sink_format) |
994 | { |
995 | switch (output_format) { |
996 | case INTEL_OUTPUT_FORMAT_RGB: |
997 | return dfp_can_convert_from_rgb(intel_dp, sink_format); |
998 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
999 | return dfp_can_convert_from_ycbcr444(intel_dp, sink_format); |
1000 | default: |
1001 | MISSING_CASE(output_format); |
1002 | return false; |
1003 | } |
1004 | |
1005 | return false; |
1006 | } |
1007 | |
/*
 * Pick the output format the source should transmit for the given sink
 * format, honouring a debug-forced DSC output format when it is usable.
 */
static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
		       enum intel_output_format sink_format)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum intel_output_format force_dsc_output_format =
		intel_dp->force_dsc_output_format;
	enum intel_output_format output_format;
	if (force_dsc_output_format) {
		/*
		 * The forced format is usable if the source can emit it and
		 * either there is no branch device, the sink wants a
		 * different format anyway, or the branch can convert it.
		 */
		if (source_can_output(intel_dp, format: force_dsc_output_format) &&
		    (!drm_dp_is_branch(dpcd: intel_dp->dpcd) ||
		     sink_format != force_dsc_output_format ||
		     dfp_can_convert(intel_dp, output_format: force_dsc_output_format, sink_format)))
			return force_dsc_output_format;

		drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n" );
	}

	/* Prefer RGB, then YCbCr 4:4:4, and fall back to YCbCr 4:2:0. */
	if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
	    dfp_can_convert_from_rgb(intel_dp, sink_format))
		output_format = INTEL_OUTPUT_FORMAT_RGB;

	else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
		 dfp_can_convert_from_ycbcr444(intel_dp, sink_format))
		output_format = INTEL_OUTPUT_FORMAT_YCBCR444;

	else
		output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));

	return output_format;
}
1042 | |
1043 | int intel_dp_min_bpp(enum intel_output_format output_format) |
1044 | { |
1045 | if (output_format == INTEL_OUTPUT_FORMAT_RGB) |
1046 | return 6 * 3; |
1047 | else |
1048 | return 8 * 3; |
1049 | } |
1050 | |
1051 | int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) |
1052 | { |
1053 | /* |
1054 | * bpp value was assumed to RGB format. And YCbCr 4:2:0 output |
1055 | * format of the number of bytes per pixel will be half the number |
1056 | * of bytes of RGB pixel. |
1057 | */ |
1058 | if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
1059 | bpp /= 2; |
1060 | |
1061 | return bpp; |
1062 | } |
1063 | |
1064 | static enum intel_output_format |
1065 | intel_dp_sink_format(struct intel_connector *connector, |
1066 | const struct drm_display_mode *mode) |
1067 | { |
1068 | const struct drm_display_info *info = &connector->base.display_info; |
1069 | |
1070 | if (drm_mode_is_420_only(display: info, mode)) |
1071 | return INTEL_OUTPUT_FORMAT_YCBCR420; |
1072 | |
1073 | return INTEL_OUTPUT_FORMAT_RGB; |
1074 | } |
1075 | |
1076 | static int |
1077 | intel_dp_mode_min_output_bpp(struct intel_connector *connector, |
1078 | const struct drm_display_mode *mode) |
1079 | { |
1080 | enum intel_output_format output_format, sink_format; |
1081 | |
1082 | sink_format = intel_dp_sink_format(connector, mode); |
1083 | |
1084 | output_format = intel_dp_output_format(connector, sink_format); |
1085 | |
1086 | return intel_dp_output_bpp(output_format, bpp: intel_dp_min_bpp(output_format)); |
1087 | } |
1088 | |
1089 | static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, |
1090 | int hdisplay) |
1091 | { |
1092 | /* |
1093 | * Older platforms don't like hdisplay==4096 with DP. |
1094 | * |
1095 | * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline |
1096 | * and frame counter increment), but we don't get vblank interrupts, |
1097 | * and the pipe underruns immediately. The link also doesn't seem |
1098 | * to get trained properly. |
1099 | * |
1100 | * On CHV the vblank interrupts don't seem to disappear but |
1101 | * otherwise the symptoms are similar. |
1102 | * |
1103 | * TODO: confirm the behaviour on HSW+ |
1104 | */ |
1105 | return hdisplay == 4096 && !HAS_DDI(dev_priv); |
1106 | } |
1107 | |
1108 | static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp) |
1109 | { |
1110 | struct intel_connector *connector = intel_dp->attached_connector; |
1111 | const struct drm_display_info *info = &connector->base.display_info; |
1112 | int max_tmds_clock = intel_dp->dfp.max_tmds_clock; |
1113 | |
1114 | /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */ |
1115 | if (max_tmds_clock && info->max_tmds_clock) |
1116 | max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); |
1117 | |
1118 | return max_tmds_clock; |
1119 | } |
1120 | |
1121 | static enum drm_mode_status |
1122 | intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, |
1123 | int clock, int bpc, |
1124 | enum intel_output_format sink_format, |
1125 | bool respect_downstream_limits) |
1126 | { |
1127 | int tmds_clock, min_tmds_clock, max_tmds_clock; |
1128 | |
1129 | if (!respect_downstream_limits) |
1130 | return MODE_OK; |
1131 | |
1132 | tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); |
1133 | |
1134 | min_tmds_clock = intel_dp->dfp.min_tmds_clock; |
1135 | max_tmds_clock = intel_dp_max_tmds_clock(intel_dp); |
1136 | |
1137 | if (min_tmds_clock && tmds_clock < min_tmds_clock) |
1138 | return MODE_CLOCK_LOW; |
1139 | |
1140 | if (max_tmds_clock && tmds_clock > max_tmds_clock) |
1141 | return MODE_CLOCK_HIGH; |
1142 | |
1143 | return MODE_OK; |
1144 | } |
1145 | |
/*
 * Validate @mode against the downstream facing port (PCON/DP++ dongle)
 * limits: FRL bandwidth for HDMI 2.1 PCONs, dotclock, and TMDS clock,
 * retrying the TMDS check in YCbCr 4:2:0 when the mode allows it.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	enum drm_mode_status status;
	enum intel_output_format sink_format;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(connector, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		/* FRL path bypasses the TMDS checks below entirely. */
		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	sink_format = intel_dp_sink_format(connector, mode);

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	status = intel_dp_tmds_clock_valid(intel_dp, clock: target_clock,
					   bpc: 8, sink_format, respect_downstream_limits: true);

	if (status != MODE_OK) {
		/* Retry in 4:2:0 only when the mode also supports it. */
		if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(display: info, mode))
			return status;
		sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		status = intel_dp_tmds_clock_valid(intel_dp, clock: target_clock,
						   bpc: 8, sink_format, respect_downstream_limits: true);
		if (status != MODE_OK)
			return status;
	}

	return MODE_OK;
}
1199 | |
1200 | bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, |
1201 | int hdisplay, int clock) |
1202 | { |
1203 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
1204 | struct intel_connector *connector = intel_dp->attached_connector; |
1205 | |
1206 | if (!intel_dp_can_bigjoiner(intel_dp)) |
1207 | return false; |
1208 | |
1209 | return clock > i915->max_dotclk_freq || hdisplay > 5120 || |
1210 | connector->force_bigjoiner_enable; |
1211 | } |
1212 | |
/*
 * drm_connector_helper_funcs::mode_valid for DP: reject modes that cannot
 * be driven even with the maximum link configuration, taking big joiner
 * and DSC compression into account before giving up on bandwidth.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
		    struct drm_display_mode *mode)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *dev_priv = to_i915(dev: connector->base.dev);
	const struct drm_display_mode *fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	status = intel_cpu_transcoder_mode_valid(i915: dev_priv, mode);
	if (status != MODE_OK)
		return status;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/* On eDP, all timings derive from the panel's fixed mode. */
	fixed_mode = intel_panel_fixed_mode(connector, mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(connector, mode);
		if (status != MODE_OK)
			return status;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Big joiner doubles the available dotclock budget. */
	if (intel_dp_need_bigjoiner(intel_dp, hdisplay: mode->hdisplay, clock: target_clock)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (intel_dp_hdisplay_bad(dev_priv, hdisplay: mode->hdisplay))
		return MODE_H_ILLEGAL;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp, max_dprx_rate: max_link_clock, max_dprx_lanes: max_lanes);

	mode_rate = intel_dp_link_required(pixel_clock: target_clock,
					   bpp: intel_dp_mode_min_output_bpp(connector, mode));

	/* See if DSC can rescue a mode that exceeds the uncompressed budget. */
	if (HAS_DSC(dev_priv) &&
	    drm_dp_sink_supports_dsc(dsc_dpcd: connector->dp.dsc_dpcd)) {
		enum intel_output_format sink_format, output_format;
		int pipe_bpp;

		sink_format = intel_dp_sink_format(connector, mode);
		output_format = intel_dp_output_format(connector, sink_format);
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);

		/*
		 * Output bpp is stored in 6.4 format so right shift by 4 to get the
		 * integer value since we support only integer values of bpp.
		 */
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_compressed_bpp =
				drm_edp_dsc_sink_output_bpp(dsc_dpcd: connector->dp.dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(dsc_dpcd: connector->dp.dsc_dpcd,
								is_edp: true);
		} else if (drm_dp_sink_supports_fec(fec_capable: connector->dp.fec_capability)) {
			/* DP SST DSC requires FEC support on the sink. */
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(i915: dev_priv,
								    link_clock: max_link_clock,
								    lane_count: max_lanes,
								    mode_clock: target_clock,
								    mode_hdisplay: mode->hdisplay,
								    bigjoiner,
								    output_format,
								    pipe_bpp, timeslots: 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(connector,
							     mode_clock: target_clock,
							     mode_hdisplay: mode->hdisplay,
							     bigjoiner);
		}

		/* DSC is only usable when both a bpp and slice count were found. */
		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	/*
	 * Big joiner configuration needs DSC for TGL which is not true for
	 * XE_LPD where uncompressed joiner is supported.
	 */
	if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
1325 | |
1326 | bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) |
1327 | { |
1328 | return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); |
1329 | } |
1330 | |
1331 | bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) |
1332 | { |
1333 | return DISPLAY_VER(i915) >= 10; |
1334 | } |
1335 | |
/*
 * Format @nelem integers from @array into @str as a ", "-separated list,
 * truncating silently if the buffer is too small. @str is always
 * NUL-terminated (snprintf guarantees this for len > 0).
 *
 * Fix: check snprintf's return explicitly for error (< 0) and compare
 * against len with an explicit cast, instead of relying on the implicit
 * signed->unsigned conversion of a possibly-negative int.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/* Stop on encoding error or once the output is truncated. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1351 | |
/* Dump the source/sink/common link rate tables to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the string formatting entirely when debugging is off. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, len: sizeof(str),
			   array: intel_dp->source_rates, nelem: intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n" , str);

	snprintf_int_array(str, len: sizeof(str),
			   array: intel_dp->sink_rates, nelem: intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n" , str);

	snprintf_int_array(str, len: sizeof(str),
			   array: intel_dp->common_rates, nelem: intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n" , str);
}
1372 | |
1373 | int |
1374 | intel_dp_max_link_rate(struct intel_dp *intel_dp) |
1375 | { |
1376 | int len; |
1377 | |
1378 | len = intel_dp_common_len_rate_limit(intel_dp, max_rate: intel_dp->max_link_rate); |
1379 | |
1380 | return intel_dp_common_rate(intel_dp, index: len - 1); |
1381 | } |
1382 | |
1383 | int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) |
1384 | { |
1385 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
1386 | int i = intel_dp_rate_index(rates: intel_dp->sink_rates, |
1387 | len: intel_dp->num_sink_rates, rate); |
1388 | |
1389 | if (drm_WARN_ON(&i915->drm, i < 0)) |
1390 | i = 0; |
1391 | |
1392 | return i; |
1393 | } |
1394 | |
1395 | void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, |
1396 | u8 *link_bw, u8 *rate_select) |
1397 | { |
1398 | /* eDP 1.4 rate select method. */ |
1399 | if (intel_dp->use_rate_select) { |
1400 | *link_bw = 0; |
1401 | *rate_select = |
1402 | intel_dp_rate_select(intel_dp, rate: port_clock); |
1403 | } else { |
1404 | *link_bw = drm_dp_link_rate_to_bw_code(link_rate: port_clock); |
1405 | *rate_select = 0; |
1406 | } |
1407 | } |
1408 | |
1409 | bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp) |
1410 | { |
1411 | struct intel_connector *connector = intel_dp->attached_connector; |
1412 | |
1413 | return connector->base.display_info.is_hdmi; |
1414 | } |
1415 | |
1416 | static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, |
1417 | const struct intel_crtc_state *pipe_config) |
1418 | { |
1419 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
1420 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1421 | |
1422 | if (DISPLAY_VER(dev_priv) >= 12) |
1423 | return true; |
1424 | |
1425 | if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A && |
1426 | !intel_crtc_has_type(crtc_state: pipe_config, type: INTEL_OUTPUT_DP_MST)) |
1427 | return true; |
1428 | |
1429 | return false; |
1430 | } |
1431 | |
1432 | bool intel_dp_supports_fec(struct intel_dp *intel_dp, |
1433 | const struct intel_connector *connector, |
1434 | const struct intel_crtc_state *pipe_config) |
1435 | { |
1436 | return intel_dp_source_supports_fec(intel_dp, pipe_config) && |
1437 | drm_dp_sink_supports_fec(fec_capable: connector->dp.fec_capability); |
1438 | } |
1439 | |
1440 | static bool intel_dp_supports_dsc(const struct intel_connector *connector, |
1441 | const struct intel_crtc_state *crtc_state) |
1442 | { |
1443 | if (intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_DP) && !crtc_state->fec_enable) |
1444 | return false; |
1445 | |
1446 | return intel_dsc_source_support(crtc_state) && |
1447 | connector->dp.dsc_decompression_aux && |
1448 | drm_dp_sink_supports_dsc(dsc_dpcd: connector->dp.dsc_dpcd); |
1449 | } |
1450 | |
/*
 * Find the highest bpc (starting from @bpc, at least 8) that fits the
 * HDMI DFP's TMDS clock limits for this mode. Returns the bpc to use,
 * or -EINVAL if even 8 bpc does not fit.
 */
static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     int bpc, bool respect_downstream_limits)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;

	/*
	 * Current bpc could already be below 8bpc due to
	 * FDI bandwidth constraints or other limits.
	 * HDMI minimum is 8bpc however.
	 */
	bpc = max(bpc, 8);

	/*
	 * We will never exceed downstream TMDS clock limits while
	 * attempting deep color. If the user insists on forcing an
	 * out of spec mode they will have to be satisfied with 8bpc.
	 */
	if (!respect_downstream_limits)
		bpc = 8;

	/* Walk down in steps of 2 bpc until something fits. */
	for (; bpc >= 8; bpc -= 2) {
		if (intel_hdmi_bpc_possible(crtc_state, bpc,
					    has_hdmi_sink: intel_dp_has_hdmi_sink(intel_dp)) &&
		    intel_dp_tmds_clock_valid(intel_dp, clock, bpc, sink_format: crtc_state->sink_format,
					      respect_downstream_limits) == MODE_OK)
			return bpc;
	}

	return -EINVAL;
}
1482 | |
/*
 * Compute the maximum pipe bpp usable for this state, clamped by the DFP's
 * max bpc, HDMI DFP TMDS limits, and (for eDP) the VBT-provided panel bpp.
 * Returns 0 when no bpc satisfies the HDMI DFP limits.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool respect_downstream_limits)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	/* A nonzero min TMDS clock indicates a HDMI DFP; apply its limits. */
	if (intel_dp->dfp.min_tmds_clock) {
		int max_hdmi_bpc;

		max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
							 respect_downstream_limits);
		if (max_hdmi_bpc < 0)
			return 0;

		bpc = min(bpc, max_hdmi_bpc);
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    intel_connector->panel.vbt.edp.bpp &&
		    intel_connector->panel.vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n" ,
				    intel_connector->panel.vbt.edp.bpp);
			bpp = intel_connector->panel.vbt.edp.bpp;
		}
	}

	return bpp;
}
1522 | |
1523 | /* Adjust link config limits based on compliance test requests. */ |
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin min == max so link computation cannot deviate. */
		limits->pipe.min_bpp = limits->pipe.max_bpp = bpp;
		/* Dithering would corrupt the 6 bpc compliance pattern. */
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n" , bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, link_rate: intel_dp->compliance.test_link_rate,
					       lane_count: intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(rates: intel_dp->common_rates,
						    len: intel_dp->num_common_rates,
						    rate: intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_rate = limits->max_rate =
					intel_dp->compliance.test_link_rate;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1561 | |
1562 | static bool has_seamless_m_n(struct intel_connector *connector) |
1563 | { |
1564 | struct drm_i915_private *i915 = to_i915(dev: connector->base.dev); |
1565 | |
1566 | /* |
1567 | * Seamless M/N reprogramming only implemented |
1568 | * for BDW+ double buffered M/N registers so far. |
1569 | */ |
1570 | return HAS_DOUBLE_BUFFERED_M_N(i915) && |
1571 | intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; |
1572 | } |
1573 | |
1574 | static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, |
1575 | const struct drm_connector_state *conn_state) |
1576 | { |
1577 | struct intel_connector *connector = to_intel_connector(conn_state->connector); |
1578 | const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; |
1579 | |
1580 | /* FIXME a bit of a mess wrt clock vs. crtc_clock */ |
1581 | if (has_seamless_m_n(connector)) |
1582 | return intel_panel_highest_mode(connector, adjusted_mode)->clock; |
1583 | else |
1584 | return adjusted_mode->crtc_clock; |
1585 | } |
1586 | |
1587 | /* Optimize link config in order: max bpp, min clock, min lanes */ |
/*
 * Search for the first link configuration that carries the mode, iterating
 * bpp from high to low (outermost, so image quality wins), then link rate
 * low to high, then lane count low to high. Writes the winning lane count,
 * pipe bpp and port clock into @pipe_config; returns 0 or -EINVAL if no
 * combination within @limits has enough bandwidth.
 */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state,
				  const struct link_config_limits *limits)
{
	int bpp, i, lane_count, clock = intel_dp_mode_clock(crtc_state: pipe_config, conn_state);
	int mode_rate, link_rate, link_avail;

	for (bpp = to_bpp_int(bpp_x16: limits->link.max_bpp_x16);
	     bpp >= to_bpp_int(bpp_x16: limits->link.min_bpp_x16);
	     bpp -= 2 * 3) {
		/* Link bpp differs from pipe bpp for YCbCr 4:2:0 output. */
		int link_bpp = intel_dp_output_bpp(output_format: pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(pixel_clock: clock, bpp: link_bpp);

		for (i = 0; i < intel_dp->num_common_rates; i++) {
			link_rate = intel_dp_common_rate(intel_dp, index: i);
			if (link_rate < limits->min_rate ||
			    link_rate > limits->max_rate)
				continue;

			/* Lane counts are powers of two: 1, 2, 4. */
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_avail = intel_dp_max_link_data_rate(intel_dp,
									 max_dprx_rate: link_rate,
									 max_dprx_lanes: lane_count);


				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_rate;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
1631 | |
1632 | static |
1633 | u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915) |
1634 | { |
1635 | /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ |
1636 | if (DISPLAY_VER(i915) >= 12) |
1637 | return 12; |
1638 | if (DISPLAY_VER(i915) == 11) |
1639 | return 10; |
1640 | |
1641 | return 0; |
1642 | } |
1643 | |
/*
 * Compute the maximum DSC input pipe bpp: the largest sink-supported input
 * bpc not exceeding both the source limit and @max_req_bpc, times 3.
 * Returns 0 when the source has no DSC input limit or nothing matches.
 */
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
				 u8 max_req_bpc)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);
	int i, num_bpc;
	u8 dsc_bpc[3] = {};
	u8 dsc_max_bpc;

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);

	/* Zero means the source cannot do DSC at all. */
	if (!dsc_max_bpc)
		return dsc_max_bpc;

	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpc: connector->dp.dsc_dpcd,
						       dsc_bpc);
	/* dsc_bpc[] assumed highest-first; take the first value that fits.
	 * NOTE(review): ordering comes from the helper — confirm. */
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
1668 | |
/* DSC minor version the source supports: 1.2 on MTL+ (ver 14+), else 1.1. */
static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 14)
		return 2;

	return 1;
}
1673 | |
1674 | static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) |
1675 | { |
1676 | return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> |
1677 | DP_DSC_MINOR_SHIFT; |
1678 | } |
1679 | |
static int intel_dp_get_slice_height(int vactive)
{
	int height;

	/*
	 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
	 * lines is an optimal slice height, but any size works as long as
	 * it divides the vertical active evenly and the maximum vertical
	 * slice count is respected. Scan upward from 108 in steps of 2 for
	 * the first divisor.
	 */
	for (height = 108; height <= vactive; height += 2) {
		if (!(vactive % height))
			return height;
	}

	/*
	 * Highly unlikely to get here — most resolutions find a divisor
	 * above — but 2 should work with all resolutions.
	 */
	return 2;
}
1701 | |
/*
 * Fill in the DSC config (@crtc_state->dsc.config) from the crtc state and
 * the sink's DSC DPCD capabilities, then compute the rate-control
 * parameters. Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
	vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;

	vdsc_cfg->slice_height = intel_dp_get_slice_height(vactive: vdsc_cfg->pic_height);

	ret = intel_dsc_compute_params(pipe_config: crtc_state);
	if (ret)
		return ret;

	/* Use the lowest DSC version both source and sink can handle. */
	vdsc_cfg->dsc_version_major =
		(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(intel_dp_source_dsc_version_minor(i915),
		    intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
	/* Keep RGB conversion only if the sink's decoder accepts RGB. */
	if (vdsc_cfg->convert_rgb)
		vdsc_cfg->convert_rgb =
			connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
			DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd: connector->dp.dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n" );
		return -EINVAL;
	}

	/* DSC 1.2 encodes the max line buffer depth with a special value. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
1756 | |
1757 | static bool intel_dp_dsc_supports_format(const struct intel_connector *connector, |
1758 | enum intel_output_format output_format) |
1759 | { |
1760 | struct drm_i915_private *i915 = to_i915(dev: connector->base.dev); |
1761 | u8 sink_dsc_format; |
1762 | |
1763 | switch (output_format) { |
1764 | case INTEL_OUTPUT_FORMAT_RGB: |
1765 | sink_dsc_format = DP_DSC_RGB; |
1766 | break; |
1767 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
1768 | sink_dsc_format = DP_DSC_YCbCr444; |
1769 | break; |
1770 | case INTEL_OUTPUT_FORMAT_YCBCR420: |
1771 | if (min(intel_dp_source_dsc_version_minor(i915), |
1772 | intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2) |
1773 | return false; |
1774 | sink_dsc_format = DP_DSC_YCbCr420_Native; |
1775 | break; |
1776 | default: |
1777 | return false; |
1778 | } |
1779 | |
1780 | return drm_dp_dsc_sink_supports_format(dsc_dpcd: connector->dp.dsc_dpcd, output_format: sink_dsc_format); |
1781 | } |
1782 | |
1783 | static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock, |
1784 | u32 lane_count, u32 mode_clock, |
1785 | enum intel_output_format output_format, |
1786 | int timeslots) |
1787 | { |
1788 | u32 available_bw, required_bw; |
1789 | |
1790 | available_bw = (link_clock * lane_count * timeslots * 16) / 8; |
1791 | required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock)); |
1792 | |
1793 | return available_bw > required_bw; |
1794 | } |
1795 | |
1796 | static int dsc_compute_link_config(struct intel_dp *intel_dp, |
1797 | struct intel_crtc_state *pipe_config, |
1798 | struct link_config_limits *limits, |
1799 | u16 compressed_bppx16, |
1800 | int timeslots) |
1801 | { |
1802 | const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; |
1803 | int link_rate, lane_count; |
1804 | int i; |
1805 | |
1806 | for (i = 0; i < intel_dp->num_common_rates; i++) { |
1807 | link_rate = intel_dp_common_rate(intel_dp, index: i); |
1808 | if (link_rate < limits->min_rate || link_rate > limits->max_rate) |
1809 | continue; |
1810 | |
1811 | for (lane_count = limits->min_lane_count; |
1812 | lane_count <= limits->max_lane_count; |
1813 | lane_count <<= 1) { |
1814 | if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_clock: link_rate, |
1815 | lane_count, mode_clock: adjusted_mode->clock, |
1816 | output_format: pipe_config->output_format, |
1817 | timeslots)) |
1818 | continue; |
1819 | |
1820 | pipe_config->lane_count = lane_count; |
1821 | pipe_config->port_clock = link_rate; |
1822 | |
1823 | return 0; |
1824 | } |
1825 | } |
1826 | |
1827 | return -EINVAL; |
1828 | } |
1829 | |
1830 | static |
1831 | u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector, |
1832 | struct intel_crtc_state *pipe_config, |
1833 | int bpc) |
1834 | { |
1835 | u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(dsc_dpcd: connector->dp.dsc_dpcd); |
1836 | |
1837 | if (max_bppx16) |
1838 | return max_bppx16; |
1839 | /* |
1840 | * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate |
1841 | * values as given in spec Table 2-157 DP v2.0 |
1842 | */ |
1843 | switch (pipe_config->output_format) { |
1844 | case INTEL_OUTPUT_FORMAT_RGB: |
1845 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
1846 | return (3 * bpc) << 4; |
1847 | case INTEL_OUTPUT_FORMAT_YCBCR420: |
1848 | return (3 * (bpc / 2)) << 4; |
1849 | default: |
1850 | MISSING_CASE(pipe_config->output_format); |
1851 | break; |
1852 | } |
1853 | |
1854 | return 0; |
1855 | } |
1856 | |
1857 | int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config) |
1858 | { |
1859 | /* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */ |
1860 | switch (pipe_config->output_format) { |
1861 | case INTEL_OUTPUT_FORMAT_RGB: |
1862 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
1863 | return 8; |
1864 | case INTEL_OUTPUT_FORMAT_YCBCR420: |
1865 | return 6; |
1866 | default: |
1867 | MISSING_CASE(pipe_config->output_format); |
1868 | break; |
1869 | } |
1870 | |
1871 | return 0; |
1872 | } |
1873 | |
1874 | int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, |
1875 | struct intel_crtc_state *pipe_config, |
1876 | int bpc) |
1877 | { |
1878 | return intel_dp_dsc_max_sink_compressed_bppx16(connector, |
1879 | pipe_config, bpc) >> 4; |
1880 | } |
1881 | |
/* The source hardware cannot compress below 8 bpp. */
static int dsc_src_min_compressed_bpp(void)
{
	return 8;
}
1887 | |
/*
 * Maximum compressed bpp the source supports: 27 bpp on display version
 * 13+, 23 bpp on earlier platforms (Bspec: 49259).
 */
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return DISPLAY_VER(i915) >= 13 ? 27 : 23;
}
1901 | |
1902 | /* |
1903 | * From a list of valid compressed bpps try different compressed bpp and find a |
1904 | * suitable link configuration that can support it. |
1905 | */ |
1906 | static int |
1907 | icl_dsc_compute_link_config(struct intel_dp *intel_dp, |
1908 | struct intel_crtc_state *pipe_config, |
1909 | struct link_config_limits *limits, |
1910 | int dsc_max_bpp, |
1911 | int dsc_min_bpp, |
1912 | int pipe_bpp, |
1913 | int timeslots) |
1914 | { |
1915 | int i, ret; |
1916 | |
1917 | /* Compressed BPP should be less than the Input DSC bpp */ |
1918 | dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); |
1919 | |
1920 | for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) { |
1921 | if (valid_dsc_bpp[i] < dsc_min_bpp) |
1922 | continue; |
1923 | if (valid_dsc_bpp[i] > dsc_max_bpp) |
1924 | break; |
1925 | |
1926 | ret = dsc_compute_link_config(intel_dp, |
1927 | pipe_config, |
1928 | limits, |
1929 | compressed_bppx16: valid_dsc_bpp[i] << 4, |
1930 | timeslots); |
1931 | if (ret == 0) { |
1932 | pipe_config->dsc.compressed_bpp_x16 = |
1933 | to_bpp_x16(bpp: valid_dsc_bpp[i]); |
1934 | return 0; |
1935 | } |
1936 | } |
1937 | |
1938 | return -EINVAL; |
1939 | } |
1940 | |
1941 | /* |
1942 | * From XE_LPD onwards we supports compression bpps in steps of 1 up to |
1943 | * uncompressed bpp-1. So we start from max compressed bpp and see if any |
1944 | * link configuration is able to support that compressed bpp, if not we |
1945 | * step down and check for lower compressed bpp. |
1946 | */ |
1947 | static int |
1948 | xelpd_dsc_compute_link_config(struct intel_dp *intel_dp, |
1949 | const struct intel_connector *connector, |
1950 | struct intel_crtc_state *pipe_config, |
1951 | struct link_config_limits *limits, |
1952 | int dsc_max_bpp, |
1953 | int dsc_min_bpp, |
1954 | int pipe_bpp, |
1955 | int timeslots) |
1956 | { |
1957 | u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(dsc_dpcd: connector->dp.dsc_dpcd); |
1958 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
1959 | u16 compressed_bppx16; |
1960 | u8 bppx16_step; |
1961 | int ret; |
1962 | |
1963 | if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1) |
1964 | bppx16_step = 16; |
1965 | else |
1966 | bppx16_step = 16 / bppx16_incr; |
1967 | |
1968 | /* Compressed BPP should be less than the Input DSC bpp */ |
1969 | dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step); |
1970 | dsc_min_bpp = dsc_min_bpp << 4; |
1971 | |
1972 | for (compressed_bppx16 = dsc_max_bpp; |
1973 | compressed_bppx16 >= dsc_min_bpp; |
1974 | compressed_bppx16 -= bppx16_step) { |
1975 | if (intel_dp->force_dsc_fractional_bpp_en && |
1976 | !to_bpp_frac(bpp_x16: compressed_bppx16)) |
1977 | continue; |
1978 | ret = dsc_compute_link_config(intel_dp, |
1979 | pipe_config, |
1980 | limits, |
1981 | compressed_bppx16, |
1982 | timeslots); |
1983 | if (ret == 0) { |
1984 | pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16; |
1985 | if (intel_dp->force_dsc_fractional_bpp_en && |
1986 | to_bpp_frac(bpp_x16: compressed_bppx16)) |
1987 | drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n" ); |
1988 | |
1989 | return 0; |
1990 | } |
1991 | } |
1992 | return -EINVAL; |
1993 | } |
1994 | |
1995 | static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, |
1996 | const struct intel_connector *connector, |
1997 | struct intel_crtc_state *pipe_config, |
1998 | struct link_config_limits *limits, |
1999 | int pipe_bpp, |
2000 | int timeslots) |
2001 | { |
2002 | const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; |
2003 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
2004 | int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; |
2005 | int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; |
2006 | int dsc_joiner_max_bpp; |
2007 | |
2008 | dsc_src_min_bpp = dsc_src_min_compressed_bpp(); |
2009 | dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); |
2010 | dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); |
2011 | dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); |
2012 | |
2013 | dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); |
2014 | dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, |
2015 | pipe_config, |
2016 | bpc: pipe_bpp / 3); |
2017 | dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; |
2018 | |
2019 | dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock: adjusted_mode->clock, |
2020 | mode_hdisplay: adjusted_mode->hdisplay, |
2021 | bigjoiner: pipe_config->bigjoiner_pipes); |
2022 | dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp); |
2023 | dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); |
2024 | |
2025 | if (DISPLAY_VER(i915) >= 13) |
2026 | return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits, |
2027 | dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); |
2028 | return icl_dsc_compute_link_config(intel_dp, pipe_config, limits, |
2029 | dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); |
2030 | } |
2031 | |
2032 | static |
2033 | u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915) |
2034 | { |
2035 | /* Min DSC Input BPC for ICL+ is 8 */ |
2036 | return HAS_DSC(i915) ? 8 : 0; |
2037 | } |
2038 | |
2039 | static |
2040 | bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915, |
2041 | struct drm_connector_state *conn_state, |
2042 | struct link_config_limits *limits, |
2043 | int pipe_bpp) |
2044 | { |
2045 | u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp; |
2046 | |
2047 | dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc); |
2048 | dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915); |
2049 | |
2050 | dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp); |
2051 | dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp); |
2052 | |
2053 | return pipe_bpp >= dsc_min_pipe_bpp && |
2054 | pipe_bpp <= dsc_max_pipe_bpp; |
2055 | } |
2056 | |
2057 | static |
2058 | int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp, |
2059 | struct drm_connector_state *conn_state, |
2060 | struct link_config_limits *limits) |
2061 | { |
2062 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
2063 | int forced_bpp; |
2064 | |
2065 | if (!intel_dp->force_dsc_bpc) |
2066 | return 0; |
2067 | |
2068 | forced_bpp = intel_dp->force_dsc_bpc * 3; |
2069 | |
2070 | if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp: forced_bpp)) { |
2071 | drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n" , intel_dp->force_dsc_bpc); |
2072 | return forced_bpp; |
2073 | } |
2074 | |
2075 | drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n" , |
2076 | intel_dp->force_dsc_bpc); |
2077 | |
2078 | return 0; |
2079 | } |
2080 | |
2081 | static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, |
2082 | struct intel_crtc_state *pipe_config, |
2083 | struct drm_connector_state *conn_state, |
2084 | struct link_config_limits *limits, |
2085 | int timeslots) |
2086 | { |
2087 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
2088 | const struct intel_connector *connector = |
2089 | to_intel_connector(conn_state->connector); |
2090 | u8 max_req_bpc = conn_state->max_requested_bpc; |
2091 | u8 dsc_max_bpc, dsc_max_bpp; |
2092 | u8 dsc_min_bpc, dsc_min_bpp; |
2093 | u8 dsc_bpc[3] = {}; |
2094 | int forced_bpp, pipe_bpp; |
2095 | int num_bpc, i, ret; |
2096 | |
2097 | forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits); |
2098 | |
2099 | if (forced_bpp) { |
2100 | ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config, |
2101 | limits, pipe_bpp: forced_bpp, timeslots); |
2102 | if (ret == 0) { |
2103 | pipe_config->pipe_bpp = forced_bpp; |
2104 | return 0; |
2105 | } |
2106 | } |
2107 | |
2108 | dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915); |
2109 | if (!dsc_max_bpc) |
2110 | return -EINVAL; |
2111 | |
2112 | dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc); |
2113 | dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp); |
2114 | |
2115 | dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915); |
2116 | dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp); |
2117 | |
2118 | /* |
2119 | * Get the maximum DSC bpc that will be supported by any valid |
2120 | * link configuration and compressed bpp. |
2121 | */ |
2122 | num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpc: connector->dp.dsc_dpcd, dsc_bpc); |
2123 | for (i = 0; i < num_bpc; i++) { |
2124 | pipe_bpp = dsc_bpc[i] * 3; |
2125 | if (pipe_bpp < dsc_min_bpp) |
2126 | break; |
2127 | if (pipe_bpp > dsc_max_bpp) |
2128 | continue; |
2129 | ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config, |
2130 | limits, pipe_bpp, timeslots); |
2131 | if (ret == 0) { |
2132 | pipe_config->pipe_bpp = pipe_bpp; |
2133 | return 0; |
2134 | } |
2135 | } |
2136 | |
2137 | return -EINVAL; |
2138 | } |
2139 | |
2140 | static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, |
2141 | struct intel_crtc_state *pipe_config, |
2142 | struct drm_connector_state *conn_state, |
2143 | struct link_config_limits *limits) |
2144 | { |
2145 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
2146 | struct intel_connector *connector = |
2147 | to_intel_connector(conn_state->connector); |
2148 | int pipe_bpp, forced_bpp; |
2149 | int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; |
2150 | int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; |
2151 | |
2152 | forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits); |
2153 | |
2154 | if (forced_bpp) { |
2155 | pipe_bpp = forced_bpp; |
2156 | } else { |
2157 | int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc); |
2158 | |
2159 | /* For eDP use max bpp that can be supported with DSC. */ |
2160 | pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_req_bpc: max_bpc); |
2161 | if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) { |
2162 | drm_dbg_kms(&i915->drm, |
2163 | "Computed BPC is not in DSC BPC limits\n" ); |
2164 | return -EINVAL; |
2165 | } |
2166 | } |
2167 | pipe_config->port_clock = limits->max_rate; |
2168 | pipe_config->lane_count = limits->max_lane_count; |
2169 | |
2170 | dsc_src_min_bpp = dsc_src_min_compressed_bpp(); |
2171 | dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); |
2172 | dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); |
2173 | dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); |
2174 | |
2175 | dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); |
2176 | dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, |
2177 | pipe_config, |
2178 | bpc: pipe_bpp / 3); |
2179 | dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; |
2180 | dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); |
2181 | |
2182 | /* Compressed BPP should be less than the Input DSC bpp */ |
2183 | dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); |
2184 | |
2185 | pipe_config->dsc.compressed_bpp_x16 = |
2186 | to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp)); |
2187 | |
2188 | pipe_config->pipe_bpp = pipe_bpp; |
2189 | |
2190 | return 0; |
2191 | } |
2192 | |
2193 | int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, |
2194 | struct intel_crtc_state *pipe_config, |
2195 | struct drm_connector_state *conn_state, |
2196 | struct link_config_limits *limits, |
2197 | int timeslots, |
2198 | bool compute_pipe_bpp) |
2199 | { |
2200 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
2201 | struct drm_i915_private *dev_priv = to_i915(dev: dig_port->base.base.dev); |
2202 | const struct intel_connector *connector = |
2203 | to_intel_connector(conn_state->connector); |
2204 | const struct drm_display_mode *adjusted_mode = |
2205 | &pipe_config->hw.adjusted_mode; |
2206 | int ret; |
2207 | |
2208 | pipe_config->fec_enable = pipe_config->fec_enable || |
2209 | (!intel_dp_is_edp(intel_dp) && |
2210 | intel_dp_supports_fec(intel_dp, connector, pipe_config)); |
2211 | |
2212 | if (!intel_dp_supports_dsc(connector, crtc_state: pipe_config)) |
2213 | return -EINVAL; |
2214 | |
2215 | if (!intel_dp_dsc_supports_format(connector, output_format: pipe_config->output_format)) |
2216 | return -EINVAL; |
2217 | |
2218 | /* |
2219 | * compute pipe bpp is set to false for DP MST DSC case |
2220 | * and compressed_bpp is calculated same time once |
2221 | * vpci timeslots are allocated, because overall bpp |
2222 | * calculation procedure is bit different for MST case. |
2223 | */ |
2224 | if (compute_pipe_bpp) { |
2225 | if (intel_dp_is_edp(intel_dp)) |
2226 | ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config, |
2227 | conn_state, limits); |
2228 | else |
2229 | ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config, |
2230 | conn_state, limits, timeslots); |
2231 | if (ret) { |
2232 | drm_dbg_kms(&dev_priv->drm, |
2233 | "No Valid pipe bpp for given mode ret = %d\n" , ret); |
2234 | return ret; |
2235 | } |
2236 | } |
2237 | |
2238 | /* Calculate Slice count */ |
2239 | if (intel_dp_is_edp(intel_dp)) { |
2240 | pipe_config->dsc.slice_count = |
2241 | drm_dp_dsc_sink_max_slice_count(dsc_dpcd: connector->dp.dsc_dpcd, |
2242 | is_edp: true); |
2243 | if (!pipe_config->dsc.slice_count) { |
2244 | drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n" , |
2245 | pipe_config->dsc.slice_count); |
2246 | return -EINVAL; |
2247 | } |
2248 | } else { |
2249 | u8 dsc_dp_slice_count; |
2250 | |
2251 | dsc_dp_slice_count = |
2252 | intel_dp_dsc_get_slice_count(connector, |
2253 | mode_clock: adjusted_mode->crtc_clock, |
2254 | mode_hdisplay: adjusted_mode->crtc_hdisplay, |
2255 | bigjoiner: pipe_config->bigjoiner_pipes); |
2256 | if (!dsc_dp_slice_count) { |
2257 | drm_dbg_kms(&dev_priv->drm, |
2258 | "Compressed Slice Count not supported\n" ); |
2259 | return -EINVAL; |
2260 | } |
2261 | |
2262 | pipe_config->dsc.slice_count = dsc_dp_slice_count; |
2263 | } |
2264 | /* |
2265 | * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate |
2266 | * is greater than the maximum Cdclock and if slice count is even |
2267 | * then we need to use 2 VDSC instances. |
2268 | */ |
2269 | if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1) |
2270 | pipe_config->dsc.dsc_split = true; |
2271 | |
2272 | ret = intel_dp_dsc_compute_params(connector, crtc_state: pipe_config); |
2273 | if (ret < 0) { |
2274 | drm_dbg_kms(&dev_priv->drm, |
2275 | "Cannot compute valid DSC parameters for Input Bpp = %d" |
2276 | "Compressed BPP = " BPP_X16_FMT "\n" , |
2277 | pipe_config->pipe_bpp, |
2278 | BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); |
2279 | return ret; |
2280 | } |
2281 | |
2282 | pipe_config->dsc.compression_enable = true; |
2283 | drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " |
2284 | "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n" , |
2285 | pipe_config->pipe_bpp, |
2286 | BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), |
2287 | pipe_config->dsc.slice_count); |
2288 | |
2289 | return 0; |
2290 | } |
2291 | |
2292 | /** |
2293 | * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits |
2294 | * @intel_dp: intel DP |
2295 | * @crtc_state: crtc state |
2296 | * @dsc: DSC compression mode |
2297 | * @limits: link configuration limits |
2298 | * |
2299 | * Calculates the output link min, max bpp values in @limits based on the |
2300 | * pipe bpp range, @crtc_state and @dsc mode. |
2301 | * |
2302 | * Returns %true in case of success. |
2303 | */ |
2304 | bool |
2305 | intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, |
2306 | const struct intel_crtc_state *crtc_state, |
2307 | bool dsc, |
2308 | struct link_config_limits *limits) |
2309 | { |
2310 | struct drm_i915_private *i915 = to_i915(dev: crtc_state->uapi.crtc->dev); |
2311 | const struct drm_display_mode *adjusted_mode = |
2312 | &crtc_state->hw.adjusted_mode; |
2313 | const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
2314 | const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
2315 | int max_link_bpp_x16; |
2316 | |
2317 | max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16, |
2318 | to_bpp_x16(limits->pipe.max_bpp)); |
2319 | |
2320 | if (!dsc) { |
2321 | max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3)); |
2322 | |
2323 | if (max_link_bpp_x16 < to_bpp_x16(bpp: limits->pipe.min_bpp)) |
2324 | return false; |
2325 | |
2326 | limits->link.min_bpp_x16 = to_bpp_x16(bpp: limits->pipe.min_bpp); |
2327 | } else { |
2328 | /* |
2329 | * TODO: set the DSC link limits already here, atm these are |
2330 | * initialized only later in intel_edp_dsc_compute_pipe_bpp() / |
2331 | * intel_dp_dsc_compute_pipe_bpp() |
2332 | */ |
2333 | limits->link.min_bpp_x16 = 0; |
2334 | } |
2335 | |
2336 | limits->link.max_bpp_x16 = max_link_bpp_x16; |
2337 | |
2338 | drm_dbg_kms(&i915->drm, |
2339 | "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n" , |
2340 | encoder->base.base.id, encoder->base.name, |
2341 | crtc->base.base.id, crtc->base.name, |
2342 | adjusted_mode->crtc_clock, |
2343 | dsc ? "on" : "off" , |
2344 | limits->max_lane_count, |
2345 | limits->max_rate, |
2346 | limits->pipe.max_bpp, |
2347 | BPP_X16_ARGS(limits->link.max_bpp_x16)); |
2348 | |
2349 | return true; |
2350 | } |
2351 | |
2352 | static bool |
2353 | intel_dp_compute_config_limits(struct intel_dp *intel_dp, |
2354 | struct intel_crtc_state *crtc_state, |
2355 | bool respect_downstream_limits, |
2356 | bool dsc, |
2357 | struct link_config_limits *limits) |
2358 | { |
2359 | limits->min_rate = intel_dp_common_rate(intel_dp, index: 0); |
2360 | limits->max_rate = intel_dp_max_link_rate(intel_dp); |
2361 | |
2362 | /* FIXME 128b/132b SST support missing */ |
2363 | limits->max_rate = min(limits->max_rate, 810000); |
2364 | |
2365 | limits->min_lane_count = 1; |
2366 | limits->max_lane_count = intel_dp_max_lane_count(intel_dp); |
2367 | |
2368 | limits->pipe.min_bpp = intel_dp_min_bpp(output_format: crtc_state->output_format); |
2369 | limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state, |
2370 | respect_downstream_limits); |
2371 | |
2372 | if (intel_dp->use_max_params) { |
2373 | /* |
2374 | * Use the maximum clock and number of lanes the eDP panel |
2375 | * advertizes being capable of in case the initial fast |
2376 | * optimal params failed us. The panels are generally |
2377 | * designed to support only a single clock and lane |
2378 | * configuration, and typically on older panels these |
2379 | * values correspond to the native resolution of the panel. |
2380 | */ |
2381 | limits->min_lane_count = limits->max_lane_count; |
2382 | limits->min_rate = limits->max_rate; |
2383 | } |
2384 | |
2385 | intel_dp_adjust_compliance_config(intel_dp, pipe_config: crtc_state, limits); |
2386 | |
2387 | return intel_dp_compute_config_link_bpp_limits(intel_dp, |
2388 | crtc_state, |
2389 | dsc, |
2390 | limits); |
2391 | } |
2392 | |
2393 | int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) |
2394 | { |
2395 | const struct drm_display_mode *adjusted_mode = |
2396 | &crtc_state->hw.adjusted_mode; |
2397 | int bpp = crtc_state->dsc.compression_enable ? |
2398 | to_bpp_int_roundup(bpp_x16: crtc_state->dsc.compressed_bpp_x16) : |
2399 | crtc_state->pipe_bpp; |
2400 | |
2401 | return intel_dp_link_required(pixel_clock: adjusted_mode->crtc_clock, bpp); |
2402 | } |
2403 | |
2404 | static int |
2405 | intel_dp_compute_link_config(struct intel_encoder *encoder, |
2406 | struct intel_crtc_state *pipe_config, |
2407 | struct drm_connector_state *conn_state, |
2408 | bool respect_downstream_limits) |
2409 | { |
2410 | struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev); |
2411 | struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); |
2412 | const struct intel_connector *connector = |
2413 | to_intel_connector(conn_state->connector); |
2414 | const struct drm_display_mode *adjusted_mode = |
2415 | &pipe_config->hw.adjusted_mode; |
2416 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
2417 | struct link_config_limits limits; |
2418 | bool joiner_needs_dsc = false; |
2419 | bool dsc_needed; |
2420 | int ret = 0; |
2421 | |
2422 | if (pipe_config->fec_enable && |
2423 | !intel_dp_supports_fec(intel_dp, connector, pipe_config)) |
2424 | return -EINVAL; |
2425 | |
2426 | if (intel_dp_need_bigjoiner(intel_dp, hdisplay: adjusted_mode->crtc_hdisplay, |
2427 | clock: adjusted_mode->crtc_clock)) |
2428 | pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); |
2429 | |
2430 | /* |
2431 | * Pipe joiner needs compression up to display 12 due to bandwidth |
2432 | * limitation. DG2 onwards pipe joiner can be enabled without |
2433 | * compression. |
2434 | */ |
2435 | joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes; |
2436 | |
2437 | dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || |
2438 | !intel_dp_compute_config_limits(intel_dp, crtc_state: pipe_config, |
2439 | respect_downstream_limits, |
2440 | dsc: false, |
2441 | limits: &limits); |
2442 | |
2443 | if (!dsc_needed) { |
2444 | /* |
2445 | * Optimize for slow and wide for everything, because there are some |
2446 | * eDP 1.3 and 1.4 panels don't work well with fast and narrow. |
2447 | */ |
2448 | ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, |
2449 | conn_state, limits: &limits); |
2450 | if (ret) |
2451 | dsc_needed = true; |
2452 | } |
2453 | |
2454 | if (dsc_needed) { |
2455 | drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n" , |
2456 | str_yes_no(ret), str_yes_no(joiner_needs_dsc), |
2457 | str_yes_no(intel_dp->force_dsc_en)); |
2458 | |
2459 | if (!intel_dp_compute_config_limits(intel_dp, crtc_state: pipe_config, |
2460 | respect_downstream_limits, |
2461 | dsc: true, |
2462 | limits: &limits)) |
2463 | return -EINVAL; |
2464 | |
2465 | ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, |
2466 | conn_state, limits: &limits, timeslots: 64, compute_pipe_bpp: true); |
2467 | if (ret < 0) |
2468 | return ret; |
2469 | } |
2470 | |
2471 | drm_dbg_kms(&i915->drm, |
2472 | "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n" , |
2473 | pipe_config->lane_count, pipe_config->port_clock, |
2474 | pipe_config->pipe_bpp, |
2475 | BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), |
2476 | intel_dp_config_required_rate(pipe_config), |
2477 | intel_dp_max_link_data_rate(intel_dp, |
2478 | pipe_config->port_clock, |
2479 | pipe_config->lane_count)); |
2480 | |
2481 | return 0; |
2482 | } |
2483 | |
2484 | bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, |
2485 | const struct drm_connector_state *conn_state) |
2486 | { |
2487 | const struct intel_digital_connector_state *intel_conn_state = |
2488 | to_intel_digital_connector_state(conn_state); |
2489 | const struct drm_display_mode *adjusted_mode = |
2490 | &crtc_state->hw.adjusted_mode; |
2491 | |
2492 | /* |
2493 | * Our YCbCr output is always limited range. |
2494 | * crtc_state->limited_color_range only applies to RGB, |
2495 | * and it must never be set for YCbCr or we risk setting |
2496 | * some conflicting bits in TRANSCONF which will mess up |
2497 | * the colors on the monitor. |
2498 | */ |
2499 | if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) |
2500 | return false; |
2501 | |
2502 | if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { |
2503 | /* |
2504 | * See: |
2505 | * CEA-861-E - 5.1 Default Encoding Parameters |
2506 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry |
2507 | */ |
2508 | return crtc_state->pipe_bpp != 18 && |
2509 | drm_default_rgb_quant_range(mode: adjusted_mode) == |
2510 | HDMI_QUANTIZATION_RANGE_LIMITED; |
2511 | } else { |
2512 | return intel_conn_state->broadcast_rgb == |
2513 | INTEL_BROADCAST_RGB_LIMITED; |
2514 | } |
2515 | } |
2516 | |
2517 | static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, |
2518 | enum port port) |
2519 | { |
2520 | if (IS_G4X(dev_priv)) |
2521 | return false; |
2522 | if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) |
2523 | return false; |
2524 | |
2525 | return true; |
2526 | } |
2527 | |
2528 | static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, |
2529 | const struct drm_connector_state *conn_state, |
2530 | struct drm_dp_vsc_sdp *vsc) |
2531 | { |
2532 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
2533 | struct drm_i915_private *dev_priv = to_i915(dev: crtc->base.dev); |
2534 | |
2535 | if (crtc_state->has_panel_replay) { |
2536 | /* |
2537 | * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 |
2538 | * VSC SDP supporting 3D stereo, Panel Replay, and Pixel |
2539 | * Encoding/Colorimetry Format indication. |
2540 | */ |
2541 | vsc->revision = 0x7; |
2542 | } else { |
2543 | /* |
2544 | * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 |
2545 | * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ |
2546 | * Colorimetry Format indication. |
2547 | */ |
2548 | vsc->revision = 0x5; |
2549 | } |
2550 | |
2551 | vsc->length = 0x13; |
2552 | |
2553 | /* DP 1.4a spec, Table 2-120 */ |
2554 | switch (crtc_state->output_format) { |
2555 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
2556 | vsc->pixelformat = DP_PIXELFORMAT_YUV444; |
2557 | break; |
2558 | case INTEL_OUTPUT_FORMAT_YCBCR420: |
2559 | vsc->pixelformat = DP_PIXELFORMAT_YUV420; |
2560 | break; |
2561 | case INTEL_OUTPUT_FORMAT_RGB: |
2562 | default: |
2563 | vsc->pixelformat = DP_PIXELFORMAT_RGB; |
2564 | } |
2565 | |
2566 | switch (conn_state->colorspace) { |
2567 | case DRM_MODE_COLORIMETRY_BT709_YCC: |
2568 | vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; |
2569 | break; |
2570 | case DRM_MODE_COLORIMETRY_XVYCC_601: |
2571 | vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; |
2572 | break; |
2573 | case DRM_MODE_COLORIMETRY_XVYCC_709: |
2574 | vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; |
2575 | break; |
2576 | case DRM_MODE_COLORIMETRY_SYCC_601: |
2577 | vsc->colorimetry = DP_COLORIMETRY_SYCC_601; |
2578 | break; |
2579 | case DRM_MODE_COLORIMETRY_OPYCC_601: |
2580 | vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; |
2581 | break; |
2582 | case DRM_MODE_COLORIMETRY_BT2020_CYCC: |
2583 | vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; |
2584 | break; |
2585 | case DRM_MODE_COLORIMETRY_BT2020_RGB: |
2586 | vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; |
2587 | break; |
2588 | case DRM_MODE_COLORIMETRY_BT2020_YCC: |
2589 | vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; |
2590 | break; |
2591 | case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: |
2592 | case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: |
2593 | vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; |
2594 | break; |
2595 | default: |
2596 | /* |
2597 | * RGB->YCBCR color conversion uses the BT.709 |
2598 | * color space. |
2599 | */ |
2600 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
2601 | vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; |
2602 | else |
2603 | vsc->colorimetry = DP_COLORIMETRY_DEFAULT; |
2604 | break; |
2605 | } |
2606 | |
2607 | vsc->bpc = crtc_state->pipe_bpp / 3; |
2608 | |
2609 | /* only RGB pixelformat supports 6 bpc */ |
2610 | drm_WARN_ON(&dev_priv->drm, |
2611 | vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); |
2612 | |
2613 | /* all YCbCr are always limited range */ |
2614 | vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; |
2615 | vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; |
2616 | } |
2617 | |
/*
 * Compute the VSC SDP (Video Stream Configuration Secondary Data Packet)
 * for the new CRTC state.
 *
 * The SDP is needed either to carry pixel encoding/colorimetry information
 * (in which case intel_dp_compute_vsc_colorimetry() fills it in with a
 * rev 0x5/0x7 header) or, without colorimetry, purely as the PSR/PSR2/
 * Panel Replay packet with the matching header revision/length from the
 * eDP/DP specs.
 */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc;

	/*
	 * Nothing to emit unless the sink needs colorimetry via VSC (and the
	 * source supports it) or PSR is enabled for this state.
	 */
	if ((!intel_dp->colorimetry_support ||
	     !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
	    !crtc_state->has_psr)
		return;

	vsc = &crtc_state->infoframes.vsc;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;

	/* Needs colorimetry */
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
		intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
						 vsc);
	} else if (crtc_state->has_psr2) {
		/*
		 * [PSR2 without colorimetry]
		 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
		 * 3D stereo + PSR/PSR2 + Y-coordinate.
		 */
		vsc->revision = 0x4;
		vsc->length = 0xe;
	} else if (crtc_state->has_panel_replay) {
		/*
		 * [Panel Replay without colorimetry info]
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo + Panel Replay.
		 */
		vsc->revision = 0x6;
		vsc->length = 0x10;
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
2665 | |
/*
 * Build the HDR Dynamic Range and Mastering (DRM) infoframe SDP from the
 * connector's hdr_output_metadata property, if one is set. On success the
 * GAMUT_METADATA packet is flagged as enabled in the crtc state; on failure
 * only a debug message is emitted (HDR metadata is best-effort).
 */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	/* No HDR metadata property set by userspace -> nothing to send. */
	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(frame: drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n" );
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(type: HDMI_PACKET_TYPE_GAMUT_METADATA);
}
2688 | |
/*
 * Check whether seamless DRRS (Dynamic Refresh Rate Switching) can be
 * enabled for this connector/pipe combination.
 */
static bool can_enable_drrs(struct intel_connector *connector,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_display_mode *downclock_mode)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);

	/* VRR already varies the refresh rate; DRRS would conflict with it. */
	if (pipe_config->vrr.enable)
		return false;

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return false;

	/* FIXME missing FDI M2/N2 etc. */
	if (pipe_config->has_pch_encoder)
		return false;

	/* The transcoder itself must support DRRS on this platform. */
	if (!intel_cpu_transcoder_has_drrs(i915, cpu_transcoder: pipe_config->cpu_transcoder))
		return false;

	/* Need a panel-provided downclock mode and seamless-capable panel. */
	return downclock_mode &&
		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}
2717 | |
/*
 * Compute the DRRS state for the new crtc state: decide whether DRRS can be
 * enabled and, if so, fill in the second set of link M/N values (dp_m2_n2)
 * for the panel's downclock mode.
 *
 * @link_bpp_x16: link bpp in U6.4 fixed point format, used for the M/N
 * computation of the downclocked link.
 */
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, adjusted_mode: &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	/*
	 * FIXME all joined pipes share the same transcoder.
	 * Need to account for that when updating M/N live.
	 */
	if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		/* Clear any stale M2/N2 values if the transcoder has them. */
		if (intel_cpu_transcoder_has_m2_n2(dev_priv: i915, transcoder: pipe_config->cpu_transcoder))
			intel_zero_m_n(m_n: &pipe_config->dp_m2_n2);
		return;
	}

	/* ILK/SNB/IVB need a VBT-provided MSA timing delay for DRRS. */
	if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	/* With the MSO splitter each link carries only a fraction of the pixels. */
	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	intel_link_compute_m_n(bpp: link_bpp_x16, nlanes: pipe_config->lane_count, pixel_clock,
			       link_clock: pipe_config->port_clock,
			       bw_overhead: intel_dp_bw_fec_overhead(fec_enabled: pipe_config->fec_enable),
			       m_n: &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}
2759 | |
/*
 * Decide whether audio should be enabled on this DP output, honoring the
 * connector's force_audio property (ON/OFF/AUTO). In AUTO mode the sink's
 * reported audio capability (from display_info) decides.
 */
static bool intel_dp_has_audio(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev);
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);

	/* On SST the port itself must be audio capable; MST is handled per stream. */
	if (!intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_DP_MST) &&
	    !intel_dp_port_has_audio(dev_priv: i915, port: encoder->port))
		return false;

	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		return connector->base.display_info.has_audio;
	else
		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
2779 | |
/*
 * Pick the sink/output pixel format for the new state and compute the link
 * configuration for it.
 *
 * First tries the format implied by the mode (RGB unless the mode is
 * YCbCr 4:2:0 only); if link config fails and the mode also supports
 * 4:2:0 output, retries once with YCbCr 4:2:0 (which halves the required
 * link bandwidth).
 *
 * Returns 0 on success or the (negative) error from
 * intel_dp_compute_link_config().
 */
static int
intel_dp_compute_output_format(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state,
			       bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	bool ycbcr_420_only;
	int ret;

	ycbcr_420_only = drm_mode_is_420_only(display: info, mode: adjusted_mode);

	if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
		drm_dbg_kms(&i915->drm,
			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n" );
		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		crtc_state->sink_format = intel_dp_sink_format(connector, mode: adjusted_mode);
	}

	crtc_state->output_format = intel_dp_output_format(connector, sink_format: crtc_state->sink_format);

	ret = intel_dp_compute_link_config(encoder, pipe_config: crtc_state, conn_state,
					   respect_downstream_limits);
	if (ret) {
		/*
		 * Link config failed: retry with YCbCr 4:2:0 only if we
		 * weren't already using it, the connector allows it and the
		 * mode supports it as an alternative.
		 */
		if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(display: info, mode: adjusted_mode))
			return ret;

		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		crtc_state->output_format = intel_dp_output_format(connector,
								   sink_format: crtc_state->sink_format);
		ret = intel_dp_compute_link_config(encoder, pipe_config: crtc_state, conn_state,
						   respect_downstream_limits);
	}

	return ret;
}
2823 | |
/*
 * Compute the audio-related crtc state: whether audio is enabled at all,
 * and whether SDP splitting is needed (required for audio on UHBR link
 * rates).
 */
void
intel_dp_audio_compute_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	pipe_config->has_audio =
		intel_dp_has_audio(encoder, crtc_state: pipe_config, conn_state) &&
		intel_audio_compute_config(encoder, crtc_state: pipe_config, conn_state);

	/* SDP split is only relevant when audio is actually enabled. */
	pipe_config->sdp_split_enable = pipe_config->has_audio &&
					intel_dp_is_uhbr(crtc_state: pipe_config);
}
2836 | |
/*
 * Queue the connector's modeset retry work (used e.g. after link training
 * failures). Takes a connector reference that the work item owns; if the
 * work was already queued, the extra reference is dropped immediately.
 */
void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);

	drm_connector_get(connector: &connector->base);
	if (!queue_work(wq: i915->unordered_wq, work: &connector->modeset_retry_work))
		drm_connector_put(connector: &connector->base);
}
2845 | |
/*
 * Queue modeset retry work for every connector driven over the given link.
 * For SST that is just the attached connector; for MST it is every
 * connector in the atomic state that is enabled and belongs to this
 * intel_dp (mst_port).
 */
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *connector;
	struct intel_digital_connector_state *conn_state;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int i;

	if (!intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_DP_MST)) {
		intel_dp_queue_modeset_retry_work(connector: intel_dp->attached_connector);

		return;
	}

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		/* Skip connectors that aren't enabled in the new state. */
		if (!conn_state->base.crtc)
			continue;

		if (connector->mst_port == intel_dp)
			intel_dp_queue_modeset_retry_work(connector);
	}
}
2870 | |
/*
 * Main DP encoder .compute_config() hook: validate the requested mode and
 * fill in the full DP-specific crtc state (output format, link config,
 * audio, MSO splitter timings, M/N values, VRR/PSR/DRRS, SDP infoframes,
 * tunnel bandwidth).
 *
 * Returns 0 on success or a negative error code when the mode can't be
 * supported.
 *
 * NOTE: the call order at the bottom matters: PSR must be computed before
 * DRRS (see can_enable_drrs()), and the VSC SDP depends on the PSR state.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	/* Pre-DDI platforms route all non-eDP (port A) DP through the PCH. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	fixed_mode = intel_panel_fixed_mode(connector, mode: adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Reject mode flags this encoder can't generate. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, hdisplay: adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, crtc_state: pipe_config, conn_state, respect_downstream_limits: true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, crtc_state: pipe_config, conn_state, respect_downstream_limits: false);
	if (ret)
		return ret;

	/* Panel fitting needed for eDP fixed modes and for 4:2:0 output. */
	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(crtc_state: pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(crtc_state: pipe_config, conn_state);

	pipe_config->enhanced_framing =
		drm_dp_enhanced_frame_cap(dpcd: intel_dp->dpcd);

	/* Link bpp (U6.4 fixed point): compressed bpp when DSC is in use. */
	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = to_bpp_x16(bpp: intel_dp_output_bpp(output_format: pipe_config->output_format,
							     bpp: pipe_config->pipe_bpp));

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n" ,
			    n, overlap);

		/*
		 * Each MSO link drives 1/n of the width plus the pixel
		 * overlap, so scale all horizontal timings accordingly.
		 */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(bpp: link_bpp_x16,
			       nlanes: pipe_config->lane_count,
			       pixel_clock: adjusted_mode->crtc_clock,
			       link_clock: pipe_config->port_clock,
			       bw_overhead: intel_dp_bw_fec_overhead(fec_enabled: pipe_config->fec_enable),
			       m_n: &pipe_config->dp_m_n);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	/* PSR before DRRS (mutually exclusive), VSC SDP after PSR. */
	intel_vrr_compute_config(crtc_state: pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, crtc_state: pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, crtc_state: pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, crtc_state: pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							crtc_state: pipe_config);
}
2981 | |
/*
 * Record the link rate/lane count about to be used and reset the training
 * state (train_set, link_trained) ahead of link training.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}
2990 | |
/*
 * Reset the max link parameters to the highest rate/lane count common to
 * source and sink (e.g. after (re)reading the DPCD).
 */
static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
{
	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}
2996 | |
/*
 * Enable backlight PWM and backlight PP control.
 *
 * No-op for non-eDP outputs. The PWM/backlight controller is enabled
 * first, then the panel-power sequencer's backlight control.
 */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n" );

	intel_backlight_enable(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}
3012 | |
/*
 * Disable backlight PP control and backlight PWM.
 *
 * No-op for non-eDP outputs. Mirror image of intel_edp_backlight_on():
 * PPS backlight control is dropped before disabling the PWM controller.
 */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n" );

	intel_pps_backlight_off(intel_dp);
	intel_backlight_disable(old_conn_state);
}
3027 | |
/*
 * Check whether the branch device must stay in D0 so that downstream HPD
 * keeps working: DPCD 1.1 branch devices with an HPD-capable downstream
 * port can't be trusted to signal HPD from D3.
 */
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(dpcd: intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
3042 | |
3043 | static int |
3044 | write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set) |
3045 | { |
3046 | int err; |
3047 | u8 val; |
3048 | |
3049 | err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, valuep: &val); |
3050 | if (err < 0) |
3051 | return err; |
3052 | |
3053 | if (set) |
3054 | val |= flag; |
3055 | else |
3056 | val &= ~flag; |
3057 | |
3058 | return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, value: val); |
3059 | } |
3060 | |
/*
 * Enable/disable DSC decompression (DP_DECOMPRESSION_EN) in the device
 * behind the connector's decompression AUX. Failure is only logged;
 * there is no recovery path here.
 */
static void
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
				    bool enable)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);

	if (write_dsc_decompression_flag(aux: connector->dp.dsc_decompression_aux,
					 DP_DECOMPRESSION_EN, set: enable) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n" ,
			    str_enable_disable(enable));
}
3073 | |
/*
 * Enable/disable DSC passthrough (DP_DSC_PASSTHROUGH_EN) in the last
 * branch device, via the MST port's passthrough AUX. Silently does
 * nothing when the connector has no passthrough AUX (SST, or branch
 * device doing the decompression itself); failure is only logged.
 */
static void
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
				  bool enable)
{
	struct drm_i915_private *i915 = to_i915(dev: connector->base.dev);
	struct drm_dp_aux *aux = connector->port ?
				 connector->port->passthrough_aux : NULL;

	if (!aux)
		return;

	if (write_dsc_decompression_flag(aux,
					 DP_DSC_PASSTHROUGH_EN, set: enable) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink compression passthrough state\n" ,
			    str_enable_disable(enable));
}
3091 | |
/*
 * Count how many connectors currently have DSC decompression enabled via
 * the same decompression AUX device as @connector.
 *
 * On MST several streams may share one branch-device decompressor, so the
 * AUX must only be programmed when the count transitions 0<->1.
 * @for_get_ref selects whether this is called from the get (enable) or
 * put (disable) path, which only affects the sanity WARN below.
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct drm_i915_private *i915 = to_i915(dev: state->base.dev);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst_port)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		/* Only connectors on the same MST port can share the AUX. */
		if (connector_iter->mst_port != connector->mst_port)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		/*
		 * A decompression-enabled connector must have a CRTC in the
		 * relevant (new for get, old for put) state.
		 */
		drm_WARN_ON(&i915->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}
3132 | |
/*
 * Take a reference on the shared decompression AUX. Returns true if this
 * was the first reference, i.e. the caller must actually program the
 * decompression enable in the sink/branch device.
 */
static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	/* Count before marking this connector as enabled. */
	bool ret = intel_dp_dsc_aux_ref_count(state, connector, for_get_ref: true) == 0;

	connector->dp.dsc_decompression_enabled = true;

	return ret;
}
3142 | |
/*
 * Drop a reference on the shared decompression AUX. Returns true if this
 * was the last reference, i.e. the caller must actually disable
 * decompression in the sink/branch device.
 */
static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	/* Clear this connector's flag before counting the remainder. */
	connector->dp.dsc_decompression_enabled = false;

	return intel_dp_dsc_aux_ref_count(state, connector, for_get_ref: false) == 0;
}
3150 | |
3151 | /** |
3152 | * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device |
3153 | * @state: atomic state |
3154 | * @connector: connector to enable the decompression for |
3155 | * @new_crtc_state: new state for the CRTC driving @connector |
3156 | * |
3157 | * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD |
3158 | * register of the appropriate sink/branch device. On SST this is always the |
3159 | * sink device, whereas on MST based on each device's DSC capabilities it's |
3160 | * either the last branch device (enabling decompression in it) or both the |
3161 | * last branch device (enabling passthrough in it) and the sink device |
3162 | * (enabling decompression in it). |
3163 | */ |
3164 | void intel_dp_sink_enable_decompression(struct intel_atomic_state *state, |
3165 | struct intel_connector *connector, |
3166 | const struct intel_crtc_state *new_crtc_state) |
3167 | { |
3168 | struct drm_i915_private *i915 = to_i915(dev: state->base.dev); |
3169 | |
3170 | if (!new_crtc_state->dsc.compression_enable) |
3171 | return; |
3172 | |
3173 | if (drm_WARN_ON(&i915->drm, |
3174 | !connector->dp.dsc_decompression_aux || |
3175 | connector->dp.dsc_decompression_enabled)) |
3176 | return; |
3177 | |
3178 | if (!intel_dp_dsc_aux_get_ref(state, connector)) |
3179 | return; |
3180 | |
3181 | intel_dp_sink_set_dsc_passthrough(connector, enable: true); |
3182 | intel_dp_sink_set_dsc_decompression(connector, enable: true); |
3183 | } |
3184 | |
3185 | /** |
3186 | * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device |
3187 | * @state: atomic state |
3188 | * @connector: connector to disable the decompression for |
3189 | * @old_crtc_state: old state for the CRTC driving @connector |
3190 | * |
3191 | * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD |
3192 | * register of the appropriate sink/branch device, corresponding to the |
3193 | * sequence in intel_dp_sink_enable_decompression(). |
3194 | */ |
3195 | void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, |
3196 | struct intel_connector *connector, |
3197 | const struct intel_crtc_state *old_crtc_state) |
3198 | { |
3199 | struct drm_i915_private *i915 = to_i915(dev: state->base.dev); |
3200 | |
3201 | if (!old_crtc_state->dsc.compression_enable) |
3202 | return; |
3203 | |
3204 | if (drm_WARN_ON(&i915->drm, |
3205 | !connector->dp.dsc_decompression_aux || |
3206 | !connector->dp.dsc_decompression_enabled)) |
3207 | return; |
3208 | |
3209 | if (!intel_dp_dsc_aux_put_ref(state, connector)) |
3210 | return; |
3211 | |
3212 | intel_dp_sink_set_dsc_decompression(connector, enable: false); |
3213 | intel_dp_sink_set_dsc_passthrough(connector, enable: false); |
3214 | } |
3215 | |
/*
 * Write the Intel source OUI (00-AA-01) to the eDP sink's DP_SOURCE_OUI
 * DPCD registers, which unlocks sink features keyed to the source vendor.
 * Records the write time for intel_dp_wait_source_oui().
 */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = {};

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		if (drm_dp_dpcd_read(aux: &intel_dp->aux, DP_SOURCE_OUI, buffer: buf, size: sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n" );

		/* Already set to our OUI: leave the sink state untouched. */
		if (memcmp(p: oui, q: buf, size: sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(aux: &intel_dp->aux, DP_SOURCE_OUI, buffer: oui, size: sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n" );

	/* Timestamp used by intel_dp_wait_source_oui(). */
	intel_dp->last_oui_write = jiffies;
}
3240 | |
/*
 * Wait out the panel's VBT-specified HDR DPCD refresh timeout after the
 * last source-OUI write, so subsequent DPCD accesses see the sink state
 * that the OUI write unlocked.
 */
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n" ,
		    connector->base.base.id, connector->base.name,
		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);

	/* Waits only for whatever part of the timeout hasn't elapsed yet. */
	wait_remaining_ms_from_jiffies(timestamp_jiffies: intel_dp->last_oui_write,
				       to_wait_ms: connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}
3253 | |
/*
 * If the device supports it, try to set the power state appropriately.
 *
 * @mode: DP_SET_POWER_D0 to wake the sink, other values to power it down.
 * D3 is skipped when the branch device is needed awake for downstream HPD.
 * Wake-up (D0) retries the DPCD write to give the sink time to respond,
 * resumes any LSPCON and writes the source OUI early for eDP.
 */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 if downstream HPD depends on it. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_SET_POWER, value: mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dig_port: dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, careful: false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_SET_POWER, value: mode);
			if (ret == 1)
				break;
			msleep(msecs: 1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns 1 (bytes written) on success. */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n" ,
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3" );
}
3299 | |
3300 | static bool |
3301 | intel_dp_get_dpcd(struct intel_dp *intel_dp); |
3302 | |
3303 | /** |
3304 | * intel_dp_sync_state - sync the encoder state during init/resume |
3305 | * @encoder: intel encoder to sync |
3306 | * @crtc_state: state for the CRTC connected to the encoder |
3307 | * |
3308 | * Sync any state stored in the encoder wrt. HW state during driver init |
3309 | * and system resume. |
3310 | */ |
3311 | void intel_dp_sync_state(struct intel_encoder *encoder, |
3312 | const struct intel_crtc_state *crtc_state) |
3313 | { |
3314 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
3315 | bool dpcd_updated = false; |
3316 | |
3317 | /* |
3318 | * Don't clobber DPCD if it's been already read out during output |
3319 | * setup (eDP) or detect. |
3320 | */ |
3321 | if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) { |
3322 | intel_dp_get_dpcd(intel_dp); |
3323 | dpcd_updated = true; |
3324 | } |
3325 | |
3326 | intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated); |
3327 | |
3328 | if (crtc_state) |
3329 | intel_dp_reset_max_link_params(intel_dp); |
3330 | } |
3331 | |
/*
 * Check whether the BIOS-inherited state allows a fastset, or whether a
 * full modeset must be forced. Returns false (and marks the relevant
 * uapi change flags) when the link rate is unsupported or DSC is in use.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool fastset = true;

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(rates: intel_dp->source_rates, len: intel_dp->num_source_rates,
				rate: crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n" ,
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.connectors_changed = true;
		fastset = false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n" ,
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	return fastset;
}
3367 | |
/*
 * Read and cache the PCON's DSC encoder capability DPCD registers
 * (starting at DP_PCON_DSC_ENCODER) into intel_dp->pcon_dsc_dpcd.
 * On read failure the cache is left zeroed and an error is logged.
 */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(aux: &intel_dp->aux, DP_PCON_DSC_ENCODER,
			     buffer: intel_dp->pcon_dsc_dpcd,
			     size: sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n" ,
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n" ,
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}
3385 | |
/*
 * Convert a PCON FRL bandwidth mask to the highest FRL link rate it
 * contains, in Gbps.
 *
 * Bit i of @frl_bw_mask corresponds to bw_gbps[i] (9/18/24/32/40/48 Gbps);
 * the most significant set bit wins. Returns 0 if no bit is set.
 */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	/* static const: the table is read-only, no need to rebuild it per call */
	static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = (int)(sizeof(bw_gbps) / sizeof(bw_gbps[0])) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}

	return 0;
}
3397 | |
3398 | static int intel_dp_pcon_set_frl_mask(int max_frl) |
3399 | { |
3400 | switch (max_frl) { |
3401 | case 48: |
3402 | return DP_PCON_FRL_BW_MASK_48GBPS; |
3403 | case 40: |
3404 | return DP_PCON_FRL_BW_MASK_40GBPS; |
3405 | case 32: |
3406 | return DP_PCON_FRL_BW_MASK_32GBPS; |
3407 | case 24: |
3408 | return DP_PCON_FRL_BW_MASK_24GBPS; |
3409 | case 18: |
3410 | return DP_PCON_FRL_BW_MASK_18GBPS; |
3411 | case 9: |
3412 | return DP_PCON_FRL_BW_MASK_9GBPS; |
3413 | } |
3414 | |
3415 | return 0; |
3416 | } |
3417 | |
3418 | static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) |
3419 | { |
3420 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
3421 | struct drm_connector *connector = &intel_connector->base; |
3422 | int max_frl_rate; |
3423 | int max_lanes, rate_per_lane; |
3424 | int max_dsc_lanes, dsc_rate_per_lane; |
3425 | |
3426 | max_lanes = connector->display_info.hdmi.max_lanes; |
3427 | rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; |
3428 | max_frl_rate = max_lanes * rate_per_lane; |
3429 | |
3430 | if (connector->display_info.hdmi.dsc_cap.v_1p2) { |
3431 | max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; |
3432 | dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; |
3433 | if (max_dsc_lanes && dsc_rate_per_lane) |
3434 | max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); |
3435 | } |
3436 | |
3437 | return max_frl_rate; |
3438 | } |
3439 | |
3440 | static bool |
3441 | intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, |
3442 | u8 max_frl_bw_mask, u8 *frl_trained_mask) |
3443 | { |
3444 | if (drm_dp_pcon_hdmi_link_active(aux: &intel_dp->aux) && |
3445 | drm_dp_pcon_hdmi_link_mode(aux: &intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && |
3446 | *frl_trained_mask >= max_frl_bw_mask) |
3447 | return true; |
3448 | |
3449 | return false; |
3450 | } |
3451 | |
/*
 * Train the PCON's HDMI FRL link at the highest rate supported by both
 * the PCON and the HDMI sink (from its EDID-derived caps).
 *
 * Returns 0 on success (already trained or training completed), -EINVAL
 * if no usable FRL rate exists, -ETIMEDOUT if the PCON never becomes
 * FRL-ready or the HDMI link never comes up, or a negative error from a
 * failed DPCD access.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n" , max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n" , max_edid_frl_bw);

	/* Train at the lower of what the PCON and the sink support. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl: max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n" , max_frl_bw_mask);

	/* Nothing to do if the link is already trained at (or above) this rate. */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, frl_trained_mask: &frl_trained_mask))
		goto frl_trained;

	ret = drm_dp_pcon_frl_prepare(aux: &intel_dp->aux, enable_frl_ready_hpd: false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Sequential link training: first program the max rate in Gbps... */
	ret = drm_dp_pcon_frl_configure_1(aux: &intel_dp->aux, max_frl_gbps: max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	/* ...then the matching rate mask with normal link training, and enable. */
	ret = drm_dp_pcon_frl_configure_2(aux: &intel_dp->aux, max_frl_mask: max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(aux: &intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n" , frl_trained_mask);
	/* Cache the trained rate so callers don't retrain on every check. */
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_bw_mask: frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n" , intel_dp->frl.trained_rate_gbps);

	return 0;
}
3518 | |
3519 | static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) |
3520 | { |
3521 | if (drm_dp_is_branch(dpcd: intel_dp->dpcd) && |
3522 | intel_dp_has_hdmi_sink(intel_dp) && |
3523 | intel_dp_hdmi_sink_max_frl(intel_dp) > 0) |
3524 | return true; |
3525 | |
3526 | return false; |
3527 | } |
3528 | |
3529 | static |
3530 | int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) |
3531 | { |
3532 | int ret; |
3533 | u8 buf = 0; |
3534 | |
3535 | /* Set PCON source control mode */ |
3536 | buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; |
3537 | |
3538 | ret = drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, value: buf); |
3539 | if (ret < 0) |
3540 | return ret; |
3541 | |
3542 | /* Set HDMI LINK ENABLE */ |
3543 | buf |= DP_PCON_ENABLE_HDMI_LINK; |
3544 | ret = drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, value: buf); |
3545 | if (ret < 0) |
3546 | return ret; |
3547 | |
3548 | return 0; |
3549 | } |
3550 | |
3551 | void intel_dp_check_frl_training(struct intel_dp *intel_dp) |
3552 | { |
3553 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
3554 | |
3555 | /* |
3556 | * Always go for FRL training if: |
3557 | * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) |
3558 | * -sink is HDMI2.1 |
3559 | */ |
3560 | if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) || |
3561 | !intel_dp_is_hdmi_2_1_sink(intel_dp) || |
3562 | intel_dp->frl.is_trained) |
3563 | return; |
3564 | |
3565 | if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { |
3566 | int ret, mode; |
3567 | |
3568 | drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n" ); |
3569 | ret = intel_dp_pcon_set_tmds_mode(intel_dp); |
3570 | mode = drm_dp_pcon_hdmi_link_mode(aux: &intel_dp->aux, NULL); |
3571 | |
3572 | if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) |
3573 | drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n" ); |
3574 | } else { |
3575 | drm_dbg(&dev_priv->drm, "FRL training Completed\n" ); |
3576 | } |
3577 | } |
3578 | |
3579 | static int |
3580 | intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) |
3581 | { |
3582 | int vactive = crtc_state->hw.adjusted_mode.vdisplay; |
3583 | |
3584 | return intel_hdmi_dsc_get_slice_height(vactive); |
3585 | } |
3586 | |
3587 | static int |
3588 | intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, |
3589 | const struct intel_crtc_state *crtc_state) |
3590 | { |
3591 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
3592 | struct drm_connector *connector = &intel_connector->base; |
3593 | int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; |
3594 | int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; |
3595 | int pcon_max_slices = drm_dp_pcon_dsc_max_slices(pcon_dsc_dpcd: intel_dp->pcon_dsc_dpcd); |
3596 | int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(pcon_dsc_dpcd: intel_dp->pcon_dsc_dpcd); |
3597 | |
3598 | return intel_hdmi_dsc_get_num_slices(crtc_state, src_max_slices: pcon_max_slices, |
3599 | src_max_slice_width: pcon_max_slice_width, |
3600 | hdmi_max_slices, hdmi_throughput); |
3601 | } |
3602 | |
3603 | static int |
3604 | intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, |
3605 | const struct intel_crtc_state *crtc_state, |
3606 | int num_slices, int slice_width) |
3607 | { |
3608 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
3609 | struct drm_connector *connector = &intel_connector->base; |
3610 | int output_format = crtc_state->output_format; |
3611 | bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; |
3612 | int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(pcon_dsc_dpcd: intel_dp->pcon_dsc_dpcd); |
3613 | int hdmi_max_chunk_bytes = |
3614 | connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; |
3615 | |
3616 | return intel_hdmi_dsc_get_bpp(src_fractional_bpp: pcon_fractional_bpp, slice_width, |
3617 | num_slices, output_format, hdmi_all_bpp, |
3618 | hdmi_max_chunk_bytes); |
3619 | } |
3620 | |
/*
 * Program the PCON's DSC encoder PPS override parameters (slice height,
 * slice width, bpp) for an HDMI 2.1 sink. Silently bails out if either
 * end lacks DSC 1.2 support or any derived parameter comes back zero.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both PCON encoder and HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(pcon_dsc_dpcd: intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/*
	 * Pack the PPS override buffer, little-endian per field:
	 * bytes 0-1 slice height, 2-3 slice width, 4-5 bpp (bpp is 10 bits,
	 * hence the 0x3 mask on the high byte).
	 */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(aux: &intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n" );
}
3675 | |
3676 | void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, |
3677 | const struct intel_crtc_state *crtc_state) |
3678 | { |
3679 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
3680 | bool ycbcr444_to_420 = false; |
3681 | bool rgb_to_ycbcr = false; |
3682 | u8 tmp; |
3683 | |
3684 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) |
3685 | return; |
3686 | |
3687 | if (!drm_dp_is_branch(dpcd: intel_dp->dpcd)) |
3688 | return; |
3689 | |
3690 | tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0; |
3691 | |
3692 | if (drm_dp_dpcd_writeb(aux: &intel_dp->aux, |
3693 | DP_PROTOCOL_CONVERTER_CONTROL_0, value: tmp) != 1) |
3694 | drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n" , |
3695 | str_enable_disable(intel_dp_has_hdmi_sink(intel_dp))); |
3696 | |
3697 | if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) { |
3698 | switch (crtc_state->output_format) { |
3699 | case INTEL_OUTPUT_FORMAT_YCBCR420: |
3700 | break; |
3701 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
3702 | ycbcr444_to_420 = true; |
3703 | break; |
3704 | case INTEL_OUTPUT_FORMAT_RGB: |
3705 | rgb_to_ycbcr = true; |
3706 | ycbcr444_to_420 = true; |
3707 | break; |
3708 | default: |
3709 | MISSING_CASE(crtc_state->output_format); |
3710 | break; |
3711 | } |
3712 | } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) { |
3713 | switch (crtc_state->output_format) { |
3714 | case INTEL_OUTPUT_FORMAT_YCBCR444: |
3715 | break; |
3716 | case INTEL_OUTPUT_FORMAT_RGB: |
3717 | rgb_to_ycbcr = true; |
3718 | break; |
3719 | default: |
3720 | MISSING_CASE(crtc_state->output_format); |
3721 | break; |
3722 | } |
3723 | } |
3724 | |
3725 | tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; |
3726 | |
3727 | if (drm_dp_dpcd_writeb(aux: &intel_dp->aux, |
3728 | DP_PROTOCOL_CONVERTER_CONTROL_1, value: tmp) != 1) |
3729 | drm_dbg_kms(&i915->drm, |
3730 | "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n" , |
3731 | str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); |
3732 | |
3733 | tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; |
3734 | |
3735 | if (drm_dp_pcon_convert_rgb_to_ycbcr(aux: &intel_dp->aux, color_spc: tmp) < 0) |
3736 | drm_dbg_kms(&i915->drm, |
3737 | "Failed to %s protocol converter RGB->YCbCr conversion mode\n" , |
3738 | str_enable_disable(tmp)); |
3739 | } |
3740 | |
3741 | bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) |
3742 | { |
3743 | u8 dprx = 0; |
3744 | |
3745 | if (drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, |
3746 | valuep: &dprx) != 1) |
3747 | return false; |
3748 | return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; |
3749 | } |
3750 | |
3751 | static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux, |
3752 | u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) |
3753 | { |
3754 | if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, buffer: dsc_dpcd, |
3755 | DP_DSC_RECEIVER_CAP_SIZE) < 0) { |
3756 | drm_err(aux->drm_dev, |
3757 | "Failed to read DPCD register 0x%x\n" , |
3758 | DP_DSC_SUPPORT); |
3759 | return; |
3760 | } |
3761 | |
3762 | drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n" , |
3763 | DP_DSC_RECEIVER_CAP_SIZE, |
3764 | dsc_dpcd); |
3765 | } |
3766 | |
3767 | void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) |
3768 | { |
3769 | struct drm_i915_private *i915 = to_i915(dev: connector->base.dev); |
3770 | |
3771 | /* |
3772 | * Clear the cached register set to avoid using stale values |
3773 | * for the sinks that do not support DSC. |
3774 | */ |
3775 | memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd)); |
3776 | |
3777 | /* Clear fec_capable to avoid using stale values */ |
3778 | connector->dp.fec_capability = 0; |
3779 | |
3780 | if (dpcd_rev < DP_DPCD_REV_14) |
3781 | return; |
3782 | |
3783 | intel_dp_read_dsc_dpcd(aux: connector->dp.dsc_decompression_aux, |
3784 | dsc_dpcd: connector->dp.dsc_dpcd); |
3785 | |
3786 | if (drm_dp_dpcd_readb(aux: connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY, |
3787 | valuep: &connector->dp.fec_capability) < 0) { |
3788 | drm_err(&i915->drm, "Failed to read FEC DPCD register\n" ); |
3789 | return; |
3790 | } |
3791 | |
3792 | drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n" , |
3793 | connector->dp.fec_capability); |
3794 | } |
3795 | |
3796 | static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector) |
3797 | { |
3798 | if (edp_dpcd_rev < DP_EDP_14) |
3799 | return; |
3800 | |
3801 | intel_dp_read_dsc_dpcd(aux: connector->dp.dsc_decompression_aux, dsc_dpcd: connector->dp.dsc_dpcd); |
3802 | } |
3803 | |
3804 | static void intel_edp_mso_mode_fixup(struct intel_connector *connector, |
3805 | struct drm_display_mode *mode) |
3806 | { |
3807 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
3808 | struct drm_i915_private *i915 = to_i915(dev: connector->base.dev); |
3809 | int n = intel_dp->mso_link_count; |
3810 | int overlap = intel_dp->mso_pixel_overlap; |
3811 | |
3812 | if (!mode || !n) |
3813 | return; |
3814 | |
3815 | mode->hdisplay = (mode->hdisplay - overlap) * n; |
3816 | mode->hsync_start = (mode->hsync_start - overlap) * n; |
3817 | mode->hsync_end = (mode->hsync_end - overlap) * n; |
3818 | mode->htotal = (mode->htotal - overlap) * n; |
3819 | mode->clock *= n; |
3820 | |
3821 | drm_mode_set_name(mode); |
3822 | |
3823 | drm_dbg_kms(&i915->drm, |
3824 | "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n" , |
3825 | connector->base.base.id, connector->base.name, |
3826 | DRM_MODE_ARG(mode)); |
3827 | } |
3828 | |
3829 | void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp) |
3830 | { |
3831 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
3832 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
3833 | struct intel_connector *connector = intel_dp->attached_connector; |
3834 | |
3835 | if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) { |
3836 | /* |
3837 | * This is a big fat ugly hack. |
3838 | * |
3839 | * Some machines in UEFI boot mode provide us a VBT that has 18 |
3840 | * bpp and 1.62 GHz link bandwidth for eDP, which for reasons |
3841 | * unknown we fail to light up. Yet the same BIOS boots up with |
3842 | * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as |
3843 | * max, not what it tells us to use. |
3844 | * |
3845 | * Note: This will still be broken if the eDP panel is not lit |
3846 | * up by the BIOS, and thus we can't get the mode at module |
3847 | * load. |
3848 | */ |
3849 | drm_dbg_kms(&dev_priv->drm, |
3850 | "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n" , |
3851 | pipe_bpp, connector->panel.vbt.edp.bpp); |
3852 | connector->panel.vbt.edp.bpp = pipe_bpp; |
3853 | } |
3854 | } |
3855 | |
/*
 * Read and validate the eDP Multi-SST Operation (MSO) link capabilities
 * and cache the resulting link count / pixel overlap on intel_dp.
 * Invalid or unsupported configurations leave MSO disabled (count 0).
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	/* MSO capabilities exist only for eDP 1.4+. */
	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, valuep: &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n" );
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(dpcd: intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n" , mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n" ,
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		/* Sink supports MSO but this platform's source can't drive it. */
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n" );
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
3891 | |
/*
 * One-time eDP DPCD initialization: read the base DPCD caps, the eDP
 * display control registers, the eDP 1.4+ sink rate table, DSC caps and
 * PSR caps, and program the source OUI. Returns false if the base DPCD
 * read fails (panel considered absent/broken).
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv =
		to_i915(dev: dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(aux: &intel_dp->aux, dpcd: intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(aux: &intel_dp->aux, desc: &intel_dp->desc,
			 is_branch: drm_dp_is_branch(dpcd: intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(aux: &intel_dp->aux, DP_EDP_DPCD_REV,
			     buffer: intel_dp->edp_dpcd, size: sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n" ,
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		/* Pre-eDP-1.4 panels get driven at max link parameters. */
		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(aux: &intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 buffer: sink_rates, size: sizeof(sink_rates));

		/* The rate table is zero-terminated; stop at the first 0. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (HAS_DSC(dev_priv))
		intel_edp_get_dsc_sink_cap(edp_dpcd_rev: intel_dp->edp_dpcd[0],
					   connector);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, careful: true);

	return true;
}
3983 | |
3984 | static bool |
3985 | intel_dp_has_sink_count(struct intel_dp *intel_dp) |
3986 | { |
3987 | if (!intel_dp->attached_connector) |
3988 | return false; |
3989 | |
3990 | return drm_dp_read_sink_count_cap(connector: &intel_dp->attached_connector->base, |
3991 | dpcd: intel_dp->dpcd, |
3992 | desc: &intel_dp->desc); |
3993 | } |
3994 | |
/*
 * Refresh the cached sink link capabilities (link rates, max lane count)
 * and then the source/sink common rate set; the common rates must be
 * recomputed last since they depend on the sink rates.
 */
void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
{
	intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);
	intel_dp_set_common_rates(intel_dp);
}
4001 | |
/*
 * Re-read the sink's DPCD state on (re)detection: LTTPR/DPRX caps,
 * branch descriptor and link caps (DP only, not eDP), sink count, and
 * downstream port info. Returns false when the sink should be treated
 * as disconnected (read failure or SINK_COUNT of 0 on a dongle).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(aux: &intel_dp->aux, desc: &intel_dp->desc,
				 is_branch: drm_dp_is_branch(dpcd: intel_dp->dpcd));

		intel_dp_update_sink_caps(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(aux: &intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(aux: &intel_dp->aux, dpcd: intel_dp->dpcd,
					   downstream_ports: intel_dp->downstream_ports) == 0;
}
4047 | |
4048 | static bool |
4049 | intel_dp_can_mst(struct intel_dp *intel_dp) |
4050 | { |
4051 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
4052 | |
4053 | return i915->display.params.enable_dp_mst && |
4054 | intel_dp_mst_source_support(intel_dp) && |
4055 | drm_dp_read_mst_cap(aux: &intel_dp->aux, dpcd: intel_dp->dpcd); |
4056 | } |
4057 | |
4058 | static void |
4059 | intel_dp_configure_mst(struct intel_dp *intel_dp) |
4060 | { |
4061 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
4062 | struct intel_encoder *encoder = |
4063 | &dp_to_dig_port(intel_dp)->base; |
4064 | bool sink_can_mst = drm_dp_read_mst_cap(aux: &intel_dp->aux, dpcd: intel_dp->dpcd); |
4065 | |
4066 | drm_dbg_kms(&i915->drm, |
4067 | "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n" , |
4068 | encoder->base.base.id, encoder->base.name, |
4069 | str_yes_no(intel_dp_mst_source_support(intel_dp)), |
4070 | str_yes_no(sink_can_mst), |
4071 | str_yes_no(i915->display.params.enable_dp_mst)); |
4072 | |
4073 | if (!intel_dp_mst_source_support(intel_dp)) |
4074 | return; |
4075 | |
4076 | intel_dp->is_mst = sink_can_mst && |
4077 | i915->display.params.enable_dp_mst; |
4078 | |
4079 | drm_dp_mst_topology_mgr_set_mst(mgr: &intel_dp->mst_mgr, |
4080 | mst_state: intel_dp->is_mst); |
4081 | } |
4082 | |
4083 | static bool |
4084 | intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) |
4085 | { |
4086 | return drm_dp_dpcd_read(aux: &intel_dp->aux, DP_SINK_COUNT_ESI, buffer: esi, size: 4) == 4; |
4087 | } |
4088 | |
4089 | static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) |
4090 | { |
4091 | int retry; |
4092 | |
4093 | for (retry = 0; retry < 3; retry++) { |
4094 | if (drm_dp_dpcd_write(aux: &intel_dp->aux, DP_SINK_COUNT_ESI + 1, |
4095 | buffer: &esi[1], size: 3) == 3) |
4096 | return true; |
4097 | } |
4098 | |
4099 | return false; |
4100 | } |
4101 | |
4102 | bool |
4103 | intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, |
4104 | const struct drm_connector_state *conn_state) |
4105 | { |
4106 | /* |
4107 | * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication |
4108 | * of Color Encoding Format and Content Color Gamut], in order to |
4109 | * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. |
4110 | */ |
4111 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
4112 | return true; |
4113 | |
4114 | switch (conn_state->colorspace) { |
4115 | case DRM_MODE_COLORIMETRY_SYCC_601: |
4116 | case DRM_MODE_COLORIMETRY_OPYCC_601: |
4117 | case DRM_MODE_COLORIMETRY_BT2020_YCC: |
4118 | case DRM_MODE_COLORIMETRY_BT2020_RGB: |
4119 | case DRM_MODE_COLORIMETRY_BT2020_CYCC: |
4120 | return true; |
4121 | default: |
4122 | break; |
4123 | } |
4124 | |
4125 | return false; |
4126 | } |
4127 | |
/*
 * Pack an HDR static metadata (DRM) infoframe into a DP SDP.
 *
 * Returns the number of bytes to transmit (SDP header + 2 CTA header
 * bytes + HDMI_DRM_INFOFRAME_SIZE payload) or -ENOSPC if @size is too
 * small or the infoframe does not pack to the expected length.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
					 const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Pack to a scratch buffer first; the SDP layout differs from CTA's. */
	len = hdmi_drm_infoframe_pack_only(frame: drm_infoframe, buffer: buf, size: sizeof(buf));
	if (len < 0) {
		drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n" );
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n" );
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1 (0x1D == 29 == infoframe_size - 1)
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
4204 | |
4205 | static void intel_write_dp_sdp(struct intel_encoder *encoder, |
4206 | const struct intel_crtc_state *crtc_state, |
4207 | unsigned int type) |
4208 | { |
4209 | struct intel_digital_port *dig_port = enc_to_dig_port(encoder); |
4210 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
4211 | struct dp_sdp sdp = {}; |
4212 | ssize_t len; |
4213 | |
4214 | if ((crtc_state->infoframes.enable & |
4215 | intel_hdmi_infoframe_enable(type)) == 0) |
4216 | return; |
4217 | |
4218 | switch (type) { |
4219 | case DP_SDP_VSC: |
4220 | len = drm_dp_vsc_sdp_pack(vsc: &crtc_state->infoframes.vsc, sdp: &sdp); |
4221 | break; |
4222 | case HDMI_PACKET_TYPE_GAMUT_METADATA: |
4223 | len = intel_dp_hdr_metadata_infoframe_sdp_pack(i915: dev_priv, |
4224 | drm_infoframe: &crtc_state->infoframes.drm.drm, |
4225 | sdp: &sdp, size: sizeof(sdp)); |
4226 | break; |
4227 | default: |
4228 | MISSING_CASE(type); |
4229 | return; |
4230 | } |
4231 | |
4232 | if (drm_WARN_ON(&dev_priv->drm, len < 0)) |
4233 | return; |
4234 | |
4235 | dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); |
4236 | } |
4237 | |
/*
 * Enable or disable the DP SDPs for a pipe: clear the DIP enable bits in
 * the video DIP control register (leaving VSC alone while PSR owns it),
 * then, when enabling, transmit the VSC and HDR gamut metadata SDPs.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(i915: dev_priv, reg) & ~dip_enable;

	/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
	if (!enable && HAS_DSC(dev_priv))
		val &= ~VDIP_ENABLE_PPS;

	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(i915: dev_priv, reg, val);
	intel_de_posting_read(i915: dev_priv, reg);

	if (!enable)
		return;

	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, type: HDMI_PACKET_TYPE_GAMUT_METADATA);
}
4268 | |
/*
 * Unpack a raw VSC SDP @buffer into @vsc.
 *
 * Validates the SDP header (HB0 must be 0, HB1 must be DP_SDP_VSC) and then
 * branches on the revision/length pair in HB2/HB3. Only revision 0x5
 * (length 0x13) carries pixel encoding/colorimetry payload bytes; the older
 * PSR-only revisions are accepted with just the header fields filled in.
 *
 * Returns 0 on success, -EINVAL for a buffer that is too small or does not
 * match a known VSC SDP layout.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	/* HB0: Secondary-Data Packet ID, 0 for operation in this driver */
	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 * Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* DB17[2:0]: encoded bits-per-component */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}
4338 | |
4339 | static int |
4340 | intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, |
4341 | const void *buffer, size_t size) |
4342 | { |
4343 | int ret; |
4344 | |
4345 | const struct dp_sdp *sdp = buffer; |
4346 | |
4347 | if (size < sizeof(struct dp_sdp)) |
4348 | return -EINVAL; |
4349 | |
4350 | if (sdp->sdp_header.HB0 != 0) |
4351 | return -EINVAL; |
4352 | |
4353 | if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) |
4354 | return -EINVAL; |
4355 | |
4356 | /* |
4357 | * Least Significant Eight Bits of (Data Byte Count – 1) |
4358 | * 1Dh (i.e., Data Byte Count = 30 bytes). |
4359 | */ |
4360 | if (sdp->sdp_header.HB2 != 0x1D) |
4361 | return -EINVAL; |
4362 | |
4363 | /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */ |
4364 | if ((sdp->sdp_header.HB3 & 0x3) != 0) |
4365 | return -EINVAL; |
4366 | |
4367 | /* INFOFRAME SDP Version Number */ |
4368 | if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) |
4369 | return -EINVAL; |
4370 | |
4371 | /* CTA Header Byte 2 (INFOFRAME Version Number) */ |
4372 | if (sdp->db[0] != 1) |
4373 | return -EINVAL; |
4374 | |
4375 | /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ |
4376 | if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) |
4377 | return -EINVAL; |
4378 | |
4379 | ret = hdmi_drm_infoframe_unpack_only(frame: drm_infoframe, buffer: &sdp->db[2], |
4380 | HDMI_DRM_INFOFRAME_SIZE); |
4381 | |
4382 | return ret; |
4383 | } |
4384 | |
4385 | static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, |
4386 | struct intel_crtc_state *crtc_state, |
4387 | struct drm_dp_vsc_sdp *vsc) |
4388 | { |
4389 | struct intel_digital_port *dig_port = enc_to_dig_port(encoder); |
4390 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
4391 | unsigned int type = DP_SDP_VSC; |
4392 | struct dp_sdp sdp = {}; |
4393 | int ret; |
4394 | |
4395 | if ((crtc_state->infoframes.enable & |
4396 | intel_hdmi_infoframe_enable(type)) == 0) |
4397 | return; |
4398 | |
4399 | dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); |
4400 | |
4401 | ret = intel_dp_vsc_sdp_unpack(vsc, buffer: &sdp, size: sizeof(sdp)); |
4402 | |
4403 | if (ret) |
4404 | drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n" ); |
4405 | } |
4406 | |
4407 | static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, |
4408 | struct intel_crtc_state *crtc_state, |
4409 | struct hdmi_drm_infoframe *drm_infoframe) |
4410 | { |
4411 | struct intel_digital_port *dig_port = enc_to_dig_port(encoder); |
4412 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
4413 | unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; |
4414 | struct dp_sdp sdp = {}; |
4415 | int ret; |
4416 | |
4417 | if ((crtc_state->infoframes.enable & |
4418 | intel_hdmi_infoframe_enable(type)) == 0) |
4419 | return; |
4420 | |
4421 | dig_port->read_infoframe(encoder, crtc_state, type, &sdp, |
4422 | sizeof(sdp)); |
4423 | |
4424 | ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, buffer: &sdp, |
4425 | size: sizeof(sdp)); |
4426 | |
4427 | if (ret) |
4428 | drm_dbg_kms(&dev_priv->drm, |
4429 | "Failed to unpack DP HDR Metadata Infoframe SDP\n" ); |
4430 | } |
4431 | |
4432 | void intel_read_dp_sdp(struct intel_encoder *encoder, |
4433 | struct intel_crtc_state *crtc_state, |
4434 | unsigned int type) |
4435 | { |
4436 | switch (type) { |
4437 | case DP_SDP_VSC: |
4438 | intel_read_dp_vsc_sdp(encoder, crtc_state, |
4439 | vsc: &crtc_state->infoframes.vsc); |
4440 | break; |
4441 | case HDMI_PACKET_TYPE_GAMUT_METADATA: |
4442 | intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, |
4443 | drm_infoframe: &crtc_state->infoframes.drm.drm); |
4444 | break; |
4445 | default: |
4446 | MISSING_CASE(type); |
4447 | break; |
4448 | } |
4449 | } |
4450 | |
/*
 * Handle a DP compliance LINK_TRAINING automated test request.
 *
 * Reads the requested lane count and link rate from the sink's DPCD test
 * registers, validates them against what this source supports, and stashes
 * them in intel_dp->compliance for the subsequent retrain.
 *
 * Returns DP_TEST_ACK on success, DP_TEST_NAK on AUX failure or an
 * unsupported link parameter combination.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_TEST_LANE_COUNT,
				   valuep: &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n" );
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_TEST_LINK_RATE,
				   valuep: &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n" );
		return DP_TEST_NAK;
	}
	/* Convert the DPCD bandwidth code (e.g. 0x14) to a rate in kHz */
	test_link_rate = drm_dp_bw_code_to_link_rate(link_bw: test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, link_rate: test_link_rate,
					lane_count: test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4488 | |
/*
 * Handle a DP compliance TEST_PATTERN (video pattern) automated test
 * request.
 *
 * Reads the requested pattern, resolution and color format from the sink's
 * DPCD test registers. Only the color-ramp pattern in RGB with VESA dynamic
 * range at 6 or 8 bpc is supported; anything else is NAKed. On success the
 * parameters are stashed in intel_dp->compliance.test_data and
 * test_active is set so userspace picks up the test mode.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_TEST_PATTERN,
				   valuep: &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n" );
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* H width / V height are 16-bit big-endian DPCD register pairs */
	status = drm_dp_dpcd_read(aux: &intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  buffer: &h_width, size: 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n" );
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(aux: &intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  buffer: &v_height, size: 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n" );
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_TEST_MISC0,
				   valuep: &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n" );
		return DP_TEST_NAK;
	}
	/* Only RGB with VESA (non-CEA) dynamic range is supported */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4550 | |
/*
 * Handle a DP compliance EDID_READ automated test request.
 *
 * If the cached EDID read failed or was corrupted, request the failsafe
 * resolution. Otherwise write the checksum of the last EDID block back to
 * the sink (DP_TEST_EDID_CHECKSUM) and request the preferred resolution.
 * Always marks the compliance test active before returning.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n" ,
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		/* FIXME: Get rid of drm_edid_raw() */
		const struct edid *block = drm_edid_raw(drm_edid: intel_connector->detect_edid);

		/* We have to write the checksum of the last block read */
		block += block->extensions;

		if (drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       value: block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n" );

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
4596 | |
/*
 * Program the source-side PHY compliance pattern generator
 * (DDI_DP_COMP_CTL / DDI_DP_COMP_PAT) to emit the pattern requested by the
 * sink in intel_dp->compliance.test_data.phytest.
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dev: dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_LINK_QUAL_PATTERN_DISABLE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n" );
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe), val: 0x0);
		/* Return the transcoder to normal (non-training) operation */
		if (DISPLAY_VER(dev_priv) >= 10)
			intel_de_rmw(i915: dev_priv, reg: dp_tp_ctl_reg(encoder, crtc_state),
				     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
				     DP_TP_CTL_LINK_TRAIN_NORMAL);
		break;
	case DP_LINK_QUAL_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n" );
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_LINK_QUAL_PATTERN_ERROR_RATE:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n" );
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_LINK_QUAL_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n" );
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n" );
		pattern_val = 0x3e0f83e0;
		intel_de_write(i915: dev_priv, DDI_DP_COMP_PAT(pipe, 0), val: pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(i915: dev_priv, DDI_DP_COMP_PAT(pipe, 1), val: pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(i915: dev_priv, DDI_DP_COMP_PAT(pipe, 2), val: pattern_val);
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n" );
		pattern_val = 0xFB;
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
		if (DISPLAY_VER(dev_priv) < 10) {
			drm_warn(&dev_priv->drm, "Platform does not support TPS4\n" );
			break;
		}
		/* TPS4 is emitted via DP_TP_CTL rather than the COMP generator */
		drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n" );
		intel_de_write(i915: dev_priv, DDI_DP_COMP_CTL(pipe), val: 0x0);
		intel_de_rmw(i915: dev_priv, reg: dp_tp_ctl_reg(encoder, crtc_state),
			     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
			     DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
		break;
	default:
		drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n" );
	}
}
4679 | |
/*
 * Execute a pending PHY compliance test request: read the sink's requested
 * vswing/pre-emphasis from the link status, program the source signal
 * levels and pattern generator, then mirror the settings back to the sink
 * via DP_TRAINING_LANE0_SET and the PHY test pattern DPCD registers.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(aux: &intel_dp->aux, dp_phy: DP_PHY_DPRX,
					     link_status) < 0) {
		drm_dbg_kms(&i915->drm, "failed to get link status\n" );
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX,
				  link_status);

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	/* Echo the per-lane drive settings back to the sink */
	drm_dp_dpcd_write(aux: &intel_dp->aux, DP_TRAINING_LANE0_SET,
			  buffer: intel_dp->train_set, size: crtc_state->lane_count);

	drm_dp_set_phy_test_pattern(aux: &intel_dp->aux, data,
				    dp_rev: intel_dp->dpcd[DP_DPCD_REV]);
}
4708 | |
/*
 * Handle a DP compliance PHY_TEST_PATTERN automated test request by
 * caching the requested pattern parameters from the sink's DPCD.
 * Returns DP_TEST_ACK on success, DP_TEST_NAK on AUX read failure.
 */
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(aux: &intel_dp->aux, data)) {
		drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n" );
		return DP_TEST_NAK;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4725 | |
/*
 * Service a sink-initiated automated test request (DP_TEST_REQUEST):
 * dispatch to the per-test handler and write the ACK/NAK response back to
 * DP_TEST_RESPONSE. On ACK the requested test type is recorded in
 * intel_dp->compliance for the later modeset/retrain to act on.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_TEST_REQUEST, valuep: &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n" );
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n" );
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n" );
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n" );
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n" );
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n" ,
			    request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	/* The response must be written even if the request read failed */
	status = drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_TEST_RESPONSE, value: response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n" );
}
4772 | |
4773 | static bool intel_dp_link_ok(struct intel_dp *intel_dp, |
4774 | u8 link_status[DP_LINK_STATUS_SIZE]) |
4775 | { |
4776 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
4777 | struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev); |
4778 | bool uhbr = intel_dp->link_rate >= 1000000; |
4779 | bool ok; |
4780 | |
4781 | if (uhbr) |
4782 | ok = drm_dp_128b132b_lane_channel_eq_done(link_status, |
4783 | lane_count: intel_dp->lane_count); |
4784 | else |
4785 | ok = drm_dp_channel_eq_ok(link_status, lane_count: intel_dp->lane_count); |
4786 | |
4787 | if (ok) |
4788 | return true; |
4789 | |
4790 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
4791 | drm_dbg_kms(&i915->drm, |
4792 | "[ENCODER:%d:%s] %s link not ok, retraining\n" , |
4793 | encoder->base.base.id, encoder->base.name, |
4794 | uhbr ? "128b/132b" : "8b/10b" ); |
4795 | |
4796 | return false; |
4797 | } |
4798 | |
4799 | static void |
4800 | intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) |
4801 | { |
4802 | bool handled = false; |
4803 | |
4804 | drm_dp_mst_hpd_irq_handle_event(mgr: &intel_dp->mst_mgr, esi, ack, handled: &handled); |
4805 | |
4806 | if (esi[1] & DP_CP_IRQ) { |
4807 | intel_hdcp_handle_cp_irq(connector: intel_dp->attached_connector); |
4808 | ack[1] |= DP_CP_IRQ; |
4809 | } |
4810 | } |
4811 | |
4812 | static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) |
4813 | { |
4814 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
4815 | struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev); |
4816 | u8 link_status[DP_LINK_STATUS_SIZE] = {}; |
4817 | const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; |
4818 | |
4819 | if (drm_dp_dpcd_read(aux: &intel_dp->aux, DP_LANE0_1_STATUS_ESI, buffer: link_status, |
4820 | size: esi_link_status_size) != esi_link_status_size) { |
4821 | drm_err(&i915->drm, |
4822 | "[ENCODER:%d:%s] Failed to read link status\n" , |
4823 | encoder->base.base.id, encoder->base.name); |
4824 | return false; |
4825 | } |
4826 | |
4827 | return intel_dp_link_ok(intel_dp, link_status); |
4828 | } |
4829 | |
4830 | /** |
4831 | * intel_dp_check_mst_status - service any pending MST interrupts, check link status |
4832 | * @intel_dp: Intel DP struct |
4833 | * |
4834 | * Read any pending MST interrupts, call MST core to handle these and ack the |
4835 | * interrupts. Check if the main and AUX link state is ok. |
4836 | * |
4837 | * Returns: |
4838 | * - %true if pending interrupts were serviced (or no interrupts were |
4839 | * pending) w/o detecting an error condition. |
4840 | * - %false if an error condition - like AUX failure or a loss of link - is |
4841 | * detected, or another condition - like a DP tunnel BW state change - needs |
4842 | * servicing from the hotplug work. |
4843 | */ |
4844 | static bool |
4845 | intel_dp_check_mst_status(struct intel_dp *intel_dp) |
4846 | { |
4847 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
4848 | bool link_ok = true; |
4849 | bool reprobe_needed = false; |
4850 | |
4851 | drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); |
4852 | |
4853 | for (;;) { |
4854 | u8 esi[4] = {}; |
4855 | u8 ack[4] = {}; |
4856 | |
4857 | if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { |
4858 | drm_dbg_kms(&i915->drm, |
4859 | "failed to get ESI - device may have failed\n" ); |
4860 | link_ok = false; |
4861 | |
4862 | break; |
4863 | } |
4864 | |
4865 | drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n" , esi); |
4866 | |
4867 | if (intel_dp->active_mst_links > 0 && link_ok && |
4868 | esi[3] & LINK_STATUS_CHANGED) { |
4869 | if (!intel_dp_mst_link_status(intel_dp)) |
4870 | link_ok = false; |
4871 | ack[3] |= LINK_STATUS_CHANGED; |
4872 | } |
4873 | |
4874 | intel_dp_mst_hpd_irq(intel_dp, esi, ack); |
4875 | |
4876 | if (esi[3] & DP_TUNNELING_IRQ) { |
4877 | if (drm_dp_tunnel_handle_irq(mgr: i915->display.dp_tunnel_mgr, |
4878 | aux: &intel_dp->aux)) |
4879 | reprobe_needed = true; |
4880 | ack[3] |= DP_TUNNELING_IRQ; |
4881 | } |
4882 | |
4883 | if (!memchr_inv(p: ack, c: 0, size: sizeof(ack))) |
4884 | break; |
4885 | |
4886 | if (!intel_dp_ack_sink_irq_esi(intel_dp, esi: ack)) |
4887 | drm_dbg_kms(&i915->drm, "Failed to ack ESI\n" ); |
4888 | |
4889 | if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY)) |
4890 | drm_dp_mst_hpd_irq_send_new_request(mgr: &intel_dp->mst_mgr); |
4891 | } |
4892 | |
4893 | return link_ok && !reprobe_needed; |
4894 | } |
4895 | |
/*
 * React to a PCON HDMI link status change: if we believed FRL was trained
 * but the PCON reports the HDMI link down, tear down the FRL config, dump
 * the link error counts, and kick off FRL retraining (which may fall back
 * to TMDS).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(aux: &intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		/* Clear the HDMI link enable bit in the PCON's config */
		if (drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, valuep: &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, value: buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(aux: &intel_dp->aux, connector: &intel_dp->attached_connector->base);

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}
4919 | |
/*
 * Decide whether the link should be retrained, based on the live DPRX link
 * status. Returns false when the link was never trained, while PSR owns the
 * link, on AUX failure, or when the cached link parameters are stale.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(aux: &intel_dp->aux, dp_phy: DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to user the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, link_rate: intel_dp->link_rate,
					lane_count: intel_dp->lane_count))
		return false;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}
4958 | |
4959 | static bool intel_dp_has_connector(struct intel_dp *intel_dp, |
4960 | const struct drm_connector_state *conn_state) |
4961 | { |
4962 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
4963 | struct intel_encoder *encoder; |
4964 | enum pipe pipe; |
4965 | |
4966 | if (!conn_state->best_encoder) |
4967 | return false; |
4968 | |
4969 | /* SST */ |
4970 | encoder = &dp_to_dig_port(intel_dp)->base; |
4971 | if (conn_state->best_encoder == &encoder->base) |
4972 | return true; |
4973 | |
4974 | /* MST */ |
4975 | for_each_pipe(i915, pipe) { |
4976 | encoder = &intel_dp->mst_encoders[pipe]->base; |
4977 | if (conn_state->best_encoder == &encoder->base) |
4978 | return true; |
4979 | } |
4980 | |
4981 | return false; |
4982 | } |
4983 | |
/*
 * Collect the mask of pipes with an active crtc driven by this DP port.
 *
 * Takes each matching crtc's modeset lock under @ctx, and blocks (up to 5s,
 * with a WARN on timeout) on any pending commit's hw_done completion so the
 * returned mask reflects committed hardware state.
 *
 * Returns 0 on success, or the error from drm_modeset_lock() (typically
 * -EDEADLK, requiring the caller to back off and retry).
 */
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(dev: &i915->drm, iter: &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		/* Lock is held until the acquire ctx is dropped by the caller */
		ret = drm_modeset_lock(lock: &crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Wait for any in-flight commit to reach the hardware */
		if (conn_state->commit)
			drm_WARN_ON(&i915->drm,
				    !wait_for_completion_timeout(&conn_state->commit->hw_done,
								 msecs_to_jiffies(5000)));

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(iter: &conn_iter);

	return ret;
}
5031 | |
5032 | static bool intel_dp_is_connected(struct intel_dp *intel_dp) |
5033 | { |
5034 | struct intel_connector *connector = intel_dp->attached_connector; |
5035 | |
5036 | return connector->base.status == connector_status_connected || |
5037 | intel_dp->is_mst; |
5038 | } |
5039 | |
/*
 * Retrain the DP link on all active pipes driven by this encoder, if the
 * link status indicates it is needed.
 *
 * Three passes over the affected pipes: (1) suppress CPU/PCH FIFO underrun
 * reporting, (2) run link training once — on the MST master transcoder for
 * TGL+ MST — and (3) re-enable underrun reporting after a vblank has
 * passed. Returns 0, or a locking error (-EDEADLK) for the caller to retry.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(lock: &dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, pipe_mask: &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	/* Re-check under the crtc locks; the link may have recovered */
	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n" ,
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe: crtc->pipe, enable: false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      pch_transcoder: intel_crtc_pch_transcoder(crtc), enable: false);
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_crtc_wait_for_next_vblank(crtc);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe: crtc->pipe, enable: true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      pch_transcoder: intel_crtc_pch_transcoder(crtc), enable: true);
	}

	return 0;
}
5116 | |
5117 | static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, |
5118 | struct drm_modeset_acquire_ctx *ctx, |
5119 | u8 *pipe_mask) |
5120 | { |
5121 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5122 | struct drm_connector_list_iter conn_iter; |
5123 | struct intel_connector *connector; |
5124 | int ret = 0; |
5125 | |
5126 | *pipe_mask = 0; |
5127 | |
5128 | drm_connector_list_iter_begin(dev: &i915->drm, iter: &conn_iter); |
5129 | for_each_intel_connector_iter(connector, &conn_iter) { |
5130 | struct drm_connector_state *conn_state = |
5131 | connector->base.state; |
5132 | struct intel_crtc_state *crtc_state; |
5133 | struct intel_crtc *crtc; |
5134 | |
5135 | if (!intel_dp_has_connector(intel_dp, conn_state)) |
5136 | continue; |
5137 | |
5138 | crtc = to_intel_crtc(conn_state->crtc); |
5139 | if (!crtc) |
5140 | continue; |
5141 | |
5142 | ret = drm_modeset_lock(lock: &crtc->base.mutex, ctx); |
5143 | if (ret) |
5144 | break; |
5145 | |
5146 | crtc_state = to_intel_crtc_state(crtc->base.state); |
5147 | |
5148 | drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); |
5149 | |
5150 | if (!crtc_state->hw.active) |
5151 | continue; |
5152 | |
5153 | if (conn_state->commit && |
5154 | !try_wait_for_completion(x: &conn_state->commit->hw_done)) |
5155 | continue; |
5156 | |
5157 | *pipe_mask |= BIT(crtc->pipe); |
5158 | } |
5159 | drm_connector_list_iter_end(iter: &conn_iter); |
5160 | |
5161 | return ret; |
5162 | } |
5163 | |
5164 | static int intel_dp_do_phy_test(struct intel_encoder *encoder, |
5165 | struct drm_modeset_acquire_ctx *ctx) |
5166 | { |
5167 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
5168 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
5169 | struct intel_crtc *crtc; |
5170 | u8 pipe_mask; |
5171 | int ret; |
5172 | |
5173 | ret = drm_modeset_lock(lock: &dev_priv->drm.mode_config.connection_mutex, |
5174 | ctx); |
5175 | if (ret) |
5176 | return ret; |
5177 | |
5178 | ret = intel_dp_prep_phy_test(intel_dp, ctx, pipe_mask: &pipe_mask); |
5179 | if (ret) |
5180 | return ret; |
5181 | |
5182 | if (pipe_mask == 0) |
5183 | return 0; |
5184 | |
5185 | drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n" , |
5186 | encoder->base.base.id, encoder->base.name); |
5187 | |
5188 | for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { |
5189 | const struct intel_crtc_state *crtc_state = |
5190 | to_intel_crtc_state(crtc->base.state); |
5191 | |
5192 | /* test on the MST master transcoder */ |
5193 | if (DISPLAY_VER(dev_priv) >= 12 && |
5194 | intel_crtc_has_type(crtc_state, type: INTEL_OUTPUT_DP_MST) && |
5195 | !intel_dp_mst_is_master_trans(crtc_state)) |
5196 | continue; |
5197 | |
5198 | intel_dp_process_phy_request(intel_dp, crtc_state); |
5199 | break; |
5200 | } |
5201 | |
5202 | return 0; |
5203 | } |
5204 | |
5205 | void intel_dp_phy_test(struct intel_encoder *encoder) |
5206 | { |
5207 | struct drm_modeset_acquire_ctx ctx; |
5208 | int ret; |
5209 | |
5210 | drm_modeset_acquire_init(ctx: &ctx, flags: 0); |
5211 | |
5212 | for (;;) { |
5213 | ret = intel_dp_do_phy_test(encoder, ctx: &ctx); |
5214 | |
5215 | if (ret == -EDEADLK) { |
5216 | drm_modeset_backoff(ctx: &ctx); |
5217 | continue; |
5218 | } |
5219 | |
5220 | break; |
5221 | } |
5222 | |
5223 | drm_modeset_drop_locks(ctx: &ctx); |
5224 | drm_modeset_acquire_fini(ctx: &ctx); |
5225 | drm_WARN(encoder->base.dev, ret, |
5226 | "Acquiring modeset locks failed with %i\n" , ret); |
5227 | } |
5228 | |
5229 | static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) |
5230 | { |
5231 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5232 | u8 val; |
5233 | |
5234 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) |
5235 | return; |
5236 | |
5237 | if (drm_dp_dpcd_readb(aux: &intel_dp->aux, |
5238 | DP_DEVICE_SERVICE_IRQ_VECTOR, valuep: &val) != 1 || !val) |
5239 | return; |
5240 | |
5241 | drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, value: val); |
5242 | |
5243 | if (val & DP_AUTOMATED_TEST_REQUEST) |
5244 | intel_dp_handle_test_request(intel_dp); |
5245 | |
5246 | if (val & DP_CP_IRQ) |
5247 | intel_hdcp_handle_cp_irq(connector: intel_dp->attached_connector); |
5248 | |
5249 | if (val & DP_SINK_SPECIFIC_IRQ) |
5250 | drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n" ); |
5251 | } |
5252 | |
5253 | static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp) |
5254 | { |
5255 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5256 | bool reprobe_needed = false; |
5257 | u8 val; |
5258 | |
5259 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) |
5260 | return false; |
5261 | |
5262 | if (drm_dp_dpcd_readb(aux: &intel_dp->aux, |
5263 | DP_LINK_SERVICE_IRQ_VECTOR_ESI0, valuep: &val) != 1 || !val) |
5264 | return false; |
5265 | |
5266 | if ((val & DP_TUNNELING_IRQ) && |
5267 | drm_dp_tunnel_handle_irq(mgr: i915->display.dp_tunnel_mgr, |
5268 | aux: &intel_dp->aux)) |
5269 | reprobe_needed = true; |
5270 | |
5271 | if (drm_dp_dpcd_writeb(aux: &intel_dp->aux, |
5272 | DP_LINK_SERVICE_IRQ_VECTOR_ESI0, value: val) != 1) |
5273 | return reprobe_needed; |
5274 | |
5275 | if (val & HDMI_LINK_STATUS_CHANGED) |
5276 | intel_dp_handle_hdmi_link_status_change(intel_dp); |
5277 | |
5278 | return reprobe_needed; |
5279 | } |
5280 | |
5281 | /* |
5282 | * According to DP spec |
5283 | * 5.1.2: |
5284 | * 1. Read DPCD |
5285 | * 2. Configure link according to Receiver Capabilities |
5286 | * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 |
5287 | * 4. Check link status on receipt of hot-plug interrupt |
5288 | * |
5289 | * intel_dp_short_pulse - handles short pulse interrupts |
5290 | * when full detection is not required. |
5291 | * Returns %true if short pulse is handled and full detection |
5292 | * is NOT required and %false otherwise. |
5293 | */ |
5294 | static bool |
5295 | intel_dp_short_pulse(struct intel_dp *intel_dp) |
5296 | { |
5297 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
5298 | u8 old_sink_count = intel_dp->sink_count; |
5299 | bool reprobe_needed = false; |
5300 | bool ret; |
5301 | |
5302 | /* |
5303 | * Clearing compliance test variables to allow capturing |
5304 | * of values for next automated test request. |
5305 | */ |
5306 | memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); |
5307 | |
5308 | /* |
5309 | * Now read the DPCD to see if it's actually running |
5310 | * If the current value of sink count doesn't match with |
5311 | * the value that was stored earlier or dpcd read failed |
5312 | * we need to do full detection |
5313 | */ |
5314 | ret = intel_dp_get_dpcd(intel_dp); |
5315 | |
5316 | if ((old_sink_count != intel_dp->sink_count) || !ret) { |
5317 | /* No need to proceed if we are going to do full detect */ |
5318 | return false; |
5319 | } |
5320 | |
5321 | intel_dp_check_device_service_irq(intel_dp); |
5322 | reprobe_needed = intel_dp_check_link_service_irq(intel_dp); |
5323 | |
5324 | /* Handle CEC interrupts, if any */ |
5325 | drm_dp_cec_irq(aux: &intel_dp->aux); |
5326 | |
5327 | /* defer to the hotplug work for link retraining if needed */ |
5328 | if (intel_dp_needs_link_retrain(intel_dp)) |
5329 | return false; |
5330 | |
5331 | intel_psr_short_pulse(intel_dp); |
5332 | |
5333 | switch (intel_dp->compliance.test_type) { |
5334 | case DP_TEST_LINK_TRAINING: |
5335 | drm_dbg_kms(&dev_priv->drm, |
5336 | "Link Training Compliance Test requested\n" ); |
5337 | /* Send a Hotplug Uevent to userspace to start modeset */ |
5338 | drm_kms_helper_hotplug_event(dev: &dev_priv->drm); |
5339 | break; |
5340 | case DP_TEST_LINK_PHY_TEST_PATTERN: |
5341 | drm_dbg_kms(&dev_priv->drm, |
5342 | "PHY test pattern Compliance Test requested\n" ); |
5343 | /* |
5344 | * Schedule long hpd to do the test |
5345 | * |
5346 | * FIXME get rid of the ad-hoc phy test modeset code |
5347 | * and properly incorporate it into the normal modeset. |
5348 | */ |
5349 | reprobe_needed = true; |
5350 | } |
5351 | |
5352 | return !reprobe_needed; |
5353 | } |
5354 | |
5355 | /* XXX this is probably wrong for multiple downstream ports */ |
5356 | static enum drm_connector_status |
5357 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) |
5358 | { |
5359 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5360 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
5361 | u8 *dpcd = intel_dp->dpcd; |
5362 | u8 type; |
5363 | |
5364 | if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) |
5365 | return connector_status_connected; |
5366 | |
5367 | lspcon_resume(dig_port); |
5368 | |
5369 | if (!intel_dp_get_dpcd(intel_dp)) |
5370 | return connector_status_disconnected; |
5371 | |
5372 | /* if there's no downstream port, we're done */ |
5373 | if (!drm_dp_is_branch(dpcd)) |
5374 | return connector_status_connected; |
5375 | |
5376 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ |
5377 | if (intel_dp_has_sink_count(intel_dp) && |
5378 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { |
5379 | return intel_dp->sink_count ? |
5380 | connector_status_connected : connector_status_disconnected; |
5381 | } |
5382 | |
5383 | if (intel_dp_can_mst(intel_dp)) |
5384 | return connector_status_connected; |
5385 | |
5386 | /* If no HPD, poke DDC gently */ |
5387 | if (drm_probe_ddc(adapter: &intel_dp->aux.ddc)) |
5388 | return connector_status_connected; |
5389 | |
5390 | /* Well we tried, say unknown for unreliable port types */ |
5391 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { |
5392 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; |
5393 | if (type == DP_DS_PORT_TYPE_VGA || |
5394 | type == DP_DS_PORT_TYPE_NON_EDID) |
5395 | return connector_status_unknown; |
5396 | } else { |
5397 | type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & |
5398 | DP_DWN_STRM_PORT_TYPE_MASK; |
5399 | if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || |
5400 | type == DP_DWN_STRM_PORT_TYPE_OTHER) |
5401 | return connector_status_unknown; |
5402 | } |
5403 | |
5404 | /* Anything else is out of spec, warn and ignore */ |
5405 | drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n" ); |
5406 | return connector_status_disconnected; |
5407 | } |
5408 | |
/* eDP panels are internal and cannot be unplugged: always connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
5414 | |
/* Take the digital port's HPD/connection-state lock, if the port has one. */
void intel_digital_port_lock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	/* Not all port types provide a lock hook (e.g. non-TC ports) */
	if (dig_port->lock)
		dig_port->lock(dig_port);
}
5422 | |
/* Release the lock taken by intel_digital_port_lock(), if any. */
void intel_digital_port_unlock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->unlock)
		dig_port->unlock(dig_port);
}
5430 | |
5431 | /* |
5432 | * intel_digital_port_connected_locked - is the specified port connected? |
5433 | * @encoder: intel_encoder |
5434 | * |
5435 | * In cases where there's a connector physically connected but it can't be used |
5436 | * by our hardware we also return false, since the rest of the driver should |
5437 | * pretty much treat the port as disconnected. This is relevant for type-C |
5438 | * (starting on ICL) where there's ownership involved. |
5439 | * |
5440 | * The caller must hold the lock acquired by calling intel_digital_port_lock() |
5441 | * when calling this function. |
5442 | * |
5443 | * Return %true if port is connected, %false otherwise. |
5444 | */ |
5445 | bool intel_digital_port_connected_locked(struct intel_encoder *encoder) |
5446 | { |
5447 | struct drm_i915_private *dev_priv = to_i915(dev: encoder->base.dev); |
5448 | struct intel_digital_port *dig_port = enc_to_dig_port(encoder); |
5449 | bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port); |
5450 | bool is_connected = false; |
5451 | intel_wakeref_t wakeref; |
5452 | |
5453 | with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) { |
5454 | unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(m: 4); |
5455 | |
5456 | do { |
5457 | is_connected = dig_port->connected(encoder); |
5458 | if (is_connected || is_glitch_free) |
5459 | break; |
5460 | usleep_range(min: 10, max: 30); |
5461 | } while (time_before(jiffies, wait_expires)); |
5462 | } |
5463 | |
5464 | return is_connected; |
5465 | } |
5466 | |
5467 | bool intel_digital_port_connected(struct intel_encoder *encoder) |
5468 | { |
5469 | bool ret; |
5470 | |
5471 | intel_digital_port_lock(encoder); |
5472 | ret = intel_digital_port_connected_locked(encoder); |
5473 | intel_digital_port_unlock(encoder); |
5474 | |
5475 | return ret; |
5476 | } |
5477 | |
5478 | static const struct drm_edid * |
5479 | intel_dp_get_edid(struct intel_dp *intel_dp) |
5480 | { |
5481 | struct intel_connector *connector = intel_dp->attached_connector; |
5482 | const struct drm_edid *fixed_edid = connector->panel.fixed_edid; |
5483 | |
5484 | /* Use panel fixed edid if we have one */ |
5485 | if (fixed_edid) { |
5486 | /* invalid edid */ |
5487 | if (IS_ERR(ptr: fixed_edid)) |
5488 | return NULL; |
5489 | |
5490 | return drm_edid_dup(drm_edid: fixed_edid); |
5491 | } |
5492 | |
5493 | return drm_edid_read_ddc(connector: &connector->base, adapter: &intel_dp->aux.ddc); |
5494 | } |
5495 | |
5496 | static void |
5497 | intel_dp_update_dfp(struct intel_dp *intel_dp, |
5498 | const struct drm_edid *drm_edid) |
5499 | { |
5500 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5501 | struct intel_connector *connector = intel_dp->attached_connector; |
5502 | |
5503 | intel_dp->dfp.max_bpc = |
5504 | drm_dp_downstream_max_bpc(dpcd: intel_dp->dpcd, |
5505 | port_cap: intel_dp->downstream_ports, drm_edid); |
5506 | |
5507 | intel_dp->dfp.max_dotclock = |
5508 | drm_dp_downstream_max_dotclock(dpcd: intel_dp->dpcd, |
5509 | port_cap: intel_dp->downstream_ports); |
5510 | |
5511 | intel_dp->dfp.min_tmds_clock = |
5512 | drm_dp_downstream_min_tmds_clock(dpcd: intel_dp->dpcd, |
5513 | port_cap: intel_dp->downstream_ports, |
5514 | drm_edid); |
5515 | intel_dp->dfp.max_tmds_clock = |
5516 | drm_dp_downstream_max_tmds_clock(dpcd: intel_dp->dpcd, |
5517 | port_cap: intel_dp->downstream_ports, |
5518 | drm_edid); |
5519 | |
5520 | intel_dp->dfp.pcon_max_frl_bw = |
5521 | drm_dp_get_pcon_max_frl_bw(dpcd: intel_dp->dpcd, |
5522 | port_cap: intel_dp->downstream_ports); |
5523 | |
5524 | drm_dbg_kms(&i915->drm, |
5525 | "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n" , |
5526 | connector->base.base.id, connector->base.name, |
5527 | intel_dp->dfp.max_bpc, |
5528 | intel_dp->dfp.max_dotclock, |
5529 | intel_dp->dfp.min_tmds_clock, |
5530 | intel_dp->dfp.max_tmds_clock, |
5531 | intel_dp->dfp.pcon_max_frl_bw); |
5532 | |
5533 | intel_dp_get_pcon_dsc_cap(intel_dp); |
5534 | } |
5535 | |
5536 | static bool |
5537 | intel_dp_can_ycbcr420(struct intel_dp *intel_dp) |
5538 | { |
5539 | if (source_can_output(intel_dp, format: INTEL_OUTPUT_FORMAT_YCBCR420) && |
5540 | (!drm_dp_is_branch(dpcd: intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough)) |
5541 | return true; |
5542 | |
5543 | if (source_can_output(intel_dp, format: INTEL_OUTPUT_FORMAT_RGB) && |
5544 | dfp_can_convert_from_rgb(intel_dp, sink_format: INTEL_OUTPUT_FORMAT_YCBCR420)) |
5545 | return true; |
5546 | |
5547 | if (source_can_output(intel_dp, format: INTEL_OUTPUT_FORMAT_YCBCR444) && |
5548 | dfp_can_convert_from_ycbcr444(intel_dp, sink_format: INTEL_OUTPUT_FORMAT_YCBCR420)) |
5549 | return true; |
5550 | |
5551 | return false; |
5552 | } |
5553 | |
5554 | static void |
5555 | intel_dp_update_420(struct intel_dp *intel_dp) |
5556 | { |
5557 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5558 | struct intel_connector *connector = intel_dp->attached_connector; |
5559 | |
5560 | intel_dp->dfp.ycbcr420_passthrough = |
5561 | drm_dp_downstream_420_passthrough(dpcd: intel_dp->dpcd, |
5562 | port_cap: intel_dp->downstream_ports); |
5563 | /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ |
5564 | intel_dp->dfp.ycbcr_444_to_420 = |
5565 | dp_to_dig_port(intel_dp)->lspcon.active || |
5566 | drm_dp_downstream_444_to_420_conversion(dpcd: intel_dp->dpcd, |
5567 | port_cap: intel_dp->downstream_ports); |
5568 | intel_dp->dfp.rgb_to_ycbcr = |
5569 | drm_dp_downstream_rgb_to_ycbcr_conversion(dpcd: intel_dp->dpcd, |
5570 | port_cap: intel_dp->downstream_ports, |
5571 | DP_DS_HDMI_BT709_RGB_YCBCR_CONV); |
5572 | |
5573 | connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp); |
5574 | |
5575 | drm_dbg_kms(&i915->drm, |
5576 | "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n" , |
5577 | connector->base.base.id, connector->base.name, |
5578 | str_yes_no(intel_dp->dfp.rgb_to_ycbcr), |
5579 | str_yes_no(connector->base.ycbcr_420_allowed), |
5580 | str_yes_no(intel_dp->dfp.ycbcr_444_to_420)); |
5581 | } |
5582 | |
5583 | static void |
5584 | intel_dp_set_edid(struct intel_dp *intel_dp) |
5585 | { |
5586 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5587 | struct intel_connector *connector = intel_dp->attached_connector; |
5588 | const struct drm_edid *drm_edid; |
5589 | bool vrr_capable; |
5590 | |
5591 | intel_dp_unset_edid(intel_dp); |
5592 | drm_edid = intel_dp_get_edid(intel_dp); |
5593 | connector->detect_edid = drm_edid; |
5594 | |
5595 | /* Below we depend on display info having been updated */ |
5596 | drm_edid_connector_update(connector: &connector->base, edid: drm_edid); |
5597 | |
5598 | vrr_capable = intel_vrr_is_capable(connector); |
5599 | drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n" , |
5600 | connector->base.base.id, connector->base.name, str_yes_no(vrr_capable)); |
5601 | drm_connector_set_vrr_capable_property(connector: &connector->base, capable: vrr_capable); |
5602 | |
5603 | intel_dp_update_dfp(intel_dp, drm_edid); |
5604 | intel_dp_update_420(intel_dp); |
5605 | |
5606 | drm_dp_cec_attach(aux: &intel_dp->aux, |
5607 | source_physical_address: connector->base.display_info.source_physical_address); |
5608 | } |
5609 | |
5610 | static void |
5611 | intel_dp_unset_edid(struct intel_dp *intel_dp) |
5612 | { |
5613 | struct intel_connector *connector = intel_dp->attached_connector; |
5614 | |
5615 | drm_dp_cec_unset_edid(aux: &intel_dp->aux); |
5616 | drm_edid_free(drm_edid: connector->detect_edid); |
5617 | connector->detect_edid = NULL; |
5618 | |
5619 | intel_dp->dfp.max_bpc = 0; |
5620 | intel_dp->dfp.max_dotclock = 0; |
5621 | intel_dp->dfp.min_tmds_clock = 0; |
5622 | intel_dp->dfp.max_tmds_clock = 0; |
5623 | |
5624 | intel_dp->dfp.pcon_max_frl_bw = 0; |
5625 | |
5626 | intel_dp->dfp.ycbcr_444_to_420 = false; |
5627 | connector->base.ycbcr_420_allowed = false; |
5628 | |
5629 | drm_connector_set_vrr_capable_property(connector: &connector->base, |
5630 | capable: false); |
5631 | } |
5632 | |
5633 | static void |
5634 | intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) |
5635 | { |
5636 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
5637 | |
5638 | /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ |
5639 | if (!HAS_DSC(i915)) |
5640 | return; |
5641 | |
5642 | if (intel_dp_is_edp(intel_dp)) |
5643 | intel_edp_get_dsc_sink_cap(edp_dpcd_rev: intel_dp->edp_dpcd[0], |
5644 | connector); |
5645 | else |
5646 | intel_dp_get_dsc_sink_cap(dpcd_rev: intel_dp->dpcd[DP_DPCD_REV], |
5647 | connector); |
5648 | } |
5649 | |
5650 | static int |
5651 | intel_dp_detect(struct drm_connector *connector, |
5652 | struct drm_modeset_acquire_ctx *ctx, |
5653 | bool force) |
5654 | { |
5655 | struct drm_i915_private *dev_priv = to_i915(dev: connector->dev); |
5656 | struct intel_connector *intel_connector = |
5657 | to_intel_connector(connector); |
5658 | struct intel_dp *intel_dp = intel_attached_dp(connector: intel_connector); |
5659 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
5660 | struct intel_encoder *encoder = &dig_port->base; |
5661 | enum drm_connector_status status; |
5662 | int ret; |
5663 | |
5664 | drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n" , |
5665 | connector->base.id, connector->name); |
5666 | drm_WARN_ON(&dev_priv->drm, |
5667 | !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); |
5668 | |
5669 | if (!intel_display_device_enabled(i915: dev_priv)) |
5670 | return connector_status_disconnected; |
5671 | |
5672 | if (!intel_display_driver_check_access(i915: dev_priv)) |
5673 | return connector->status; |
5674 | |
5675 | /* Can't disconnect eDP */ |
5676 | if (intel_dp_is_edp(intel_dp)) |
5677 | status = edp_detect(intel_dp); |
5678 | else if (intel_digital_port_connected(encoder)) |
5679 | status = intel_dp_detect_dpcd(intel_dp); |
5680 | else |
5681 | status = connector_status_disconnected; |
5682 | |
5683 | if (status == connector_status_disconnected) { |
5684 | memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); |
5685 | memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); |
5686 | intel_dp->psr.sink_panel_replay_support = false; |
5687 | |
5688 | if (intel_dp->is_mst) { |
5689 | drm_dbg_kms(&dev_priv->drm, |
5690 | "MST device may have disappeared %d vs %d\n" , |
5691 | intel_dp->is_mst, |
5692 | intel_dp->mst_mgr.mst_state); |
5693 | intel_dp->is_mst = false; |
5694 | drm_dp_mst_topology_mgr_set_mst(mgr: &intel_dp->mst_mgr, |
5695 | mst_state: intel_dp->is_mst); |
5696 | } |
5697 | |
5698 | intel_dp_tunnel_disconnect(intel_dp); |
5699 | |
5700 | goto out; |
5701 | } |
5702 | |
5703 | ret = intel_dp_tunnel_detect(intel_dp, ctx); |
5704 | if (ret == -EDEADLK) |
5705 | return ret; |
5706 | |
5707 | if (ret == 1) |
5708 | intel_connector->base.epoch_counter++; |
5709 | |
5710 | if (!intel_dp_is_edp(intel_dp)) |
5711 | intel_psr_init_dpcd(intel_dp); |
5712 | |
5713 | intel_dp_detect_dsc_caps(intel_dp, connector: intel_connector); |
5714 | |
5715 | intel_dp_configure_mst(intel_dp); |
5716 | |
5717 | /* |
5718 | * TODO: Reset link params when switching to MST mode, until MST |
5719 | * supports link training fallback params. |
5720 | */ |
5721 | if (intel_dp->reset_link_params || intel_dp->is_mst) { |
5722 | intel_dp_reset_max_link_params(intel_dp); |
5723 | intel_dp->reset_link_params = false; |
5724 | } |
5725 | |
5726 | intel_dp_print_rates(intel_dp); |
5727 | |
5728 | if (intel_dp->is_mst) { |
5729 | /* |
5730 | * If we are in MST mode then this connector |
5731 | * won't appear connected or have anything |
5732 | * with EDID on it |
5733 | */ |
5734 | status = connector_status_disconnected; |
5735 | goto out; |
5736 | } |
5737 | |
5738 | /* |
5739 | * Some external monitors do not signal loss of link synchronization |
5740 | * with an IRQ_HPD, so force a link status check. |
5741 | */ |
5742 | if (!intel_dp_is_edp(intel_dp)) { |
5743 | ret = intel_dp_retrain_link(encoder, ctx); |
5744 | if (ret) |
5745 | return ret; |
5746 | } |
5747 | |
5748 | /* |
5749 | * Clearing NACK and defer counts to get their exact values |
5750 | * while reading EDID which are required by Compliance tests |
5751 | * 4.2.2.4 and 4.2.2.5 |
5752 | */ |
5753 | intel_dp->aux.i2c_nack_count = 0; |
5754 | intel_dp->aux.i2c_defer_count = 0; |
5755 | |
5756 | intel_dp_set_edid(intel_dp); |
5757 | if (intel_dp_is_edp(intel_dp) || |
5758 | to_intel_connector(connector)->detect_edid) |
5759 | status = connector_status_connected; |
5760 | |
5761 | intel_dp_check_device_service_irq(intel_dp); |
5762 | |
5763 | out: |
5764 | if (status != connector_status_connected && !intel_dp->is_mst) |
5765 | intel_dp_unset_edid(intel_dp); |
5766 | |
5767 | if (!intel_dp_is_edp(intel_dp)) |
5768 | drm_dp_set_subconnector_property(connector, |
5769 | status, |
5770 | dpcd: intel_dp->dpcd, |
5771 | port_cap: intel_dp->downstream_ports); |
5772 | return status; |
5773 | } |
5774 | |
5775 | static void |
5776 | intel_dp_force(struct drm_connector *connector) |
5777 | { |
5778 | struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); |
5779 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
5780 | struct intel_encoder *intel_encoder = &dig_port->base; |
5781 | struct drm_i915_private *dev_priv = to_i915(dev: intel_encoder->base.dev); |
5782 | |
5783 | drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n" , |
5784 | connector->base.id, connector->name); |
5785 | |
5786 | if (!intel_display_driver_check_access(i915: dev_priv)) |
5787 | return; |
5788 | |
5789 | intel_dp_unset_edid(intel_dp); |
5790 | |
5791 | if (connector->status != connector_status_connected) |
5792 | return; |
5793 | |
5794 | intel_dp_set_edid(intel_dp); |
5795 | } |
5796 | |
5797 | static int intel_dp_get_modes(struct drm_connector *connector) |
5798 | { |
5799 | struct intel_connector *intel_connector = to_intel_connector(connector); |
5800 | int num_modes; |
5801 | |
5802 | /* drm_edid_connector_update() done in ->detect() or ->force() */ |
5803 | num_modes = drm_edid_connector_add_modes(connector); |
5804 | |
5805 | /* Also add fixed mode, which may or may not be present in EDID */ |
5806 | if (intel_dp_is_edp(intel_dp: intel_attached_dp(connector: intel_connector))) |
5807 | num_modes += intel_panel_get_modes(connector: intel_connector); |
5808 | |
5809 | if (num_modes) |
5810 | return num_modes; |
5811 | |
5812 | if (!intel_connector->detect_edid) { |
5813 | struct intel_dp *intel_dp = intel_attached_dp(connector: intel_connector); |
5814 | struct drm_display_mode *mode; |
5815 | |
5816 | mode = drm_dp_downstream_mode(dev: connector->dev, |
5817 | dpcd: intel_dp->dpcd, |
5818 | port_cap: intel_dp->downstream_ports); |
5819 | if (mode) { |
5820 | drm_mode_probed_add(connector, mode); |
5821 | num_modes++; |
5822 | } |
5823 | } |
5824 | |
5825 | return num_modes; |
5826 | } |
5827 | |
5828 | static int |
5829 | intel_dp_connector_register(struct drm_connector *connector) |
5830 | { |
5831 | struct drm_i915_private *i915 = to_i915(dev: connector->dev); |
5832 | struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); |
5833 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
5834 | struct intel_lspcon *lspcon = &dig_port->lspcon; |
5835 | int ret; |
5836 | |
5837 | ret = intel_connector_register(connector); |
5838 | if (ret) |
5839 | return ret; |
5840 | |
5841 | drm_dbg_kms(&i915->drm, "registering %s bus for %s\n" , |
5842 | intel_dp->aux.name, connector->kdev->kobj.name); |
5843 | |
5844 | intel_dp->aux.dev = connector->kdev; |
5845 | ret = drm_dp_aux_register(aux: &intel_dp->aux); |
5846 | if (!ret) |
5847 | drm_dp_cec_register_connector(aux: &intel_dp->aux, connector); |
5848 | |
5849 | if (!intel_bios_encoder_is_lspcon(devdata: dig_port->base.devdata)) |
5850 | return ret; |
5851 | |
5852 | /* |
5853 | * ToDo: Clean this up to handle lspcon init and resume more |
5854 | * efficiently and streamlined. |
5855 | */ |
5856 | if (lspcon_init(dig_port)) { |
5857 | lspcon_detect_hdr_capability(lspcon); |
5858 | if (lspcon->hdr_supported) |
5859 | drm_connector_attach_hdr_output_metadata_property(connector); |
5860 | } |
5861 | |
5862 | return ret; |
5863 | } |
5864 | |
5865 | static void |
5866 | intel_dp_connector_unregister(struct drm_connector *connector) |
5867 | { |
5868 | struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); |
5869 | |
5870 | drm_dp_cec_unregister_connector(aux: &intel_dp->aux); |
5871 | drm_dp_aux_unregister(aux: &intel_dp->aux); |
5872 | intel_connector_unregister(connector); |
5873 | } |
5874 | |
5875 | void intel_dp_connector_sync_state(struct intel_connector *connector, |
5876 | const struct intel_crtc_state *crtc_state) |
5877 | { |
5878 | struct drm_i915_private *i915 = to_i915(dev: connector->base.dev); |
5879 | |
5880 | if (crtc_state && crtc_state->dsc.compression_enable) { |
5881 | drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux); |
5882 | connector->dp.dsc_decompression_enabled = true; |
5883 | } else { |
5884 | connector->dp.dsc_decompression_enabled = false; |
5885 | } |
5886 | } |
5887 | |
/*
 * Encoder teardown: clean up MST state, DP tunnels, PPS VDD and AUX.
 * The ordering below matters (VDD must be off before waiting for the
 * panel power cycle; AUX is torn down last).
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_dp_tunnel_destroy(intel_dp);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}
5907 | |
/* System suspend hook: turn off panel VDD and suspend any DP tunnel. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_tunnel_suspend(intel_dp);
}
5916 | |
/* Shutdown hook: honor the panel power-off cycle delay before reboot. */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}
5923 | |
5924 | static int intel_modeset_tile_group(struct intel_atomic_state *state, |
5925 | int tile_group_id) |
5926 | { |
5927 | struct drm_i915_private *dev_priv = to_i915(dev: state->base.dev); |
5928 | struct drm_connector_list_iter conn_iter; |
5929 | struct drm_connector *connector; |
5930 | int ret = 0; |
5931 | |
5932 | drm_connector_list_iter_begin(dev: &dev_priv->drm, iter: &conn_iter); |
5933 | drm_for_each_connector_iter(connector, &conn_iter) { |
5934 | struct drm_connector_state *conn_state; |
5935 | struct intel_crtc_state *crtc_state; |
5936 | struct intel_crtc *crtc; |
5937 | |
5938 | if (!connector->has_tile || |
5939 | connector->tile_group->id != tile_group_id) |
5940 | continue; |
5941 | |
5942 | conn_state = drm_atomic_get_connector_state(state: &state->base, |
5943 | connector); |
5944 | if (IS_ERR(ptr: conn_state)) { |
5945 | ret = PTR_ERR(ptr: conn_state); |
5946 | break; |
5947 | } |
5948 | |
5949 | crtc = to_intel_crtc(conn_state->crtc); |
5950 | |
5951 | if (!crtc) |
5952 | continue; |
5953 | |
5954 | crtc_state = intel_atomic_get_new_crtc_state(state, crtc); |
5955 | crtc_state->uapi.mode_changed = true; |
5956 | |
5957 | ret = drm_atomic_add_affected_planes(state: &state->base, crtc: &crtc->base); |
5958 | if (ret) |
5959 | break; |
5960 | } |
5961 | drm_connector_list_iter_end(iter: &conn_iter); |
5962 | |
5963 | return ret; |
5964 | } |
5965 | |
5966 | static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) |
5967 | { |
5968 | struct drm_i915_private *dev_priv = to_i915(dev: state->base.dev); |
5969 | struct intel_crtc *crtc; |
5970 | |
5971 | if (transcoders == 0) |
5972 | return 0; |
5973 | |
5974 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
5975 | struct intel_crtc_state *crtc_state; |
5976 | int ret; |
5977 | |
5978 | crtc_state = intel_atomic_get_crtc_state(state: &state->base, crtc); |
5979 | if (IS_ERR(ptr: crtc_state)) |
5980 | return PTR_ERR(ptr: crtc_state); |
5981 | |
5982 | if (!crtc_state->hw.enable) |
5983 | continue; |
5984 | |
5985 | if (!(transcoders & BIT(crtc_state->cpu_transcoder))) |
5986 | continue; |
5987 | |
5988 | crtc_state->uapi.mode_changed = true; |
5989 | |
5990 | ret = drm_atomic_add_affected_connectors(state: &state->base, crtc: &crtc->base); |
5991 | if (ret) |
5992 | return ret; |
5993 | |
5994 | ret = drm_atomic_add_affected_planes(state: &state->base, crtc: &crtc->base); |
5995 | if (ret) |
5996 | return ret; |
5997 | |
5998 | transcoders &= ~BIT(crtc_state->cpu_transcoder); |
5999 | } |
6000 | |
6001 | drm_WARN_ON(&dev_priv->drm, transcoders != 0); |
6002 | |
6003 | return 0; |
6004 | } |
6005 | |
6006 | static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, |
6007 | struct drm_connector *connector) |
6008 | { |
6009 | const struct drm_connector_state *old_conn_state = |
6010 | drm_atomic_get_old_connector_state(state: &state->base, connector); |
6011 | const struct intel_crtc_state *old_crtc_state; |
6012 | struct intel_crtc *crtc; |
6013 | u8 transcoders; |
6014 | |
6015 | crtc = to_intel_crtc(old_conn_state->crtc); |
6016 | if (!crtc) |
6017 | return 0; |
6018 | |
6019 | old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); |
6020 | |
6021 | if (!old_crtc_state->hw.active) |
6022 | return 0; |
6023 | |
6024 | transcoders = old_crtc_state->sync_mode_slaves_mask; |
6025 | if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) |
6026 | transcoders |= BIT(old_crtc_state->master_transcoder); |
6027 | |
6028 | return intel_modeset_affected_transcoders(state, |
6029 | transcoders); |
6030 | } |
6031 | |
6032 | static int intel_dp_connector_atomic_check(struct drm_connector *conn, |
6033 | struct drm_atomic_state *_state) |
6034 | { |
6035 | struct drm_i915_private *dev_priv = to_i915(dev: conn->dev); |
6036 | struct intel_atomic_state *state = to_intel_atomic_state(_state); |
6037 | struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state: _state, connector: conn); |
6038 | struct intel_connector *intel_conn = to_intel_connector(conn); |
6039 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder: intel_conn->encoder); |
6040 | int ret; |
6041 | |
6042 | ret = intel_digital_connector_atomic_check(conn, state: &state->base); |
6043 | if (ret) |
6044 | return ret; |
6045 | |
6046 | if (intel_dp_mst_source_support(intel_dp)) { |
6047 | ret = drm_dp_mst_root_conn_atomic_check(new_conn_state: conn_state, mgr: &intel_dp->mst_mgr); |
6048 | if (ret) |
6049 | return ret; |
6050 | } |
6051 | |
6052 | if (!intel_connector_needs_modeset(state, connector: conn)) |
6053 | return 0; |
6054 | |
6055 | ret = intel_dp_tunnel_atomic_check_state(state, |
6056 | intel_dp, |
6057 | connector: intel_conn); |
6058 | if (ret) |
6059 | return ret; |
6060 | |
6061 | /* |
6062 | * We don't enable port sync on BDW due to missing w/as and |
6063 | * due to not having adjusted the modeset sequence appropriately. |
6064 | */ |
6065 | if (DISPLAY_VER(dev_priv) < 9) |
6066 | return 0; |
6067 | |
6068 | if (conn->has_tile) { |
6069 | ret = intel_modeset_tile_group(state, tile_group_id: conn->tile_group->id); |
6070 | if (ret) |
6071 | return ret; |
6072 | } |
6073 | |
6074 | return intel_modeset_synced_crtcs(state, connector: conn); |
6075 | } |
6076 | |
6077 | static void intel_dp_oob_hotplug_event(struct drm_connector *connector, |
6078 | enum drm_connector_status hpd_state) |
6079 | { |
6080 | struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); |
6081 | struct drm_i915_private *i915 = to_i915(dev: connector->dev); |
6082 | bool hpd_high = hpd_state == connector_status_connected; |
6083 | unsigned int hpd_pin = encoder->hpd_pin; |
6084 | bool need_work = false; |
6085 | |
6086 | spin_lock_irq(lock: &i915->irq_lock); |
6087 | if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) { |
6088 | i915->display.hotplug.event_bits |= BIT(hpd_pin); |
6089 | |
6090 | __assign_bit(nr: hpd_pin, addr: &i915->display.hotplug.oob_hotplug_last_state, value: hpd_high); |
6091 | need_work = true; |
6092 | } |
6093 | spin_unlock_irq(lock: &i915->irq_lock); |
6094 | |
6095 | if (need_work) |
6096 | intel_hpd_schedule_detection(i915); |
6097 | } |
6098 | |
/* drm_connector vtable shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};
6111 | |
/* Probe/modeset helper vtable for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6118 | |
6119 | enum irqreturn |
6120 | intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) |
6121 | { |
6122 | struct drm_i915_private *i915 = to_i915(dev: dig_port->base.base.dev); |
6123 | struct intel_dp *intel_dp = &dig_port->dp; |
6124 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
6125 | |
6126 | if (dig_port->base.type == INTEL_OUTPUT_EDP && |
6127 | (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { |
6128 | /* |
6129 | * vdd off can generate a long/short pulse on eDP which |
6130 | * would require vdd on to handle it, and thus we |
6131 | * would end up in an endless cycle of |
6132 | * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." |
6133 | */ |
6134 | drm_dbg_kms(&i915->drm, |
6135 | "ignoring %s hpd on eDP [ENCODER:%d:%s]\n" , |
6136 | long_hpd ? "long" : "short" , |
6137 | dig_port->base.base.base.id, |
6138 | dig_port->base.base.name); |
6139 | return IRQ_HANDLED; |
6140 | } |
6141 | |
6142 | drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n" , |
6143 | dig_port->base.base.base.id, |
6144 | dig_port->base.base.name, |
6145 | long_hpd ? "long" : "short" ); |
6146 | |
6147 | /* |
6148 | * TBT DP tunnels require the GFX driver to read out the DPRX caps in |
6149 | * response to long HPD pulses. The DP hotplug handler does that, |
6150 | * however the hotplug handler may be blocked by another |
6151 | * connector's/encoder's hotplug handler. Since the TBT CM may not |
6152 | * complete the DP tunnel BW request for the latter connector/encoder |
6153 | * waiting for this encoder's DPRX read, perform a dummy read here. |
6154 | */ |
6155 | if (long_hpd) |
6156 | intel_dp_read_dprx_caps(intel_dp, dpcd); |
6157 | |
6158 | if (long_hpd) { |
6159 | intel_dp->reset_link_params = true; |
6160 | return IRQ_NONE; |
6161 | } |
6162 | |
6163 | if (intel_dp->is_mst) { |
6164 | if (!intel_dp_check_mst_status(intel_dp)) |
6165 | return IRQ_NONE; |
6166 | } else if (!intel_dp_short_pulse(intel_dp)) { |
6167 | return IRQ_NONE; |
6168 | } |
6169 | |
6170 | return IRQ_HANDLED; |
6171 | } |
6172 | |
6173 | static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, |
6174 | const struct intel_bios_encoder_data *devdata, |
6175 | enum port port) |
6176 | { |
6177 | /* |
6178 | * eDP not supported on g4x. so bail out early just |
6179 | * for a bit extra safety in case the VBT is bonkers. |
6180 | */ |
6181 | if (DISPLAY_VER(dev_priv) < 5) |
6182 | return false; |
6183 | |
6184 | if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) |
6185 | return true; |
6186 | |
6187 | return devdata && intel_bios_encoder_supports_edp(devdata); |
6188 | } |
6189 | |
6190 | bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) |
6191 | { |
6192 | const struct intel_bios_encoder_data *devdata = |
6193 | intel_bios_encoder_data_lookup(i915, port); |
6194 | |
6195 | return _intel_dp_is_port_edp(dev_priv: i915, devdata, port); |
6196 | } |
6197 | |
6198 | static bool |
6199 | has_gamut_metadata_dip(struct intel_encoder *encoder) |
6200 | { |
6201 | struct drm_i915_private *i915 = to_i915(dev: encoder->base.dev); |
6202 | enum port port = encoder->port; |
6203 | |
6204 | if (intel_bios_encoder_is_lspcon(devdata: encoder->devdata)) |
6205 | return false; |
6206 | |
6207 | if (DISPLAY_VER(i915) >= 11) |
6208 | return true; |
6209 | |
6210 | if (port == PORT_A) |
6211 | return false; |
6212 | |
6213 | if (IS_HASWELL(i915) || IS_BROADWELL(i915) || |
6214 | DISPLAY_VER(i915) >= 9) |
6215 | return true; |
6216 | |
6217 | return false; |
6218 | } |
6219 | |
6220 | static void |
6221 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) |
6222 | { |
6223 | struct drm_i915_private *dev_priv = to_i915(dev: connector->dev); |
6224 | enum port port = dp_to_dig_port(intel_dp)->base.port; |
6225 | |
6226 | if (!intel_dp_is_edp(intel_dp)) |
6227 | drm_connector_attach_dp_subconnector_property(connector); |
6228 | |
6229 | if (!IS_G4X(dev_priv) && port != PORT_A) |
6230 | intel_attach_force_audio_property(connector); |
6231 | |
6232 | intel_attach_broadcast_rgb_property(connector); |
6233 | if (HAS_GMCH(dev_priv)) |
6234 | drm_connector_attach_max_bpc_property(connector, min: 6, max: 10); |
6235 | else if (DISPLAY_VER(dev_priv) >= 5) |
6236 | drm_connector_attach_max_bpc_property(connector, min: 6, max: 12); |
6237 | |
6238 | /* Register HDMI colorspace for case of lspcon */ |
6239 | if (intel_bios_encoder_is_lspcon(devdata: dp_to_dig_port(intel_dp)->base.devdata)) { |
6240 | drm_connector_attach_content_type_property(dev: connector); |
6241 | intel_attach_hdmi_colorspace_property(connector); |
6242 | } else { |
6243 | intel_attach_dp_colorspace_property(connector); |
6244 | } |
6245 | |
6246 | if (has_gamut_metadata_dip(encoder: &dp_to_dig_port(intel_dp)->base)) |
6247 | drm_connector_attach_hdr_output_metadata_property(connector); |
6248 | |
6249 | if (HAS_VRR(dev_priv)) |
6250 | drm_connector_attach_vrr_capable_property(connector); |
6251 | } |
6252 | |
6253 | static void |
6254 | intel_edp_add_properties(struct intel_dp *intel_dp) |
6255 | { |
6256 | struct intel_connector *connector = intel_dp->attached_connector; |
6257 | struct drm_i915_private *i915 = to_i915(dev: connector->base.dev); |
6258 | const struct drm_display_mode *fixed_mode = |
6259 | intel_panel_preferred_fixed_mode(connector); |
6260 | |
6261 | intel_attach_scaling_mode_property(connector: &connector->base); |
6262 | |
6263 | drm_connector_set_panel_orientation_with_quirk(connector: &connector->base, |
6264 | panel_orientation: i915->display.vbt.orientation, |
6265 | width: fixed_mode->hdisplay, |
6266 | height: fixed_mode->vdisplay); |
6267 | } |
6268 | |
/*
 * Set up eDP backlight control for @connector. On VLV/CHV the backlight
 * is tied to a specific pipe, so determine which one to use first; on
 * all other platforms INVALID_PIPE is passed through.
 */
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
				      struct intel_connector *connector)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum pipe pipe = INVALID_PIPE;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;
	}

	intel_backlight_setup(connector, pipe);
}
6292 | |
6293 | static bool intel_edp_init_connector(struct intel_dp *intel_dp, |
6294 | struct intel_connector *intel_connector) |
6295 | { |
6296 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
6297 | struct drm_connector *connector = &intel_connector->base; |
6298 | struct drm_display_mode *fixed_mode; |
6299 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
6300 | bool has_dpcd; |
6301 | const struct drm_edid *drm_edid; |
6302 | |
6303 | if (!intel_dp_is_edp(intel_dp)) |
6304 | return true; |
6305 | |
6306 | /* |
6307 | * On IBX/CPT we may get here with LVDS already registered. Since the |
6308 | * driver uses the only internal power sequencer available for both |
6309 | * eDP and LVDS bail out early in this case to prevent interfering |
6310 | * with an already powered-on LVDS power sequencer. |
6311 | */ |
6312 | if (intel_get_lvds_encoder(dev_priv)) { |
6313 | drm_WARN_ON(&dev_priv->drm, |
6314 | !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); |
6315 | drm_info(&dev_priv->drm, |
6316 | "LVDS was detected, not registering eDP\n" ); |
6317 | |
6318 | return false; |
6319 | } |
6320 | |
6321 | intel_bios_init_panel_early(dev_priv, panel: &intel_connector->panel, |
6322 | devdata: encoder->devdata); |
6323 | |
6324 | if (!intel_pps_init(intel_dp)) { |
6325 | drm_info(&dev_priv->drm, |
6326 | "[ENCODER:%d:%s] unusable PPS, disabling eDP\n" , |
6327 | encoder->base.base.id, encoder->base.name); |
6328 | /* |
6329 | * The BIOS may have still enabled VDD on the PPS even |
6330 | * though it's unusable. Make sure we turn it back off |
6331 | * and to release the power domain references/etc. |
6332 | */ |
6333 | goto out_vdd_off; |
6334 | } |
6335 | |
6336 | /* |
6337 | * Enable HPD sense for live status check. |
6338 | * intel_hpd_irq_setup() will turn it off again |
6339 | * if it's no longer needed later. |
6340 | * |
6341 | * The DPCD probe below will make sure VDD is on. |
6342 | */ |
6343 | intel_hpd_enable_detection(encoder); |
6344 | |
6345 | /* Cache DPCD and EDID for edp. */ |
6346 | has_dpcd = intel_edp_init_dpcd(intel_dp, connector: intel_connector); |
6347 | |
6348 | if (!has_dpcd) { |
6349 | /* if this fails, presume the device is a ghost */ |
6350 | drm_info(&dev_priv->drm, |
6351 | "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n" , |
6352 | encoder->base.base.id, encoder->base.name); |
6353 | goto out_vdd_off; |
6354 | } |
6355 | |
6356 | /* |
6357 | * VBT and straps are liars. Also check HPD as that seems |
6358 | * to be the most reliable piece of information available. |
6359 | * |
6360 | * ... expect on devices that forgot to hook HPD up for eDP |
6361 | * (eg. Acer Chromebook C710), so we'll check it only if multiple |
6362 | * ports are attempting to use the same AUX CH, according to VBT. |
6363 | */ |
6364 | if (intel_bios_dp_has_shared_aux_ch(devdata: encoder->devdata)) { |
6365 | /* |
6366 | * If this fails, presume the DPCD answer came |
6367 | * from some other port using the same AUX CH. |
6368 | * |
6369 | * FIXME maybe cleaner to check this before the |
6370 | * DPCD read? Would need sort out the VDD handling... |
6371 | */ |
6372 | if (!intel_digital_port_connected(encoder)) { |
6373 | drm_info(&dev_priv->drm, |
6374 | "[ENCODER:%d:%s] HPD is down, disabling eDP\n" , |
6375 | encoder->base.base.id, encoder->base.name); |
6376 | goto out_vdd_off; |
6377 | } |
6378 | |
6379 | /* |
6380 | * Unfortunately even the HPD based detection fails on |
6381 | * eg. Asus B360M-A (CFL+CNP), so as a last resort fall |
6382 | * back to checking for a VGA branch device. Only do this |
6383 | * on known affected platforms to minimize false positives. |
6384 | */ |
6385 | if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(dpcd: intel_dp->dpcd) && |
6386 | (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) == |
6387 | DP_DWN_STRM_PORT_TYPE_ANALOG) { |
6388 | drm_info(&dev_priv->drm, |
6389 | "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n" , |
6390 | encoder->base.base.id, encoder->base.name); |
6391 | goto out_vdd_off; |
6392 | } |
6393 | } |
6394 | |
6395 | mutex_lock(&dev_priv->drm.mode_config.mutex); |
6396 | drm_edid = drm_edid_read_ddc(connector, adapter: connector->ddc); |
6397 | if (!drm_edid) { |
6398 | /* Fallback to EDID from ACPI OpRegion, if any */ |
6399 | drm_edid = intel_opregion_get_edid(connector: intel_connector); |
6400 | if (drm_edid) |
6401 | drm_dbg_kms(&dev_priv->drm, |
6402 | "[CONNECTOR:%d:%s] Using OpRegion EDID\n" , |
6403 | connector->base.id, connector->name); |
6404 | } |
6405 | if (drm_edid) { |
6406 | if (drm_edid_connector_update(connector, edid: drm_edid) || |
6407 | !drm_edid_connector_add_modes(connector)) { |
6408 | drm_edid_connector_update(connector, NULL); |
6409 | drm_edid_free(drm_edid); |
6410 | drm_edid = ERR_PTR(error: -EINVAL); |
6411 | } |
6412 | } else { |
6413 | drm_edid = ERR_PTR(error: -ENOENT); |
6414 | } |
6415 | |
6416 | intel_bios_init_panel_late(dev_priv, panel: &intel_connector->panel, devdata: encoder->devdata, |
6417 | drm_edid: IS_ERR(ptr: drm_edid) ? NULL : drm_edid); |
6418 | |
6419 | intel_panel_add_edid_fixed_modes(connector: intel_connector, use_alt_fixed_modes: true); |
6420 | |
6421 | /* MSO requires information from the EDID */ |
6422 | intel_edp_mso_init(intel_dp); |
6423 | |
6424 | /* multiply the mode clock and horizontal timings for MSO */ |
6425 | list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head) |
6426 | intel_edp_mso_mode_fixup(connector: intel_connector, mode: fixed_mode); |
6427 | |
6428 | /* fallback to VBT if available for eDP */ |
6429 | if (!intel_panel_preferred_fixed_mode(connector: intel_connector)) |
6430 | intel_panel_add_vbt_lfp_fixed_mode(connector: intel_connector); |
6431 | |
6432 | mutex_unlock(lock: &dev_priv->drm.mode_config.mutex); |
6433 | |
6434 | if (!intel_panel_preferred_fixed_mode(connector: intel_connector)) { |
6435 | drm_info(&dev_priv->drm, |
6436 | "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n" , |
6437 | encoder->base.base.id, encoder->base.name); |
6438 | goto out_vdd_off; |
6439 | } |
6440 | |
6441 | intel_panel_init(connector: intel_connector, fixed_edid: drm_edid); |
6442 | |
6443 | intel_edp_backlight_setup(intel_dp, connector: intel_connector); |
6444 | |
6445 | intel_edp_add_properties(intel_dp); |
6446 | |
6447 | intel_pps_init_late(intel_dp); |
6448 | |
6449 | return true; |
6450 | |
6451 | out_vdd_off: |
6452 | intel_pps_vdd_off_sync(intel_dp); |
6453 | |
6454 | return false; |
6455 | } |
6456 | |
6457 | static void intel_dp_modeset_retry_work_fn(struct work_struct *work) |
6458 | { |
6459 | struct intel_connector *intel_connector; |
6460 | struct drm_connector *connector; |
6461 | |
6462 | intel_connector = container_of(work, typeof(*intel_connector), |
6463 | modeset_retry_work); |
6464 | connector = &intel_connector->base; |
6465 | drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n" , connector->base.id, |
6466 | connector->name); |
6467 | |
6468 | /* Grab the locks before changing connector property*/ |
6469 | mutex_lock(&connector->dev->mode_config.mutex); |
6470 | /* Set connector link status to BAD and send a Uevent to notify |
6471 | * userspace to do a modeset. |
6472 | */ |
6473 | drm_connector_set_link_status_property(connector, |
6474 | DRM_MODE_LINK_STATUS_BAD); |
6475 | mutex_unlock(lock: &connector->dev->mode_config.mutex); |
6476 | /* Send Hotplug uevent so userspace can reprobe */ |
6477 | drm_kms_helper_connector_hotplug_event(connector); |
6478 | |
6479 | drm_connector_put(connector); |
6480 | } |
6481 | |
/* Initialize the per-connector work used to retry a failed link training. */
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
{
	INIT_WORK(&connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);
}
6487 | |
6488 | bool |
6489 | intel_dp_init_connector(struct intel_digital_port *dig_port, |
6490 | struct intel_connector *intel_connector) |
6491 | { |
6492 | struct drm_connector *connector = &intel_connector->base; |
6493 | struct intel_dp *intel_dp = &dig_port->dp; |
6494 | struct intel_encoder *intel_encoder = &dig_port->base; |
6495 | struct drm_device *dev = intel_encoder->base.dev; |
6496 | struct drm_i915_private *dev_priv = to_i915(dev); |
6497 | enum port port = intel_encoder->port; |
6498 | enum phy phy = intel_port_to_phy(i915: dev_priv, port); |
6499 | int type; |
6500 | |
6501 | /* Initialize the work for modeset in case of link train failure */ |
6502 | intel_dp_init_modeset_retry_work(connector: intel_connector); |
6503 | |
6504 | if (drm_WARN(dev, dig_port->max_lanes < 1, |
6505 | "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n" , |
6506 | dig_port->max_lanes, intel_encoder->base.base.id, |
6507 | intel_encoder->base.name)) |
6508 | return false; |
6509 | |
6510 | intel_dp->reset_link_params = true; |
6511 | intel_dp->pps.pps_pipe = INVALID_PIPE; |
6512 | intel_dp->pps.active_pipe = INVALID_PIPE; |
6513 | |
6514 | /* Preserve the current hw state. */ |
6515 | intel_dp->DP = intel_de_read(i915: dev_priv, reg: intel_dp->output_reg); |
6516 | intel_dp->attached_connector = intel_connector; |
6517 | |
6518 | if (_intel_dp_is_port_edp(dev_priv, devdata: intel_encoder->devdata, port)) { |
6519 | /* |
6520 | * Currently we don't support eDP on TypeC ports, although in |
6521 | * theory it could work on TypeC legacy ports. |
6522 | */ |
6523 | drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); |
6524 | type = DRM_MODE_CONNECTOR_eDP; |
6525 | intel_encoder->type = INTEL_OUTPUT_EDP; |
6526 | |
6527 | /* eDP only on port B and/or C on vlv/chv */ |
6528 | if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || |
6529 | IS_CHERRYVIEW(dev_priv)) && |
6530 | port != PORT_B && port != PORT_C)) |
6531 | return false; |
6532 | } else { |
6533 | type = DRM_MODE_CONNECTOR_DisplayPort; |
6534 | } |
6535 | |
6536 | intel_dp_set_default_sink_rates(intel_dp); |
6537 | intel_dp_set_default_max_sink_lane_count(intel_dp); |
6538 | |
6539 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
6540 | intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); |
6541 | |
6542 | intel_dp_aux_init(intel_dp); |
6543 | intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; |
6544 | |
6545 | drm_dbg_kms(&dev_priv->drm, |
6546 | "Adding %s connector on [ENCODER:%d:%s]\n" , |
6547 | type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP" , |
6548 | intel_encoder->base.base.id, intel_encoder->base.name); |
6549 | |
6550 | drm_connector_init_with_ddc(dev, connector, funcs: &intel_dp_connector_funcs, |
6551 | connector_type: type, ddc: &intel_dp->aux.ddc); |
6552 | drm_connector_helper_add(connector, funcs: &intel_dp_connector_helper_funcs); |
6553 | |
6554 | if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12) |
6555 | connector->interlace_allowed = true; |
6556 | |
6557 | intel_connector->polled = DRM_CONNECTOR_POLL_HPD; |
6558 | intel_connector->base.polled = intel_connector->polled; |
6559 | |
6560 | intel_connector_attach_encoder(connector: intel_connector, encoder: intel_encoder); |
6561 | |
6562 | if (HAS_DDI(dev_priv)) |
6563 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
6564 | else |
6565 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
6566 | intel_connector->sync_state = intel_dp_connector_sync_state; |
6567 | |
6568 | if (!intel_edp_init_connector(intel_dp, intel_connector)) { |
6569 | intel_dp_aux_fini(intel_dp); |
6570 | goto fail; |
6571 | } |
6572 | |
6573 | intel_dp_set_source_rates(intel_dp); |
6574 | intel_dp_set_common_rates(intel_dp); |
6575 | intel_dp_reset_max_link_params(intel_dp); |
6576 | |
6577 | /* init MST on ports that can support it */ |
6578 | intel_dp_mst_encoder_init(dig_port, |
6579 | conn_id: intel_connector->base.base.id); |
6580 | |
6581 | intel_dp_add_properties(intel_dp, connector); |
6582 | |
6583 | if (is_hdcp_supported(i915: dev_priv, port) && !intel_dp_is_edp(intel_dp)) { |
6584 | int ret = intel_dp_hdcp_init(dig_port, intel_connector); |
6585 | if (ret) |
6586 | drm_dbg_kms(&dev_priv->drm, |
6587 | "HDCP init failed, skipping.\n" ); |
6588 | } |
6589 | |
6590 | intel_dp->colorimetry_support = |
6591 | intel_dp_get_colorimetry_status(intel_dp); |
6592 | |
6593 | intel_dp->frl.is_trained = false; |
6594 | intel_dp->frl.trained_rate_gbps = 0; |
6595 | |
6596 | intel_psr_init(intel_dp); |
6597 | |
6598 | return true; |
6599 | |
6600 | fail: |
6601 | intel_display_power_flush_work(i915: dev_priv); |
6602 | drm_connector_cleanup(connector); |
6603 | |
6604 | return false; |
6605 | } |
6606 | |
6607 | void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) |
6608 | { |
6609 | struct intel_encoder *encoder; |
6610 | |
6611 | if (!HAS_DISPLAY(dev_priv)) |
6612 | return; |
6613 | |
6614 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
6615 | struct intel_dp *intel_dp; |
6616 | |
6617 | if (encoder->type != INTEL_OUTPUT_DDI) |
6618 | continue; |
6619 | |
6620 | intel_dp = enc_to_intel_dp(encoder); |
6621 | |
6622 | if (!intel_dp_mst_source_support(intel_dp)) |
6623 | continue; |
6624 | |
6625 | if (intel_dp->is_mst) |
6626 | drm_dp_mst_topology_mgr_suspend(mgr: &intel_dp->mst_mgr); |
6627 | } |
6628 | } |
6629 | |
6630 | void intel_dp_mst_resume(struct drm_i915_private *dev_priv) |
6631 | { |
6632 | struct intel_encoder *encoder; |
6633 | |
6634 | if (!HAS_DISPLAY(dev_priv)) |
6635 | return; |
6636 | |
6637 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
6638 | struct intel_dp *intel_dp; |
6639 | int ret; |
6640 | |
6641 | if (encoder->type != INTEL_OUTPUT_DDI) |
6642 | continue; |
6643 | |
6644 | intel_dp = enc_to_intel_dp(encoder); |
6645 | |
6646 | if (!intel_dp_mst_source_support(intel_dp)) |
6647 | continue; |
6648 | |
6649 | ret = drm_dp_mst_topology_mgr_resume(mgr: &intel_dp->mst_mgr, |
6650 | sync: true); |
6651 | if (ret) { |
6652 | intel_dp->is_mst = false; |
6653 | drm_dp_mst_topology_mgr_set_mst(mgr: &intel_dp->mst_mgr, |
6654 | mst_state: false); |
6655 | } |
6656 | } |
6657 | } |
6658 | |