1 | /* |
2 | * Copyright © 2008-2015 Intel Corporation |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
22 | */ |
23 | |
24 | #include "i915_drv.h" |
25 | #include "intel_display_types.h" |
26 | #include "intel_dp.h" |
27 | #include "intel_dp_link_training.h" |
28 | |
/* Prefix identifying the connector, encoder and DP PHY in LT log messages. */
#define LT_MSG_PREFIX "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy) (_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

/* Debug-level link training message for the given DP PHY. */
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)

/*
 * Error-level link training message, demoted to debug level if the sink is
 * disconnected: failures on an unplugged link are expected, not errors.
 */
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(&dp_to_i915(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
49 | |
50 | static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) |
51 | { |
52 | memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); |
53 | } |
54 | |
55 | static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) |
56 | { |
57 | intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT - |
58 | DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0; |
59 | } |
60 | |
61 | static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp, |
62 | enum drm_dp_phy dp_phy) |
63 | { |
64 | return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1]; |
65 | } |
66 | |
67 | static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, |
68 | const u8 dpcd[DP_RECEIVER_CAP_SIZE], |
69 | enum drm_dp_phy dp_phy) |
70 | { |
71 | u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); |
72 | |
73 | if (drm_dp_read_lttpr_phy_caps(aux: &intel_dp->aux, dpcd, dp_phy, caps: phy_caps) < 0) { |
74 | lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n" ); |
75 | return; |
76 | } |
77 | |
78 | lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n" , |
79 | (int)sizeof(intel_dp->lttpr_phy_caps[0]), |
80 | phy_caps); |
81 | } |
82 | |
83 | static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp, |
84 | const u8 dpcd[DP_RECEIVER_CAP_SIZE]) |
85 | { |
86 | int ret; |
87 | |
88 | ret = drm_dp_read_lttpr_common_caps(aux: &intel_dp->aux, dpcd, |
89 | caps: intel_dp->lttpr_common_caps); |
90 | if (ret < 0) |
91 | goto reset_caps; |
92 | |
93 | lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n" , |
94 | (int)sizeof(intel_dp->lttpr_common_caps), |
95 | intel_dp->lttpr_common_caps); |
96 | |
97 | /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */ |
98 | if (intel_dp->lttpr_common_caps[0] < 0x14) |
99 | goto reset_caps; |
100 | |
101 | return true; |
102 | |
103 | reset_caps: |
104 | intel_dp_reset_lttpr_common_caps(intel_dp); |
105 | return false; |
106 | } |
107 | |
108 | static bool |
109 | intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) |
110 | { |
111 | u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT : |
112 | DP_PHY_REPEATER_MODE_NON_TRANSPARENT; |
113 | |
114 | return drm_dp_dpcd_write(aux: &intel_dp->aux, DP_PHY_REPEATER_MODE, buffer: &val, size: 1) == 1; |
115 | } |
116 | |
/*
 * Detect LTTPRs and switch them to non-transparent link training mode.
 *
 * Returns the number of LTTPRs detected and switched to non-transparent
 * mode, or 0 if none were detected or the transparent-mode fall-back is
 * used.
 */
static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;
	int i;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	/* Negative means an invalid/unsupported repeater count was reported. */
	lttpr_count = drm_dp_lttpr_count(cap: intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, enable: true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, enable: false)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, enable: true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	/* Cache the per-PHY capabilities of every detected LTTPR. */
	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));

	return lttpr_count;
}
164 | |
/*
 * Read the DPRX receiver capabilities into @dpcd. On platforms where LTTPR
 * detection is allowed, probe the LTTPR capability register range first.
 *
 * Returns 0 on success (a no-op for eDP) or -EIO on an AUX failure.
 */
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* No LTTPR/DPRX cap read-out is done here for eDP. */
	if (intel_dp_is_edp(intel_dp))
		return 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
		if (drm_dp_dpcd_probe(aux: &intel_dp->aux,
				      DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
			return -EIO;

	if (drm_dp_read_dpcd_caps(aux: &intel_dp->aux, dpcd))
		return -EIO;

	return 0;
}
186 | |
187 | /** |
188 | * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode |
189 | * @intel_dp: Intel DP struct |
190 | * |
191 | * Read the LTTPR common and DPRX capabilities and switch to non-transparent |
192 | * link training mode if any is detected and read the PHY capabilities for all |
193 | * detected LTTPRs. In case of an LTTPR detection error or if the number of |
194 | * LTTPRs is more than is supported (8), fall back to the no-LTTPR, |
195 | * transparent mode link training mode. |
196 | * |
197 | * Returns: |
198 | * >0 if LTTPRs were detected and the non-transparent LT mode was set. The |
199 | * DPRX capabilities are read out. |
200 | * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a |
201 | * detection failure and the transparent LT mode was set. The DPRX |
202 | * capabilities are read out. |
203 | * <0 Reading out the DPRX capabilities failed. |
204 | */ |
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		int err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err != 0)
			return err;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(aux: &intel_dp->aux, dpcd: intel_dp->dpcd)) {
		/* Without a valid DPRX read-out the LTTPR caps are stale too. */
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
236 | |
237 | static u8 dp_voltage_max(u8 preemph) |
238 | { |
239 | switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) { |
240 | case DP_TRAIN_PRE_EMPH_LEVEL_0: |
241 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; |
242 | case DP_TRAIN_PRE_EMPH_LEVEL_1: |
243 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; |
244 | case DP_TRAIN_PRE_EMPH_LEVEL_2: |
245 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_1; |
246 | case DP_TRAIN_PRE_EMPH_LEVEL_3: |
247 | default: |
248 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_0; |
249 | } |
250 | } |
251 | |
252 | static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp, |
253 | enum drm_dp_phy dp_phy) |
254 | { |
255 | const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); |
256 | |
257 | if (drm_dp_lttpr_voltage_swing_level_3_supported(caps: phy_caps)) |
258 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; |
259 | else |
260 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; |
261 | } |
262 | |
263 | static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp, |
264 | enum drm_dp_phy dp_phy) |
265 | { |
266 | const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); |
267 | |
268 | if (drm_dp_lttpr_pre_emphasis_level_3_supported(caps: phy_caps)) |
269 | return DP_TRAIN_PRE_EMPH_LEVEL_3; |
270 | else |
271 | return DP_TRAIN_PRE_EMPH_LEVEL_2; |
272 | } |
273 | |
/*
 * Return whether the DPTX directly upstream of @dp_phy is the source
 * itself: true if there are no LTTPRs (so the PHY must be the DPRX) or if
 * @dp_phy is the LTTPR adjacent to the source.
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(cap: intel_dp->lttpr_common_caps);

	/* Without LTTPRs the only PHY that may be trained is the DPRX. */
	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
285 | |
/*
 * Max voltage swing to use while training @dp_phy, limited by the DPTX
 * (source or LTTPR) directly upstream of it.
 */
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy: dp_phy + 1);

	/* Only swing levels 2 and 3 are valid maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}
308 | |
/*
 * Max pre-emphasis to use while training @dp_phy, limited by the DPTX
 * (source or LTTPR) directly upstream of it.
 */
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy: dp_phy + 1);

	/* Only pre-emphasis levels 2 and 3 are valid maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}
330 | |
331 | static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, |
332 | enum drm_dp_phy dp_phy) |
333 | { |
334 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
335 | |
336 | return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || |
337 | DISPLAY_VER(i915) >= 11; |
338 | } |
339 | |
/*
 * 128b/132b: TX FFE preset requested by the sink for @lane, or — without
 * per-lane signal level control — the highest preset requested on any lane.
 */
static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
						 const struct intel_crtc_state *crtc_state,
						 enum drm_dp_phy dp_phy,
						 const u8 link_status[DP_LINK_STATUS_SIZE],
						 int lane)
{
	u8 tx_ffe = 0;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		/* Clamp to the last active lane. */
		lane = min(lane, crtc_state->lane_count - 1);
		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++)
			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
	}

	return tx_ffe;
}
359 | |
/*
 * 8b/10b: compute the DP_TRAINING_LANEx_SET vswing/pre-emphasis byte for
 * @lane from the sink's adjustment requests, capping both levels at the
 * upstream DPTX's limits and setting the MAX_*_REACHED flags when capped.
 */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		/* Clamp to the last active lane. */
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		/* Without per-lane control use the highest request of any lane. */
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* The vswing level is further limited by the chosen pre-emphasis. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}
396 | |
397 | static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp, |
398 | const struct intel_crtc_state *crtc_state, |
399 | enum drm_dp_phy dp_phy, |
400 | const u8 link_status[DP_LINK_STATUS_SIZE], |
401 | int lane) |
402 | { |
403 | if (intel_dp_is_uhbr(crtc_state)) |
404 | return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state, |
405 | dp_phy, link_status, lane); |
406 | else |
407 | return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state, |
408 | dp_phy, link_status, lane); |
409 | } |
410 | |
/*
 * Format string and per-lane (4 lanes) argument expanders for logging the
 * sink's vswing/pre-emphasis/TX FFE adjustment requests.
 */
#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
433 | |
/*
 * Update intel_dp->train_set for all four lanes from the adjustment
 * requests in @link_status, logging the requested levels first.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing request: " TRAIN_REQ_FMT ", "
		       "pre-emphasis request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_VSWING_ARGS(link_status),
		       TRAIN_REQ_PREEMPH_ARGS(link_status));
	}

	/* All 4 entries are always filled, regardless of the active lane count. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] =
			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
						       dp_phy, link_status, lane);
}
463 | |
464 | static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp, |
465 | enum drm_dp_phy dp_phy) |
466 | { |
467 | return dp_phy == DP_PHY_DPRX ? |
468 | DP_TRAINING_PATTERN_SET : |
469 | DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy); |
470 | } |
471 | |
/*
 * Program @dp_train_pat on the source and write the pattern together with
 * the per-lane training set to the PHY's DPCD registers in one transfer.
 * Returns true if the whole DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_phy, dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(aux: &intel_dp->aux, offset: reg, buffer: buf, size: len) == len;
}
492 | |
493 | static char dp_training_pattern_name(u8 train_pat) |
494 | { |
495 | switch (train_pat) { |
496 | case DP_TRAINING_PATTERN_1: |
497 | case DP_TRAINING_PATTERN_2: |
498 | case DP_TRAINING_PATTERN_3: |
499 | return '0' + train_pat; |
500 | case DP_TRAINING_PATTERN_4: |
501 | return '4'; |
502 | default: |
503 | MISSING_CASE(train_pat); |
504 | return '?'; |
505 | } |
506 | } |
507 | |
/*
 * Log the training pattern about to be used (unless the pattern is being
 * disabled) and program it on the source via the platform hook.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	u8 train_pat = intel_dp_training_pattern_symbol(pattern: dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n",
		       dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
522 | |
/*
 * Format string and per-lane (4 lanes) argument expanders for logging the
 * programmed training set, including a "(max)" marker for capped levels.
 */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
	((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
#define TRAIN_SET_TX_FFE_ARGS(train_set) \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[3])
547 | |
/*
 * Log the current per-lane training set and, when the source itself drives
 * this PHY, program the corresponding signal levels on the encoder.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE presets: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing levels: " TRAIN_SET_FMT ", "
		       "pre-emphasis levels: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
		       TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
	}

	/* LTTPR-driven PHYs get their levels via DPCD writes, not the encoder. */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		encoder->set_signal_levels(encoder, crtc_state);
}
573 | |
/*
 * Zero the training set, program the cleared signal levels and start the
 * given training pattern. Returns true if the DPCD write succeeded.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
584 | |
/*
 * Program the current signal levels on the source and write the per-lane
 * training set to the PHY's DP_TRAINING_LANE0_SET registers. Returns true
 * if all lane bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(aux: &intel_dp->aux, offset: reg,
				buffer: intel_dp->train_set, size: crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}
602 | |
603 | /* 128b/132b */ |
604 | static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane) |
605 | { |
606 | return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) == |
607 | DP_TX_FFE_PRESET_VALUE_MASK; |
608 | } |
609 | |
610 | /* |
611 | * 8b/10b |
612 | * |
613 | * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to |
614 | * have self contradicting tests around this area. |
615 | * |
616 | * In lieu of better ideas let's just stop when we've reached the max supported |
617 | * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on |
618 | * whether vswing level 3 is supported or not. |
619 | */ |
620 | static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane) |
621 | { |
622 | u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >> |
623 | DP_TRAIN_VOLTAGE_SWING_SHIFT; |
624 | u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >> |
625 | DP_TRAIN_PRE_EMPHASIS_SHIFT; |
626 | |
627 | if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0) |
628 | return false; |
629 | |
630 | if (v + p != 3) |
631 | return false; |
632 | |
633 | return true; |
634 | } |
635 | |
636 | static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp, |
637 | const struct intel_crtc_state *crtc_state) |
638 | { |
639 | int lane; |
640 | |
641 | for (lane = 0; lane < crtc_state->lane_count; lane++) { |
642 | u8 train_set_lane = intel_dp->train_set[lane]; |
643 | |
644 | if (intel_dp_is_uhbr(crtc_state)) { |
645 | if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane)) |
646 | return false; |
647 | } else { |
648 | if (!intel_dp_lane_max_vswing_reached(train_set_lane)) |
649 | return false; |
650 | } |
651 | } |
652 | |
653 | return true; |
654 | } |
655 | |
/*
 * Write DP_DOWNSPREAD_CTRL (MSA timing ignore, enabled for VRR flipline)
 * and the following DPCD byte selecting 8b/10b vs 128b/132b channel coding
 * in a single two-byte transfer.
 */
static void
intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	u8 link_config[2];

	link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = intel_dp_is_uhbr(crtc_state) ?
			 DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(aux: &intel_dp->aux, DP_DOWNSPREAD_CTRL, buffer: link_config, size: 2);
}
667 | |
/*
 * Program the link rate and lane count on the sink: via DP_LINK_BW_SET if
 * @link_bw is non-zero, otherwise via the eDP v1.4+ DP_LINK_RATE_SET
 * method using @rate_select.
 */
static void
intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    u8 link_bw, u8 rate_select)
{
	u8 lane_count = crtc_state->lane_count;

	if (crtc_state->enhanced_framing)
		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	if (link_bw) {
		/* DP and eDP v1.3 and earlier link bw set method. */
		u8 link_config[] = { link_bw, lane_count };

		drm_dp_dpcd_write(aux: &intel_dp->aux, DP_LINK_BW_SET, buffer: link_config,
				  ARRAY_SIZE(link_config));
	} else {
		/*
		 * eDP v1.4 and later link rate set method.
		 *
		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
		 *
		 * eDP v1.5 sinks allow choosing either, and the last choice
		 * shall be active.
		 */
		drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_LANE_COUNT_SET, value: lane_count);
		drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_LINK_RATE_SET, value: rate_select);
	}
}
698 | |
/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 *
 * Always returns true.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	/* Optional platform hook, e.g. to retrain/enable the port. */
	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, port_clock: crtc_state->port_clock,
			      link_bw: &link_bw, rate_select: &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on varius TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		/* Read only to let the MUX snoop; the result is discarded. */
		drm_dp_dpcd_read(aux: &intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 buffer: sink_rates, size: sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}
752 | |
/*
 * Return whether the sink's per-lane adjustment requests differ between
 * two link status read-outs.
 */
static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		u8 old, new;

		if (intel_dp_is_uhbr(crtc_state)) {
			/* 128b/132b: compare the TX FFE preset requests. */
			old = drm_dp_get_adjust_tx_ffe_preset(link_status: old_link_status, lane);
			new = drm_dp_get_adjust_tx_ffe_preset(link_status: new_link_status, lane);
		} else {
			/* 8b/10b: compare the combined vswing/pre-emphasis requests. */
			old = drm_dp_get_adjust_request_voltage(link_status: old_link_status, lane) |
				drm_dp_get_adjust_request_pre_emphasis(link_status: old_link_status, lane);
			new = drm_dp_get_adjust_request_voltage(link_status: new_link_status, lane) |
				drm_dp_get_adjust_request_pre_emphasis(link_status: new_link_status, lane);
		}

		if (old != new)
			return true;
	}

	return false;
}
778 | |
/*
 * Dump the raw link status bytes (lane status, alignment, sink status and
 * adjustment requests) at debug level.
 */
void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}
788 | |
/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 *
 * Returns true once all active lanes report clock recovery; false if the
 * pattern could not be started, the link status could not be read, the
 * same voltage was tried 5 times, max vswing was reached, or the overall
 * retry limit was exhausted.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	/* Per-PHY wait between setting the pattern and reading the status. */
	delay_us = drm_dp_read_clock_recovery_delay(aux: &intel_dp->aux,
						    dpcd: intel_dp->dpcd, dp_phy,
						    uhbr: intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		usleep_range(min: delay_us, max: 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(aux: &intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, lane_count: crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		/* Sink kept requesting the same settings; no point continuing. */
		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		/* All lanes were already driven at their max on the previous try. */
		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		/* Count consecutive iterations with unchanged adjust requests. */
		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, new_link_status: link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}
881 | |
882 | /* |
883 | * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2 |
884 | * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or |
885 | * 1.2 devices that support it, TPS2 otherwise. |
886 | */ |
887 | static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, |
888 | const struct intel_crtc_state *crtc_state, |
889 | enum drm_dp_phy dp_phy) |
890 | { |
891 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
892 | bool source_tps3, sink_tps3, source_tps4, sink_tps4; |
893 | |
894 | /* UHBR+ use separate 128b/132b TPS2 */ |
895 | if (intel_dp_is_uhbr(crtc_state)) |
896 | return DP_TRAINING_PATTERN_2; |
897 | |
898 | /* |
899 | * TPS4 support is mandatory for all downstream devices that |
900 | * support HBR3. There are no known eDP panels that support |
901 | * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification. |
902 | * LTTPRs must support TPS4. |
903 | */ |
904 | source_tps4 = intel_dp_source_supports_tps4(i915); |
905 | sink_tps4 = dp_phy != DP_PHY_DPRX || |
906 | drm_dp_tps4_supported(dpcd: intel_dp->dpcd); |
907 | if (source_tps4 && sink_tps4) { |
908 | return DP_TRAINING_PATTERN_4; |
909 | } else if (crtc_state->port_clock == 810000) { |
910 | if (!source_tps4) |
911 | lt_dbg(intel_dp, dp_phy, |
912 | "8.1 Gbps link rate without source TPS4 support\n" ); |
913 | if (!sink_tps4) |
914 | lt_dbg(intel_dp, dp_phy, |
915 | "8.1 Gbps link rate without sink TPS4 support\n" ); |
916 | } |
917 | |
918 | /* |
919 | * TPS3 support is mandatory for downstream devices that |
920 | * support HBR2. However, not all sinks follow the spec. |
921 | */ |
922 | source_tps3 = intel_dp_source_supports_tps3(i915); |
923 | sink_tps3 = dp_phy != DP_PHY_DPRX || |
924 | drm_dp_tps3_supported(dpcd: intel_dp->dpcd); |
925 | if (source_tps3 && sink_tps3) { |
926 | return DP_TRAINING_PATTERN_3; |
927 | } else if (crtc_state->port_clock >= 540000) { |
928 | if (!source_tps3) |
929 | lt_dbg(intel_dp, dp_phy, |
930 | ">=5.4/6.48 Gbps link rate without source TPS3 support\n" ); |
931 | if (!sink_tps3) |
932 | lt_dbg(intel_dp, dp_phy, |
933 | ">=5.4/6.48 Gbps link rate without sink TPS3 support\n" ); |
934 | } |
935 | |
936 | return DP_TRAINING_PATTERN_2; |
937 | } |
938 | |
939 | /* |
940 | * Perform the link training channel equalization phase on the given DP PHY |
941 | * using one of training pattern 2, 3 or 4 depending on the source and |
942 | * sink capabilities. |
943 | */ |
944 | static bool |
945 | intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, |
946 | const struct intel_crtc_state *crtc_state, |
947 | enum drm_dp_phy dp_phy) |
948 | { |
949 | int tries; |
950 | u32 training_pattern; |
951 | u8 link_status[DP_LINK_STATUS_SIZE]; |
952 | bool channel_eq = false; |
953 | int delay_us; |
954 | |
955 | delay_us = drm_dp_read_channel_eq_delay(aux: &intel_dp->aux, |
956 | dpcd: intel_dp->dpcd, dp_phy, |
957 | uhbr: intel_dp_is_uhbr(crtc_state)); |
958 | |
959 | training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy); |
960 | /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ |
961 | if (training_pattern != DP_TRAINING_PATTERN_4) |
962 | training_pattern |= DP_LINK_SCRAMBLING_DISABLE; |
963 | |
964 | /* channel equalization */ |
965 | if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, |
966 | dp_train_pat: training_pattern)) { |
967 | lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n" ); |
968 | return false; |
969 | } |
970 | |
971 | for (tries = 0; tries < 5; tries++) { |
972 | usleep_range(min: delay_us, max: 2 * delay_us); |
973 | |
974 | if (drm_dp_dpcd_read_phy_link_status(aux: &intel_dp->aux, dp_phy, |
975 | link_status) < 0) { |
976 | lt_err(intel_dp, dp_phy, "Failed to get link status\n" ); |
977 | break; |
978 | } |
979 | |
980 | /* Make sure clock is still ok */ |
981 | if (!drm_dp_clock_recovery_ok(link_status, |
982 | lane_count: crtc_state->lane_count)) { |
983 | intel_dp_dump_link_status(intel_dp, dp_phy, link_status); |
984 | lt_dbg(intel_dp, dp_phy, |
985 | "Clock recovery check failed, cannot continue channel equalization\n" ); |
986 | break; |
987 | } |
988 | |
989 | if (drm_dp_channel_eq_ok(link_status, |
990 | lane_count: crtc_state->lane_count)) { |
991 | channel_eq = true; |
992 | lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n" ); |
993 | break; |
994 | } |
995 | |
996 | /* Update training set as requested by target */ |
997 | intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, |
998 | link_status); |
999 | if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { |
1000 | lt_err(intel_dp, dp_phy, "Failed to update link training\n" ); |
1001 | break; |
1002 | } |
1003 | } |
1004 | |
1005 | /* Try 5 times, else fail and try at lower BW */ |
1006 | if (tries == 5) { |
1007 | intel_dp_dump_link_status(intel_dp, dp_phy, link_status); |
1008 | lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n" ); |
1009 | } |
1010 | |
1011 | return channel_eq; |
1012 | } |
1013 | |
1014 | static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp, |
1015 | enum drm_dp_phy dp_phy) |
1016 | { |
1017 | int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); |
1018 | u8 val = DP_TRAINING_PATTERN_DISABLE; |
1019 | |
1020 | return drm_dp_dpcd_write(aux: &intel_dp->aux, offset: reg, buffer: &val, size: 1) == 1; |
1021 | } |
1022 | |
1023 | static int |
1024 | intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, |
1025 | const struct intel_crtc_state *crtc_state) |
1026 | { |
1027 | u8 sink_status; |
1028 | int ret; |
1029 | |
1030 | ret = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_SINK_STATUS, valuep: &sink_status); |
1031 | if (ret != 1) { |
1032 | lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n" ); |
1033 | return ret < 0 ? ret : -EIO; |
1034 | } |
1035 | |
1036 | return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0; |
1037 | } |
1038 | |
1039 | /** |
1040 | * intel_dp_stop_link_train - stop link training |
1041 | * @intel_dp: DP struct |
1042 | * @crtc_state: state for CRTC attached to the encoder |
1043 | * |
1044 | * Stop the link training of the @intel_dp port, disabling the training |
1045 | * pattern in the sink's DPCD, and disabling the test pattern symbol |
1046 | * generation on the port. |
1047 | * |
1048 | * What symbols are output on the port after this point is |
1049 | * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern |
1050 | * with the pipe being disabled, on older platforms it's HW specific if/how an |
1051 | * idle pattern is generated, as the pipe is already enabled here for those. |
1052 | * |
1053 | * This function must be called after intel_dp_start_link_train(). |
1054 | */ |
1055 | void intel_dp_stop_link_train(struct intel_dp *intel_dp, |
1056 | const struct intel_crtc_state *crtc_state) |
1057 | { |
1058 | intel_dp->link_trained = true; |
1059 | |
1060 | intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy: DP_PHY_DPRX); |
1061 | intel_dp_program_link_training_pattern(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX, |
1062 | DP_TRAINING_PATTERN_DISABLE); |
1063 | |
1064 | if (intel_dp_is_uhbr(crtc_state) && |
1065 | wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { |
1066 | lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n" ); |
1067 | } |
1068 | } |
1069 | |
1070 | static bool |
1071 | intel_dp_link_train_phy(struct intel_dp *intel_dp, |
1072 | const struct intel_crtc_state *crtc_state, |
1073 | enum drm_dp_phy dp_phy) |
1074 | { |
1075 | bool ret = false; |
1076 | |
1077 | if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) |
1078 | goto out; |
1079 | |
1080 | if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy)) |
1081 | goto out; |
1082 | |
1083 | ret = true; |
1084 | |
1085 | out: |
1086 | lt_dbg(intel_dp, dp_phy, |
1087 | "Link Training %s at link rate = %d, lane count = %d\n" , |
1088 | ret ? "passed" : "failed" , |
1089 | crtc_state->port_clock, crtc_state->lane_count); |
1090 | |
1091 | return ret; |
1092 | } |
1093 | |
1094 | static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, |
1095 | const struct intel_crtc_state *crtc_state) |
1096 | { |
1097 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
1098 | |
1099 | if (!intel_digital_port_connected(encoder: &dp_to_dig_port(intel_dp)->base)) { |
1100 | lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n" ); |
1101 | return; |
1102 | } |
1103 | |
1104 | if (intel_dp->hobl_active) { |
1105 | lt_dbg(intel_dp, DP_PHY_DPRX, |
1106 | "Link Training failed with HOBL active, not enabling it from now on\n" ); |
1107 | intel_dp->hobl_failed = true; |
1108 | } else if (intel_dp_get_link_train_fallback_values(intel_dp, |
1109 | link_rate: crtc_state->port_clock, |
1110 | lane_count: crtc_state->lane_count)) { |
1111 | return; |
1112 | } |
1113 | |
1114 | /* Schedule a Hotplug Uevent to userspace to start modeset */ |
1115 | intel_dp_queue_modeset_retry_work(connector: intel_connector); |
1116 | } |
1117 | |
1118 | /* Perform the link training on all LTTPRs and the DPRX on a link. */ |
1119 | static bool |
1120 | intel_dp_link_train_all_phys(struct intel_dp *intel_dp, |
1121 | const struct intel_crtc_state *crtc_state, |
1122 | int lttpr_count) |
1123 | { |
1124 | bool ret = true; |
1125 | int i; |
1126 | |
1127 | for (i = lttpr_count - 1; i >= 0; i--) { |
1128 | enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); |
1129 | |
1130 | ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy); |
1131 | intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy); |
1132 | |
1133 | if (!ret) |
1134 | break; |
1135 | } |
1136 | |
1137 | if (ret) |
1138 | ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX); |
1139 | |
1140 | if (intel_dp->set_idle_link_train) |
1141 | intel_dp->set_idle_link_train(intel_dp, crtc_state); |
1142 | |
1143 | return ret; |
1144 | } |
1145 | |
1146 | /* |
1147 | * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1) |
1148 | */ |
1149 | static bool |
1150 | intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, |
1151 | const struct intel_crtc_state *crtc_state) |
1152 | { |
1153 | u8 link_status[DP_LINK_STATUS_SIZE]; |
1154 | int delay_us; |
1155 | int try, max_tries = 20; |
1156 | unsigned long deadline; |
1157 | bool timeout = false; |
1158 | |
1159 | /* |
1160 | * Reset signal levels. Start transmitting 128b/132b TPS1. |
1161 | * |
1162 | * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1 |
1163 | * in DP_TRAINING_PATTERN_SET. |
1164 | */ |
1165 | if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX, |
1166 | DP_TRAINING_PATTERN_1)) { |
1167 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n" ); |
1168 | return false; |
1169 | } |
1170 | |
1171 | delay_us = drm_dp_128b132b_read_aux_rd_interval(aux: &intel_dp->aux); |
1172 | |
1173 | /* Read the initial TX FFE settings. */ |
1174 | if (drm_dp_dpcd_read_link_status(aux: &intel_dp->aux, status: link_status) < 0) { |
1175 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n" ); |
1176 | return false; |
1177 | } |
1178 | |
1179 | /* Update signal levels and training set as requested. */ |
1180 | intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX, link_status); |
1181 | if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX)) { |
1182 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n" ); |
1183 | return false; |
1184 | } |
1185 | |
1186 | /* Start transmitting 128b/132b TPS2. */ |
1187 | if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX, |
1188 | DP_TRAINING_PATTERN_2)) { |
1189 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n" ); |
1190 | return false; |
1191 | } |
1192 | |
1193 | /* Time budget for the LANEx_EQ_DONE Sequence */ |
1194 | deadline = jiffies + msecs_to_jiffies_timeout(m: 400); |
1195 | |
1196 | for (try = 0; try < max_tries; try++) { |
1197 | usleep_range(min: delay_us, max: 2 * delay_us); |
1198 | |
1199 | /* |
1200 | * The delay may get updated. The transmitter shall read the |
1201 | * delay before link status during link training. |
1202 | */ |
1203 | delay_us = drm_dp_128b132b_read_aux_rd_interval(aux: &intel_dp->aux); |
1204 | |
1205 | if (drm_dp_dpcd_read_link_status(aux: &intel_dp->aux, status: link_status) < 0) { |
1206 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n" ); |
1207 | return false; |
1208 | } |
1209 | |
1210 | if (drm_dp_128b132b_link_training_failed(link_status)) { |
1211 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1212 | lt_err(intel_dp, DP_PHY_DPRX, |
1213 | "Downstream link training failure\n" ); |
1214 | return false; |
1215 | } |
1216 | |
1217 | if (drm_dp_128b132b_lane_channel_eq_done(link_status, lane_count: crtc_state->lane_count)) { |
1218 | lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n" ); |
1219 | break; |
1220 | } |
1221 | |
1222 | if (timeout) { |
1223 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1224 | lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n" ); |
1225 | return false; |
1226 | } |
1227 | |
1228 | if (time_after(jiffies, deadline)) |
1229 | timeout = true; /* try one last time after deadline */ |
1230 | |
1231 | /* Update signal levels and training set as requested. */ |
1232 | intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX, link_status); |
1233 | if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy: DP_PHY_DPRX)) { |
1234 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n" ); |
1235 | return false; |
1236 | } |
1237 | } |
1238 | |
1239 | if (try == max_tries) { |
1240 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1241 | lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n" ); |
1242 | return false; |
1243 | } |
1244 | |
1245 | for (;;) { |
1246 | if (time_after(jiffies, deadline)) |
1247 | timeout = true; /* try one last time after deadline */ |
1248 | |
1249 | if (drm_dp_dpcd_read_link_status(aux: &intel_dp->aux, status: link_status) < 0) { |
1250 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n" ); |
1251 | return false; |
1252 | } |
1253 | |
1254 | if (drm_dp_128b132b_link_training_failed(link_status)) { |
1255 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1256 | lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n" ); |
1257 | return false; |
1258 | } |
1259 | |
1260 | if (drm_dp_128b132b_eq_interlane_align_done(link_status)) { |
1261 | lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n" ); |
1262 | break; |
1263 | } |
1264 | |
1265 | if (timeout) { |
1266 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1267 | lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n" ); |
1268 | return false; |
1269 | } |
1270 | |
1271 | usleep_range(min: 2000, max: 3000); |
1272 | } |
1273 | |
1274 | return true; |
1275 | } |
1276 | |
1277 | /* |
1278 | * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2) |
1279 | */ |
1280 | static bool |
1281 | intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp, |
1282 | const struct intel_crtc_state *crtc_state, |
1283 | int lttpr_count) |
1284 | { |
1285 | u8 link_status[DP_LINK_STATUS_SIZE]; |
1286 | unsigned long deadline; |
1287 | |
1288 | if (drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_TRAINING_PATTERN_SET, |
1289 | DP_TRAINING_PATTERN_2_CDS) != 1) { |
1290 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n" ); |
1291 | return false; |
1292 | } |
1293 | |
1294 | /* Time budget for the LANEx_CDS_DONE Sequence */ |
1295 | deadline = jiffies + msecs_to_jiffies_timeout(m: (lttpr_count + 1) * 20); |
1296 | |
1297 | for (;;) { |
1298 | bool timeout = false; |
1299 | |
1300 | if (time_after(jiffies, deadline)) |
1301 | timeout = true; /* try one last time after deadline */ |
1302 | |
1303 | usleep_range(min: 2000, max: 3000); |
1304 | |
1305 | if (drm_dp_dpcd_read_link_status(aux: &intel_dp->aux, status: link_status) < 0) { |
1306 | lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n" ); |
1307 | return false; |
1308 | } |
1309 | |
1310 | if (drm_dp_128b132b_eq_interlane_align_done(link_status) && |
1311 | drm_dp_128b132b_cds_interlane_align_done(link_status) && |
1312 | drm_dp_128b132b_lane_symbol_locked(link_status, lane_count: crtc_state->lane_count)) { |
1313 | lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n" ); |
1314 | break; |
1315 | } |
1316 | |
1317 | if (drm_dp_128b132b_link_training_failed(link_status)) { |
1318 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1319 | lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n" ); |
1320 | return false; |
1321 | } |
1322 | |
1323 | if (timeout) { |
1324 | intel_dp_dump_link_status(intel_dp, dp_phy: DP_PHY_DPRX, link_status); |
1325 | lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n" ); |
1326 | return false; |
1327 | } |
1328 | } |
1329 | |
1330 | return true; |
1331 | } |
1332 | |
1333 | /* |
1334 | * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.) |
1335 | */ |
1336 | static bool |
1337 | intel_dp_128b132b_link_train(struct intel_dp *intel_dp, |
1338 | const struct intel_crtc_state *crtc_state, |
1339 | int lttpr_count) |
1340 | { |
1341 | bool passed = false; |
1342 | |
1343 | if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { |
1344 | lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n" ); |
1345 | return false; |
1346 | } |
1347 | |
1348 | if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) && |
1349 | intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count)) |
1350 | passed = true; |
1351 | |
1352 | lt_dbg(intel_dp, DP_PHY_DPRX, |
1353 | "128b/132b Link Training %s at link rate = %d, lane count = %d\n" , |
1354 | passed ? "passed" : "failed" , |
1355 | crtc_state->port_clock, crtc_state->lane_count); |
1356 | |
1357 | return passed; |
1358 | } |
1359 | |
1360 | /** |
1361 | * intel_dp_start_link_train - start link training |
1362 | * @intel_dp: DP struct |
1363 | * @crtc_state: state for CRTC attached to the encoder |
1364 | * |
1365 | * Start the link training of the @intel_dp port, scheduling a fallback |
1366 | * retraining with reduced link rate/lane parameters if the link training |
1367 | * fails. |
1368 | * After calling this function intel_dp_stop_link_train() must be called. |
1369 | */ |
1370 | void intel_dp_start_link_train(struct intel_dp *intel_dp, |
1371 | const struct intel_crtc_state *crtc_state) |
1372 | { |
1373 | struct drm_i915_private *i915 = dp_to_i915(intel_dp); |
1374 | bool passed; |
1375 | |
1376 | /* |
1377 | * TODO: Reiniting LTTPRs here won't be needed once proper connector |
1378 | * HW state readout is added. |
1379 | */ |
1380 | int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); |
1381 | |
1382 | if (lttpr_count < 0) |
1383 | /* Still continue with enabling the port and link training. */ |
1384 | lttpr_count = 0; |
1385 | |
1386 | intel_dp_prepare_link_train(intel_dp, crtc_state); |
1387 | |
1388 | if (intel_dp_is_uhbr(crtc_state)) |
1389 | passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count); |
1390 | else |
1391 | passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count); |
1392 | |
1393 | /* |
1394 | * Ignore the link failure in CI |
1395 | * |
1396 | * In fixed enviroments like CI, sometimes unexpected long HPDs are |
1397 | * generated by the displays. If ignore_long_hpd flag is set, such long |
1398 | * HPDs are ignored. And probably as a consequence of these ignored |
1399 | * long HPDs, subsequent link trainings are failed resulting into CI |
1400 | * execution failures. |
1401 | * |
1402 | * For test cases which rely on the link training or processing of HPDs |
1403 | * ignore_long_hpd flag can unset from the testcase. |
1404 | */ |
1405 | if (!passed && i915->display.hotplug.ignore_long_hpd) { |
1406 | lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n" ); |
1407 | return; |
1408 | } |
1409 | |
1410 | if (!passed) |
1411 | intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); |
1412 | } |
1413 | |
1414 | void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, |
1415 | const struct intel_crtc_state *crtc_state) |
1416 | { |
1417 | /* |
1418 | * VIDEO_DIP_CTL register bit 31 should be set to '0' to not |
1419 | * disable SDP CRC. This is applicable for Display version 13. |
1420 | * Default value of bit 31 is '0' hence discarding the write |
1421 | * TODO: Corrective actions on SDP corruption yet to be defined |
1422 | */ |
1423 | if (!intel_dp_is_uhbr(crtc_state)) |
1424 | return; |
1425 | |
1426 | /* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */ |
1427 | drm_dp_dpcd_writeb(aux: &intel_dp->aux, |
1428 | DP_SDP_ERROR_DETECTION_CONFIGURATION, |
1429 | DP_SDP_CRC16_128B132B_EN); |
1430 | |
1431 | lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n" ); |
1432 | } |
1433 | |