1 | /* |
2 | * Copyright 2007-8 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: Dave Airlie |
24 | * Alex Deucher |
25 | * Jerome Glisse |
26 | */ |
27 | |
28 | #include <drm/radeon_drm.h> |
29 | #include "radeon.h" |
30 | |
31 | #include "atom.h" |
32 | #include "atom-bits.h" |
33 | #include <drm/display/drm_dp_helper.h> |
34 | |
35 | /* move these to drm_dp_helper.c/h */ |
36 | #define DP_LINK_CONFIGURATION_SIZE 9 |
37 | #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE |
38 | |
39 | static char *voltage_names[] = { |
40 | "0.4V" , "0.6V" , "0.8V" , "1.2V" |
41 | }; |
42 | static char *pre_emph_names[] = { |
43 | "0dB" , "3.5dB" , "6dB" , "9.5dB" |
44 | }; |
45 | |
46 | /***** radeon AUX functions *****/ |
47 | |
48 | /* Atom needs data in little endian format so swap as appropriate when copying |
49 | * data to or from atom. Note that atom operates on dw units. |
50 | * |
51 | * Use to_le=true when sending data to atom and provide at least |
52 | * ALIGN(num_bytes,4) bytes in the dst buffer. |
53 | * |
54 | * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) |
 * bytes in the src buffer.
56 | */ |
57 | void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) |
58 | { |
59 | #ifdef __BIG_ENDIAN |
60 | u32 src_tmp[5], dst_tmp[5]; |
61 | int i; |
62 | u8 align_num_bytes = ALIGN(num_bytes, 4); |
63 | |
64 | if (to_le) { |
65 | memcpy(src_tmp, src, num_bytes); |
66 | for (i = 0; i < align_num_bytes / 4; i++) |
67 | dst_tmp[i] = cpu_to_le32(src_tmp[i]); |
68 | memcpy(dst, dst_tmp, align_num_bytes); |
69 | } else { |
70 | memcpy(src_tmp, src, align_num_bytes); |
71 | for (i = 0; i < align_num_bytes / 4; i++) |
72 | dst_tmp[i] = le32_to_cpu(src_tmp[i]); |
73 | memcpy(dst, dst_tmp, num_bytes); |
74 | } |
75 | #else |
76 | memcpy(dst, src, num_bytes); |
77 | #endif |
78 | } |
79 | |
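/* Parameter block for the ProcessAuxChannelTransaction atom command table;
 * v2 adds the HPD pin field programmed on DCE4 and newer asics.
 */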
80 | union aux_channel_transaction { |
81 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; |
82 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; |
83 | }; |
84 | |
85 | static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, |
86 | u8 *send, int send_bytes, |
87 | u8 *recv, int recv_size, |
88 | u8 delay, u8 *ack) |
89 | { |
90 | struct drm_device *dev = chan->dev; |
91 | struct radeon_device *rdev = dev->dev_private; |
92 | union aux_channel_transaction args; |
93 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); |
94 | unsigned char *base; |
95 | int recv_bytes; |
96 | int r = 0; |
97 | |
98 | memset(&args, 0, sizeof(args)); |
99 | |
100 | mutex_lock(&chan->mutex); |
101 | mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); |
102 | |
103 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
104 | |
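	/* stage the request bytes in the atom scratch buffer; the parameter
	 * block below carries the scratch offsets of the request and reply
	 * data areas
	 */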
	radeon_atom_copy_swap(base, send, send_bytes, true);
106 | |
107 | args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); |
108 | args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); |
109 | args.v1.ucDataOutLen = 0; |
110 | args.v1.ucChannelID = chan->rec.i2c_id; |
111 | args.v1.ucDelay = delay / 10; |
112 | if (ASIC_IS_DCE4(rdev)) |
113 | args.v2.ucHPD_ID = chan->rec.hpd; |
114 | |
115 | atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); |
116 | |
117 | *ack = args.v1.ucReplyStatus; |
118 | |
119 | /* timeout */ |
120 | if (args.v1.ucReplyStatus == 1) { |
121 | DRM_DEBUG_KMS("dp_aux_ch timeout\n" ); |
122 | r = -ETIMEDOUT; |
123 | goto done; |
124 | } |
125 | |
126 | /* flags not zero */ |
127 | if (args.v1.ucReplyStatus == 2) { |
128 | DRM_DEBUG_KMS("dp_aux_ch flags not zero\n" ); |
129 | r = -EIO; |
130 | goto done; |
131 | } |
132 | |
133 | /* error */ |
134 | if (args.v1.ucReplyStatus == 3) { |
135 | DRM_DEBUG_KMS("dp_aux_ch error\n" ); |
136 | r = -EIO; |
137 | goto done; |
138 | } |
139 | |
140 | recv_bytes = args.v1.ucDataOutLen; |
141 | if (recv_bytes > recv_size) |
142 | recv_bytes = recv_size; |
143 | |
144 | if (recv && recv_size) |
		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
146 | |
147 | r = recv_bytes; |
148 | done: |
	mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
	mutex_unlock(&chan->mutex);
151 | |
152 | return r; |
153 | } |
154 | |
155 | #define BARE_ADDRESS_SIZE 3 |
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
157 | |
158 | static ssize_t |
159 | radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) |
160 | { |
161 | struct radeon_i2c_chan *chan = |
162 | container_of(aux, struct radeon_i2c_chan, aux); |
163 | int ret; |
164 | u8 tx_buf[20]; |
165 | size_t tx_size; |
166 | u8 ack, delay = 0; |
167 | |
168 | if (WARN_ON(msg->size > 16)) |
169 | return -E2BIG; |
170 | |
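	/* build the 4-byte AUX request header: 20-bit address, 4-bit command,
	 * and the payload length minus one
	 */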
171 | tx_buf[0] = msg->address & 0xff; |
172 | tx_buf[1] = (msg->address >> 8) & 0xff; |
173 | tx_buf[2] = (msg->request << 4) | |
174 | ((msg->address >> 16) & 0xf); |
175 | tx_buf[3] = msg->size ? (msg->size - 1) : 0; |
176 | |
177 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
178 | case DP_AUX_NATIVE_WRITE: |
179 | case DP_AUX_I2C_WRITE: |
180 | case DP_AUX_I2C_WRITE_STATUS_UPDATE: |
181 | /* The atom implementation only supports writes with a max payload of |
182 | * 12 bytes since it uses 4 bits for the total count (header + payload) |
183 | * in the parameter space. The atom interface supports 16 byte |
184 | * payloads for reads. The hw itself supports up to 16 bytes of payload. |
185 | */ |
186 | if (WARN_ON_ONCE(msg->size > 12)) |
187 | return -E2BIG; |
188 | /* tx_size needs to be 4 even for bare address packets since the atom |
189 | * table needs the info in tx_buf[3]. |
190 | */ |
191 | tx_size = HEADER_SIZE + msg->size; |
192 | if (msg->size == 0) |
193 | tx_buf[3] |= BARE_ADDRESS_SIZE << 4; |
194 | else |
195 | tx_buf[3] |= tx_size << 4; |
196 | memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size); |
197 | ret = radeon_process_aux_ch(chan, |
					    tx_buf, tx_size, NULL, 0, delay, &ack);
199 | if (ret >= 0) |
200 | /* Return payload size. */ |
201 | ret = msg->size; |
202 | break; |
203 | case DP_AUX_NATIVE_READ: |
204 | case DP_AUX_I2C_READ: |
205 | /* tx_size needs to be 4 even for bare address packets since the atom |
206 | * table needs the info in tx_buf[3]. |
207 | */ |
208 | tx_size = HEADER_SIZE; |
209 | if (msg->size == 0) |
210 | tx_buf[3] |= BARE_ADDRESS_SIZE << 4; |
211 | else |
212 | tx_buf[3] |= tx_size << 4; |
213 | ret = radeon_process_aux_ch(chan, |
					    tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
215 | break; |
216 | default: |
217 | ret = -EINVAL; |
218 | break; |
219 | } |
220 | |
221 | if (ret >= 0) |
222 | msg->reply = ack >> 4; |
223 | |
224 | return ret; |
225 | } |
226 | |
227 | void radeon_dp_aux_init(struct radeon_connector *radeon_connector) |
228 | { |
229 | struct drm_device *dev = radeon_connector->base.dev; |
230 | struct radeon_device *rdev = dev->dev_private; |
231 | |
232 | radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; |
233 | radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev; |
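	/* DCE5 and newer asics also have a native AUX engine; the radeon_auxch
	 * module parameter selects between it and the atom-based transfer path.
	 */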
234 | if (ASIC_IS_DCE5(rdev)) { |
235 | if (radeon_auxch) |
236 | radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native; |
237 | else |
238 | radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; |
239 | } else { |
240 | radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; |
241 | } |
242 | |
	drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
244 | radeon_connector->ddc_bus->has_aux = true; |
245 | } |
246 | |
247 | /***** general DP utility functions *****/ |
248 | |
249 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 |
250 | #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3 |
251 | |
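/* Pick the highest voltage swing and pre-emphasis requested across the active
 * lanes and apply that setting to all lanes, flagging when the maximum
 * supported level has been reached.
 */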
252 | static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE], |
253 | int lane_count, |
254 | u8 train_set[4]) |
255 | { |
256 | u8 v = 0; |
257 | u8 p = 0; |
258 | int lane; |
259 | |
260 | for (lane = 0; lane < lane_count; lane++) { |
261 | u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
262 | u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
263 | |
264 | DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n" , |
265 | lane, |
266 | voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], |
267 | pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); |
268 | |
269 | if (this_v > v) |
270 | v = this_v; |
271 | if (this_p > p) |
272 | p = this_p; |
273 | } |
274 | |
275 | if (v >= DP_VOLTAGE_MAX) |
276 | v |= DP_TRAIN_MAX_SWING_REACHED; |
277 | |
278 | if (p >= DP_PRE_EMPHASIS_MAX) |
279 | p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
280 | |
281 | DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n" , |
282 | voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], |
283 | pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); |
284 | |
285 | for (lane = 0; lane < 4; lane++) |
286 | train_set[lane] = v | p; |
287 | } |
288 | |
289 | /* convert bits per color to bits per pixel */ |
290 | /* get bpc from the EDID */ |
291 | static int convert_bpc_to_bpp(int bpc) |
292 | { |
293 | if (bpc == 0) |
294 | return 24; |
295 | else |
296 | return bpc * 3; |
297 | } |
298 | |
299 | /***** radeon specific DP functions *****/ |
300 | |
301 | static int radeon_dp_get_dp_link_config(struct drm_connector *connector, |
302 | const u8 dpcd[DP_DPCD_SIZE], |
303 | unsigned pix_clock, |
304 | unsigned *dp_lanes, unsigned *dp_rate) |
305 | { |
	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
307 | static const unsigned link_rates[3] = { 162000, 270000, 540000 }; |
308 | unsigned max_link_rate = drm_dp_max_link_rate(dpcd); |
309 | unsigned max_lane_num = drm_dp_max_lane_count(dpcd); |
310 | unsigned lane_num, i, max_pix_clock; |
311 | |
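	/* The NUTMEG DP bridge is always driven at the 2.70 Gbps link rate, so
	 * only search for a suitable lane count there; otherwise pick the
	 * lowest link rate and lane count that still provide enough bandwidth
	 * for the requested pixel clock.
	 */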
312 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == |
313 | ENCODER_OBJECT_ID_NUTMEG) { |
314 | for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { |
315 | max_pix_clock = (lane_num * 270000 * 8) / bpp; |
316 | if (max_pix_clock >= pix_clock) { |
317 | *dp_lanes = lane_num; |
318 | *dp_rate = 270000; |
319 | return 0; |
320 | } |
321 | } |
322 | } else { |
323 | for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { |
324 | for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { |
325 | max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; |
326 | if (max_pix_clock >= pix_clock) { |
327 | *dp_lanes = lane_num; |
328 | *dp_rate = link_rates[i]; |
329 | return 0; |
330 | } |
331 | } |
332 | } |
333 | } |
334 | |
335 | return -EINVAL; |
336 | } |
337 | |
338 | static u8 radeon_dp_encoder_service(struct radeon_device *rdev, |
339 | int action, int dp_clock, |
340 | u8 ucconfig, u8 lane_num) |
341 | { |
342 | DP_ENCODER_SERVICE_PARAMETERS args; |
343 | int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); |
344 | |
345 | memset(&args, 0, sizeof(args)); |
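	/* the atom table takes the link clock in 10 kHz units */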
346 | args.ucLinkClock = dp_clock / 10; |
347 | args.ucConfig = ucconfig; |
348 | args.ucAction = action; |
349 | args.ucLaneNum = lane_num; |
350 | args.ucStatus = 0; |
351 | |
352 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); |
353 | return args.ucStatus; |
354 | } |
355 | |
356 | u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) |
357 | { |
358 | struct drm_device *dev = radeon_connector->base.dev; |
359 | struct radeon_device *rdev = dev->dev_private; |
360 | |
	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
					 radeon_connector->ddc_bus->rec.i2c_id, 0);
363 | } |
364 | |
365 | static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) |
366 | { |
367 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
368 | u8 buf[3]; |
369 | |
370 | if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
371 | return; |
372 | |
	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
375 | buf[0], buf[1], buf[2]); |
376 | |
	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
379 | buf[0], buf[1], buf[2]); |
380 | } |
381 | |
382 | bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) |
383 | { |
384 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
385 | u8 msg[DP_DPCD_SIZE]; |
386 | int ret; |
387 | |
	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
			       DP_DPCD_SIZE);
390 | if (ret == DP_DPCD_SIZE) { |
391 | memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
392 | |
393 | DRM_DEBUG_KMS("DPCD: %*ph\n" , (int)sizeof(dig_connector->dpcd), |
394 | dig_connector->dpcd); |
395 | |
396 | radeon_dp_probe_oui(radeon_connector); |
397 | |
398 | return true; |
399 | } |
400 | |
401 | dig_connector->dpcd[0] = 0; |
402 | return false; |
403 | } |
404 | |
405 | int radeon_dp_get_panel_mode(struct drm_encoder *encoder, |
406 | struct drm_connector *connector) |
407 | { |
408 | struct drm_device *dev = encoder->dev; |
409 | struct radeon_device *rdev = dev->dev_private; |
410 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
411 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
412 | u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); |
413 | u8 tmp; |
414 | |
415 | if (!ASIC_IS_DCE4(rdev)) |
416 | return panel_mode; |
417 | |
418 | if (!radeon_connector->con_priv) |
419 | return panel_mode; |
420 | |
421 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { |
422 | /* DP bridge chips */ |
		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
425 | if (tmp & 1) |
426 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
427 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || |
428 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) |
429 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; |
430 | else |
431 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
432 | } |
433 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
434 | /* eDP */ |
		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
437 | if (tmp & 1) |
438 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
439 | } |
440 | } |
441 | |
442 | return panel_mode; |
443 | } |
444 | |
445 | void radeon_dp_set_link_config(struct drm_connector *connector, |
446 | const struct drm_display_mode *mode) |
447 | { |
448 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
449 | struct radeon_connector_atom_dig *dig_connector; |
450 | int ret; |
451 | |
452 | if (!radeon_connector->con_priv) |
453 | return; |
454 | dig_connector = radeon_connector->con_priv; |
455 | |
456 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
457 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { |
		ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
						   mode->clock,
						   &dig_connector->dp_lane_count,
						   &dig_connector->dp_clock);
462 | if (ret) { |
463 | dig_connector->dp_clock = 0; |
464 | dig_connector->dp_lane_count = 0; |
465 | } |
466 | } |
467 | } |
468 | |
469 | int radeon_dp_mode_valid_helper(struct drm_connector *connector, |
470 | const struct drm_display_mode *mode) |
471 | { |
472 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
473 | struct radeon_connector_atom_dig *dig_connector; |
474 | unsigned dp_clock, dp_lanes; |
475 | int ret; |
476 | |
477 | if ((mode->clock > 340000) && |
478 | (!radeon_connector_is_dp12_capable(connector))) |
479 | return MODE_CLOCK_HIGH; |
480 | |
481 | if (!radeon_connector->con_priv) |
482 | return MODE_CLOCK_HIGH; |
483 | dig_connector = radeon_connector->con_priv; |
484 | |
	ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
					   mode->clock,
					   &dp_lanes,
					   &dp_clock);
489 | if (ret) |
490 | return MODE_CLOCK_HIGH; |
491 | |
492 | if ((dp_clock == 540000) && |
493 | (!radeon_connector_is_dp12_capable(connector))) |
494 | return MODE_CLOCK_HIGH; |
495 | |
496 | return MODE_OK; |
497 | } |
498 | |
499 | bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) |
500 | { |
501 | u8 link_status[DP_LINK_STATUS_SIZE]; |
502 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
503 | |
	if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux,
					 link_status) < 0)
506 | return false; |
	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
508 | return false; |
509 | return true; |
510 | } |
511 | |
512 | void radeon_dp_set_rx_power_state(struct drm_connector *connector, |
513 | u8 power_state) |
514 | { |
515 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
516 | struct radeon_connector_atom_dig *dig_connector; |
517 | |
518 | if (!radeon_connector->con_priv) |
519 | return; |
520 | |
521 | dig_connector = radeon_connector->con_priv; |
522 | |
523 | /* power up/down the sink */ |
524 | if (dig_connector->dpcd[0] >= 0x11) { |
		drm_dp_dpcd_writeb(&radeon_connector->ddc_bus->aux,
				   DP_SET_POWER, power_state);
		usleep_range(1000, 2000);
528 | } |
529 | } |
530 | |
531 | |
532 | struct radeon_dp_link_train_info { |
533 | struct radeon_device *rdev; |
534 | struct drm_encoder *encoder; |
535 | struct drm_connector *connector; |
536 | int enc_id; |
537 | int dp_clock; |
538 | int dp_lane_count; |
539 | bool tp3_supported; |
540 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
541 | u8 train_set[4]; |
542 | u8 link_status[DP_LINK_STATUS_SIZE]; |
543 | u8 tries; |
544 | bool use_dpencoder; |
545 | struct drm_dp_aux *aux; |
546 | }; |
547 | |
548 | static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) |
549 | { |
550 | /* set the initial vs/emph on the source */ |
	atombios_dig_transmitter_setup(dp_info->encoder,
				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
				       0, dp_info->train_set[0]); /* sets all lanes at once */
554 | |
555 | /* set the vs/emph on the sink */ |
	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
			  dp_info->train_set, dp_info->dp_lane_count);
558 | } |
559 | |
560 | static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) |
561 | { |
562 | int rtp = 0; |
563 | |
564 | /* set training pattern on the source */ |
565 | if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) { |
566 | switch (tp) { |
567 | case DP_TRAINING_PATTERN_1: |
568 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; |
569 | break; |
570 | case DP_TRAINING_PATTERN_2: |
571 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2; |
572 | break; |
573 | case DP_TRAINING_PATTERN_3: |
574 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3; |
575 | break; |
576 | } |
		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
578 | } else { |
579 | switch (tp) { |
580 | case DP_TRAINING_PATTERN_1: |
581 | rtp = 0; |
582 | break; |
583 | case DP_TRAINING_PATTERN_2: |
584 | rtp = 1; |
585 | break; |
586 | } |
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
					  dp_info->dp_clock, dp_info->enc_id, rtp);
589 | } |
590 | |
591 | /* enable training pattern on the sink */ |
	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
593 | } |
594 | |
595 | static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) |
596 | { |
597 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder); |
598 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
599 | u8 tmp; |
600 | |
601 | /* power up the sink */ |
	radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
603 | |
604 | /* possibly enable downspread on the sink */ |
605 | if (dp_info->dpcd[3] & 0x1) |
		drm_dp_dpcd_writeb(dp_info->aux,
607 | DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); |
608 | else |
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, 0);
611 | |
612 | if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE) |
		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
614 | |
615 | /* set the lane count on the sink */ |
616 | tmp = dp_info->dp_lane_count; |
	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
618 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
620 | |
621 | /* set the link rate on the sink */ |
	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
624 | |
625 | /* start training on the source */ |
626 | if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) |
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
629 | else |
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
					  dp_info->dp_clock, dp_info->enc_id, 0);
632 | |
633 | /* disable the training pattern on the sink */ |
	drm_dp_dpcd_writeb(dp_info->aux,
635 | DP_TRAINING_PATTERN_SET, |
636 | DP_TRAINING_PATTERN_DISABLE); |
637 | |
638 | return 0; |
639 | } |
640 | |
641 | static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info) |
642 | { |
	udelay(400);
644 | |
645 | /* disable the training pattern on the sink */ |
	drm_dp_dpcd_writeb(dp_info->aux,
647 | DP_TRAINING_PATTERN_SET, |
648 | DP_TRAINING_PATTERN_DISABLE); |
649 | |
650 | /* disable the training pattern on the source */ |
651 | if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) |
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
654 | else |
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
					  dp_info->dp_clock, dp_info->enc_id, 0);
657 | |
658 | return 0; |
659 | } |
660 | |
661 | static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) |
662 | { |
663 | bool clock_recovery; |
664 | u8 voltage; |
665 | int i; |
666 | |
667 | radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1); |
668 | memset(dp_info->train_set, 0, 4); |
669 | radeon_dp_update_vs_emph(dp_info); |
670 | |
	udelay(400);
672 | |
673 | /* clock recovery loop */ |
674 | clock_recovery = false; |
675 | dp_info->tries = 0; |
676 | voltage = 0xff; |
677 | while (1) { |
		drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
679 | |
		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) < 0) {
			DRM_ERROR("displayport link status failed\n");
683 | break; |
684 | } |
685 | |
		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
687 | clock_recovery = true; |
688 | break; |
689 | } |
690 | |
691 | for (i = 0; i < dp_info->dp_lane_count; i++) { |
692 | if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
693 | break; |
694 | } |
695 | if (i == dp_info->dp_lane_count) { |
696 | DRM_ERROR("clock recovery reached max voltage\n" ); |
697 | break; |
698 | } |
699 | |
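		/* if the sink keeps requesting the same voltage swing, give up
		 * after five attempts rather than looping forever
		 */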
700 | if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
701 | ++dp_info->tries; |
702 | if (dp_info->tries == 5) { |
703 | DRM_ERROR("clock recovery tried 5 times\n" ); |
704 | break; |
705 | } |
706 | } else |
707 | dp_info->tries = 0; |
708 | |
709 | voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
710 | |
711 | /* Compute new train_set as requested by sink */ |
		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
713 | |
714 | radeon_dp_update_vs_emph(dp_info); |
715 | } |
716 | if (!clock_recovery) { |
717 | DRM_ERROR("clock recovery failed\n" ); |
718 | return -1; |
719 | } else { |
720 | DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n" , |
721 | dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, |
722 | (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> |
723 | DP_TRAIN_PRE_EMPHASIS_SHIFT); |
724 | return 0; |
725 | } |
726 | } |
727 | |
728 | static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) |
729 | { |
730 | bool channel_eq; |
731 | |
732 | if (dp_info->tp3_supported) |
733 | radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3); |
734 | else |
735 | radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2); |
736 | |
737 | /* channel equalization loop */ |
738 | dp_info->tries = 0; |
739 | channel_eq = false; |
740 | while (1) { |
		drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
742 | |
		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) < 0) {
			DRM_ERROR("displayport link status failed\n");
746 | break; |
747 | } |
748 | |
		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
750 | channel_eq = true; |
751 | break; |
752 | } |
753 | |
754 | /* Try 5 times */ |
755 | if (dp_info->tries > 5) { |
756 | DRM_ERROR("channel eq failed: 5 tries\n" ); |
757 | break; |
758 | } |
759 | |
760 | /* Compute new train_set as requested by sink */ |
		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
762 | |
763 | radeon_dp_update_vs_emph(dp_info); |
764 | dp_info->tries++; |
765 | } |
766 | |
767 | if (!channel_eq) { |
768 | DRM_ERROR("channel eq failed\n" ); |
769 | return -1; |
770 | } else { |
771 | DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n" , |
772 | dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, |
773 | (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) |
774 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); |
775 | return 0; |
776 | } |
777 | } |
778 | |
779 | void radeon_dp_link_train(struct drm_encoder *encoder, |
780 | struct drm_connector *connector) |
781 | { |
782 | struct drm_device *dev = encoder->dev; |
783 | struct radeon_device *rdev = dev->dev_private; |
784 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
785 | struct radeon_encoder_atom_dig *dig; |
786 | struct radeon_connector *radeon_connector; |
787 | struct radeon_connector_atom_dig *dig_connector; |
788 | struct radeon_dp_link_train_info dp_info; |
789 | int index; |
790 | u8 tmp, frev, crev; |
791 | |
792 | if (!radeon_encoder->enc_priv) |
793 | return; |
794 | dig = radeon_encoder->enc_priv; |
795 | |
796 | radeon_connector = to_radeon_connector(connector); |
797 | if (!radeon_connector->con_priv) |
798 | return; |
799 | dig_connector = radeon_connector->con_priv; |
800 | |
801 | if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) && |
802 | (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) |
803 | return; |
804 | |
805 | /* DPEncoderService newer than 1.1 can't program properly the |
806 | * training pattern. When facing such version use the |
807 | * DIGXEncoderControl (X== 1 | 2) |
808 | */ |
809 | dp_info.use_dpencoder = true; |
810 | index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); |
	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
812 | if (crev > 1) |
813 | dp_info.use_dpencoder = false; |
814 | } |
815 | |
816 | dp_info.enc_id = 0; |
817 | if (dig->dig_encoder) |
818 | dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; |
819 | else |
820 | dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; |
821 | if (dig->linkb) |
822 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B; |
823 | else |
824 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; |
825 | |
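	/* training pattern 3 support is advertised in the DP_MAX_LANE_COUNT
	 * byte; only DCE5 and newer asics can generate it
	 */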
	if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
	    == 1) {
828 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) |
829 | dp_info.tp3_supported = true; |
830 | else |
831 | dp_info.tp3_supported = false; |
832 | } else { |
833 | dp_info.tp3_supported = false; |
834 | } |
835 | |
836 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); |
837 | dp_info.rdev = rdev; |
838 | dp_info.encoder = encoder; |
839 | dp_info.connector = connector; |
840 | dp_info.dp_lane_count = dig_connector->dp_lane_count; |
841 | dp_info.dp_clock = dig_connector->dp_clock; |
842 | dp_info.aux = &radeon_connector->ddc_bus->aux; |
843 | |
	if (radeon_dp_link_train_init(&dp_info))
		goto done;
	if (radeon_dp_link_train_cr(&dp_info))
		goto done;
	if (radeon_dp_link_train_ce(&dp_info))
		goto done;
done:
	if (radeon_dp_link_train_finish(&dp_info))
852 | return; |
853 | } |
854 | |