/*
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/display/drm_dp_helper.h>

#include "nouveau_drv.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"

#include <nvif/if0011.h>

MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
static int nouveau_mst = 1;
module_param_named(mst, nouveau_mst, int, 0400);

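/* Whether DP_SINK_COUNT is valid for this output, as determined by the
 * common DP helpers from the cached DPCD and sink descriptor.
 */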
static bool
nouveau_dp_has_sink_count(struct drm_connector *connector,
			  struct nouveau_encoder *outp)
{
	return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc);
}

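/* Probe for LT-tunable PHY repeaters (LTTPRs) by reading the LTTPR field
 * data structure revision over a raw AUX transaction.  Revisions below 1.4
 * are treated as "no usable LTTPR".
 */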
static bool
nouveau_dp_probe_lttpr(struct nouveau_encoder *outp)
{
	u8 rev, size = sizeof(rev);
	int ret;

	ret = nvif_outp_dp_aux_xfer(&outp->outp, DP_AUX_NATIVE_READ, &size,
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
				    &rev);
	if (ret || size < sizeof(rev) || rev < 0x14)
		return false;

	return true;
}

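/* Read and cache everything we need from the sink's DPCD: LTTPR state,
 * receiver caps, lane count, supported link rates, branch/downstream info
 * and sink count.  Any failed read (or a branch device with no sinks
 * attached) reports the connector as disconnected.
 */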
static enum drm_connector_status
nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
		      struct nouveau_encoder *outp)
{
	struct drm_connector *connector = &nv_connector->base;
	struct drm_dp_aux *aux = &nv_connector->aux;
	struct nv50_mstm *mstm = NULL;
	enum drm_connector_status status = connector_status_disconnected;
	int ret;
	u8 *dpcd = outp->dp.dpcd;

	outp->dp.lttpr.nr = 0;
	outp->dp.rate_nr = 0;
	outp->dp.link_nr = 0;
	outp->dp.link_bw = 0;

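	/* If LT-tunable PHY repeaters (LTTPRs) are present, put them into
	 * transparent mode first and then try to switch to non-transparent
	 * link training, falling back to transparent mode if the sink
	 * rejects the mode write.
	 */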
	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    nouveau_dp_probe_lttpr(outp) &&
	    !drm_dp_read_dpcd_caps(aux, dpcd) &&
	    !drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) {
		int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps);

		if (nr) {
			drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
					   DP_PHY_REPEATER_MODE_TRANSPARENT);

			if (nr > 0) {
				ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
							 DP_PHY_REPEATER_MODE_NON_TRANSPARENT);
				if (ret != 1) {
					drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
							   DP_PHY_REPEATER_MODE_TRANSPARENT);
				} else {
					outp->dp.lttpr.nr = nr;
				}
			}
		}
	}

	ret = drm_dp_read_dpcd_caps(aux, dpcd);
	if (ret < 0)
		goto out;

	outp->dp.link_nr = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
	if (outp->dcb->dpconf.link_nr < outp->dp.link_nr)
		outp->dp.link_nr = outp->dcb->dpconf.link_nr;

	if (outp->dp.lttpr.nr) {
		int links = drm_dp_lttpr_max_lane_count(outp->dp.lttpr.caps);

		if (links && links < outp->dp.link_nr)
			outp->dp.link_nr = links;
	}

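	/* eDP sinks with DPCD 1.3+ may expose an explicit table of supported
	 * link rates (DP_SUPPORTED_LINK_RATES, entries in 200kHz units).
	 * Convert each entry to the units used by the fallback table below
	 * (162000 == 1.62Gbps) and keep the list sorted from highest to
	 * lowest rate.
	 */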
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
		__le16 rates[DP_MAX_SUPPORTED_RATES];

		ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, rates, sizeof(rates));
		if (ret == sizeof(rates)) {
			for (int i = 0; i < ARRAY_SIZE(rates); i++) {
				u32 rate = (le16_to_cpu(rates[i]) * 200) / 10;
				int j;

				if (!rate)
					break;

				for (j = 0; j < outp->dp.rate_nr; j++) {
					if (rate > outp->dp.rate[j].rate) {
						for (int k = outp->dp.rate_nr; k > j; k--)
							outp->dp.rate[k] = outp->dp.rate[k - 1];
						break;
					}
				}

				outp->dp.rate[j].dpcd = i;
				outp->dp.rate[j].rate = rate;
				outp->dp.rate_nr++;
			}
		}
	}

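	/* No explicit rate table: fall back to the standard RBR/HBR/HBR2/HBR3
	 * rates, capped by the sink's DP_MAX_LINK_RATE, any LTTPR limit and
	 * the board's dpconf limit.
	 */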
	if (!outp->dp.rate_nr) {
		const u32 rates[] = { 810000, 540000, 270000, 162000 };
		u32 max_rate = dpcd[DP_MAX_LINK_RATE] * 27000;

		if (outp->dp.lttpr.nr) {
			int rate = drm_dp_lttpr_max_link_rate(outp->dp.lttpr.caps);

			if (rate && rate < max_rate)
				max_rate = rate;
		}

		max_rate = min_t(int, max_rate, outp->dcb->dpconf.link_bw);

		for (int i = 0; i < ARRAY_SIZE(rates); i++) {
			if (rates[i] <= max_rate) {
				outp->dp.rate[outp->dp.rate_nr].dpcd = -1;
				outp->dp.rate[outp->dp.rate_nr].rate = rates[i];
				outp->dp.rate_nr++;
			}
		}

		if (WARN_ON(!outp->dp.rate_nr))
			goto out;
	}

	ret = nvif_outp_dp_rates(&outp->outp, outp->dp.rate, outp->dp.rate_nr);
	if (ret)
		goto out;

	for (int i = 0; i < outp->dp.rate_nr; i++) {
		u32 link_bw = outp->dp.rate[i].rate;

		if (link_bw > outp->dp.link_bw)
			outp->dp.link_bw = link_bw;
	}

	ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd));
	if (ret < 0)
		goto out;

	if (nouveau_mst) {
		mstm = outp->dp.mstm;
		if (mstm)
			mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd);
	}

	if (nouveau_dp_has_sink_count(connector, outp)) {
		ret = drm_dp_read_sink_count(aux);
		if (ret < 0)
			goto out;

		outp->dp.sink_count = ret;

		/*
		 * Dongle connected, but no display. Don't bother reading
		 * downstream port info
		 */
		if (!outp->dp.sink_count)
			return connector_status_disconnected;
	}

	ret = drm_dp_read_downstream_info(aux, dpcd,
					  outp->dp.downstream_ports);
	if (ret < 0)
		goto out;

	status = connector_status_connected;
out:
	if (status != connector_status_connected) {
		/* Clear any cached info */
		outp->dp.sink_count = 0;
	}
	return status;
}

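/* Probe a DP/eDP connector: check the hotplug state, power up the AUX
 * channel, (re)read the DPCD and decide whether the sink should be driven
 * in SST or MST mode.  Returns NOUVEAU_DP_NONE, NOUVEAU_DP_SST or
 * NOUVEAU_DP_MST.
 */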
int
nouveau_dp_detect(struct nouveau_connector *nv_connector,
		  struct nouveau_encoder *nv_encoder)
{
	struct drm_device *dev = nv_encoder->base.base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_connector *connector = &nv_connector->base;
	struct nv50_mstm *mstm = nv_encoder->dp.mstm;
	enum drm_connector_status status;
	u8 *dpcd = nv_encoder->dp.dpcd;
	int ret = NOUVEAU_DP_NONE, hpd;

	/* If we've already read the DPCD on an eDP device, we don't need to
	 * reread it as it won't change
	 */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dpcd[DP_DPCD_REV] != 0)
		return NOUVEAU_DP_SST;

	mutex_lock(&nv_encoder->dp.hpd_irq_lock);
	if (mstm) {
		/* If we're not ready to handle MST state changes yet, just
		 * report the last status of the connector. We'll reprobe it
		 * once we've resumed.
		 */
		if (mstm->suspended) {
			if (mstm->is_mst)
				ret = NOUVEAU_DP_MST;
			else if (connector->status ==
				 connector_status_connected)
				ret = NOUVEAU_DP_SST;

			goto out;
		}
	}

	hpd = nvif_outp_detect(&nv_encoder->outp);
	if (hpd == NOT_PRESENT) {
		nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
		goto out;
	}
	nvif_outp_dp_aux_pwr(&nv_encoder->outp, true);

	status = nouveau_dp_probe_dpcd(nv_connector, nv_encoder);
	if (status == connector_status_disconnected) {
		nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
		goto out;
	}

	/* If we're in MST mode, we're done here */
	if (mstm && mstm->can_mst && mstm->is_mst) {
		ret = NOUVEAU_DP_MST;
		goto out;
	}

	NV_DEBUG(drm, "sink dpcd version: 0x%02x\n", dpcd[DP_DPCD_REV]);
	for (int i = 0; i < nv_encoder->dp.rate_nr; i++)
		NV_DEBUG(drm, "sink rate %d: %d\n", i, nv_encoder->dp.rate[i].rate);

	NV_DEBUG(drm, "encoder: %dx%d\n", nv_encoder->dcb->dpconf.link_nr,
		 nv_encoder->dcb->dpconf.link_bw);
	NV_DEBUG(drm, "maximum: %dx%d\n", nv_encoder->dp.link_nr,
		 nv_encoder->dp.link_bw);

	if (mstm && mstm->can_mst) {
		ret = nv50_mstm_detect(nv_encoder);
		if (ret == 1) {
			ret = NOUVEAU_DP_MST;
			goto out;
		} else if (ret != 0) {
			nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
			goto out;
		}
	}
	ret = NOUVEAU_DP_SST;

out:
	if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
		nv50_mstm_remove(mstm);

	mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
	return ret;
}

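/* Put the sink into the D3 power state and forget the currently trained
 * link configuration.
 */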
void
nouveau_dp_power_down(struct nouveau_encoder *outp)
{
	struct drm_dp_aux *aux = &outp->conn->aux;
	int ret;
	u8 pwr;

	mutex_lock(&outp->dp.hpd_irq_lock);

	ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
	if (ret == 1) {
		pwr &= ~DP_SET_POWER_MASK;
		pwr |= DP_SET_POWER_D3;
		drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
	}

	outp->dp.lt.nr = 0;
	mutex_unlock(&outp->dp.hpd_irq_lock);
}

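/* Have the firmware train (or retrain) the link using the parameters in
 * outp->dp.lt.  Sinks that request post-LT adjustment (and lack TPS4
 * support) are then monitored for a short while, with drive levels updated
 * on request, before the post-LT adjust grant is cleared again.  If channel
 * equalization is lost during that phase, the whole sequence is retried.
 */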
static bool
nouveau_dp_train_link(struct nouveau_encoder *outp, bool retrain)
{
	struct drm_dp_aux *aux = &outp->conn->aux;
	bool post_lt = false;
	int ret, retries = 0;

	if ( (outp->dp.dpcd[DP_MAX_LANE_COUNT] & 0x20) &&
	    !(outp->dp.dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED))
		post_lt = true;

retry:
	ret = nvif_outp_dp_train(&outp->outp, outp->dp.dpcd,
				 outp->dp.lttpr.nr,
				 outp->dp.lt.nr,
				 outp->dp.lt.bw,
				 outp->dp.lt.mst,
				 post_lt,
				 retrain);
	if (ret)
		return false;

	if (post_lt) {
		u8 stat[DP_LINK_STATUS_SIZE];
		u8 prev[2];
		u8 time = 0, adjusts = 0, tmp;

		ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);
		if (ret)
			return false;

		for (;;) {
			if (!drm_dp_channel_eq_ok(stat, outp->dp.lt.nr)) {
				ret = 1;
				break;
			}

			if (!(stat[2] & 0x02))
				break;

			msleep(5);
			time += 5;

			memcpy(prev, &stat[4], sizeof(prev));
			ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);
			if (ret)
				break;

			if (!memcmp(prev, &stat[4], sizeof(prev))) {
				if (time > 200)
					break;
			} else {
				u8 pe[4], vs[4];

				if (adjusts++ == 6)
					break;

				for (int i = 0; i < outp->dp.lt.nr; i++) {
					pe[i] = drm_dp_get_adjust_request_pre_emphasis(stat, i) >>
						DP_TRAIN_PRE_EMPHASIS_SHIFT;
					vs[i] = drm_dp_get_adjust_request_voltage(stat, i) >>
						DP_TRAIN_VOLTAGE_SWING_SHIFT;
				}

				ret = nvif_outp_dp_drive(&outp->outp, outp->dp.lt.nr, pe, vs);
				if (ret)
					break;

				time = 0;
			}
		}

		if (drm_dp_dpcd_readb(aux, DP_LANE_COUNT_SET, &tmp) == 1) {
			tmp &= ~0x20;
			drm_dp_dpcd_writeb(aux, DP_LANE_COUNT_SET, tmp);
		}
	}

	if (ret == 1 && retries++ < 3)
		goto retry;

	return ret == 0;
}

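/* Train the link with enough bandwidth for the given pixel clock and bpc
 * (or, for MST, for the full link).  The sink is woken to D0 if needed, and
 * lane count/link rate combinations are tried from highest to lowest until
 * training succeeds.
 */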
bool
nouveau_dp_train(struct nouveau_encoder *outp, bool mst, u32 khz, u8 bpc)
{
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct drm_dp_aux *aux = &outp->conn->aux;
	u32 min_rate;
	u8 pwr;
	bool ret = true;

	if (mst)
		min_rate = outp->dp.link_nr * outp->dp.rate[0].rate;
	else
		min_rate = DIV_ROUND_UP(khz * bpc * 3, 8);

	NV_DEBUG(drm, "%s link training (mst:%d min_rate:%d)\n",
		 outp->base.base.name, mst, min_rate);

	mutex_lock(&outp->dp.hpd_irq_lock);

	if (drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr) == 1) {
		if ((pwr & DP_SET_POWER_MASK) != DP_SET_POWER_D0) {
			pwr &= ~DP_SET_POWER_MASK;
			pwr |= DP_SET_POWER_D0;
			drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
		}
	}

	for (int nr = outp->dp.link_nr; nr; nr >>= 1) {
		for (int rate = 0; rate < outp->dp.rate_nr; rate++) {
			if (outp->dp.rate[rate].rate * nr >= min_rate) {
				outp->dp.lt.nr = nr;
				outp->dp.lt.bw = outp->dp.rate[rate].rate;
				outp->dp.lt.mst = mst;
				if (nouveau_dp_train_link(outp, false))
					goto done;
			}
		}
	}

	ret = false;
done:
	mutex_unlock(&outp->dp.hpd_irq_lock);
	return ret;
}

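/* With the HPD/IRQ lock held: check that the currently trained link still
 * has good channel equalization, and retrain it if not.
 */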
static bool
nouveau_dp_link_check_locked(struct nouveau_encoder *outp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!outp || !outp->dp.lt.nr)
		return true;

	if (drm_dp_dpcd_read_phy_link_status(&outp->conn->aux, DP_PHY_DPRX, link_status) < 0)
		return false;

	if (drm_dp_channel_eq_ok(link_status, outp->dp.lt.nr))
		return true;

	return nouveau_dp_train_link(outp, true);
}

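/* Take the HPD/IRQ lock and verify (and, if necessary, retrain) the link. */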
bool
nouveau_dp_link_check(struct nouveau_connector *nv_connector)
{
	struct nouveau_encoder *outp = nv_connector->dp_encoder;
	bool link_ok = true;

	if (outp) {
		mutex_lock(&outp->dp.hpd_irq_lock);
		if (outp->dp.lt.nr)
			link_ok = nouveau_dp_link_check_locked(outp);
		mutex_unlock(&outp->dp.hpd_irq_lock);
	}

	return link_ok;
}

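/* Worker for DP IRQ_HPD (short pulse) events: service MST if it's active,
 * otherwise handle CEC and sink count changes, then forward the event to
 * the connector hotplug handler.
 */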
void
nouveau_dp_irq(struct work_struct *work)
{
	struct nouveau_connector *nv_connector =
		container_of(work, typeof(*nv_connector), irq_work);
	struct drm_connector *connector = &nv_connector->base;
	struct nouveau_encoder *outp = find_encoder(connector, DCB_OUTPUT_DP);
	struct nouveau_drm *drm;
	struct nv50_mstm *mstm;
	u64 hpd = 0;
	int ret;

	if (!outp)
		return;

	drm = nouveau_drm(outp->base.base.dev);
	mstm = outp->dp.mstm;
	NV_DEBUG(drm, "service %s\n", connector->name);

	mutex_lock(&outp->dp.hpd_irq_lock);

	if (mstm && mstm->is_mst) {
		if (!nv50_mstm_service(drm, nv_connector, mstm))
			hpd |= NVIF_CONN_EVENT_V0_UNPLUG;
	} else {
		drm_dp_cec_irq(&nv_connector->aux);

		if (nouveau_dp_has_sink_count(connector, outp)) {
			ret = drm_dp_read_sink_count(&nv_connector->aux);
			if (ret != outp->dp.sink_count)
				hpd |= NVIF_CONN_EVENT_V0_PLUG;
			if (ret >= 0)
				outp->dp.sink_count = ret;
		}
	}

	mutex_unlock(&outp->dp.hpd_irq_lock);

	nouveau_connector_hpd(nv_connector, NVIF_CONN_EVENT_V0_IRQ | hpd);
}

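/* Reject modes that exceed the maximum bandwidth the link supports (as
 * probed from the DPCD/VBIOS) or any downstream (branch device) dotclock
 * limit.
 */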
/* TODO:
 * - Validate against the DP caps advertised by the GPU (we don't check these
 *   yet)
 */
enum drm_mode_status
nv50_dp_mode_valid(struct nouveau_encoder *outp,
		   const struct drm_display_mode *mode,
		   unsigned *out_clock)
{
	const unsigned int min_clock = 25000;
	unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
	/* Check with the minimum bpc always, so we can advertise better modes.
	 * In particular not doing this causes modes to be dropped on HDR
	 * displays as we might check with a bpc of 16 even.
	 */
	const u8 bpp = 6 * 3;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
		return MODE_NO_INTERLACE;

	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		clock *= 2;

	max_rate = outp->dp.link_nr * outp->dp.link_bw;
	mode_rate = DIV_ROUND_UP(clock * bpp, 8);
	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports);
	if (ds_max_dotclock && clock > ds_max_dotclock)
		return MODE_CLOCK_HIGH;

	if (clock < min_clock)
		return MODE_CLOCK_LOW;

	if (out_clock)
		*out_clock = clock;

	return MODE_OK;
}
