1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include <drm/drm_atomic_helper.h>
25#include <drm/drm_damage_helper.h>
26#include <drm/drm_debugfs.h>
27
28#include "i915_drv.h"
29#include "i915_reg.h"
30#include "intel_atomic.h"
31#include "intel_crtc.h"
32#include "intel_ddi.h"
33#include "intel_de.h"
34#include "intel_display_types.h"
35#include "intel_dp.h"
36#include "intel_dp_aux.h"
37#include "intel_frontbuffer.h"
38#include "intel_hdmi.h"
39#include "intel_psr.h"
40#include "intel_psr_regs.h"
41#include "intel_snps_phy.h"
42#include "skl_universal_plane.h"
43
44/**
45 * DOC: Panel Self Refresh (PSR/SRD)
46 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
53 *
54 * Panel Self Refresh must be supported by both Hardware (source) and
55 * Panel (sink).
56 *
57 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58 * to power down the link and memory controller. For DSI panels the same idea
59 * is called "manual mode".
60 *
61 * The implementation uses the hardware-based PSR support which automatically
62 * enters/exits self-refresh mode. The hardware takes care of sending the
63 * required DP aux message and could even retrain the link (that part isn't
64 * enabled yet though). The hardware also keeps track of any frontbuffer
65 * changes to know when to exit self-refresh mode again. Unfortunately that
66 * part doesn't work too well, hence why the i915 PSR support uses the
67 * software frontbuffer tracking to make sure it doesn't miss a screen
68 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69 * get called by the frontbuffer tracking code. Note that because of locking
70 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
72 *
73 * DC3CO (DC3 clock off)
74 *
 * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
 * the clock off automatically during PSR2 idle state.
77 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
78 * entry/exit allows the HW to enter a low-power state even when page flipping
79 * periodically (for instance a 30fps video playback scenario).
80 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and the function above is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only
 * use page flips.
89 */
90
91/*
92 * Description of PSR mask bits:
93 *
94 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
95 *
96 * When unmasked (nearly) all display register writes (eg. even
97 * SWF) trigger a PSR exit. Some registers are excluded from this
98 * and they have a more specific mask (described below). On icl+
99 * this bit no longer exists and is effectively always set.
100 *
101 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
102 *
103 * When unmasked (nearly) all pipe/plane register writes
104 * trigger a PSR exit. Some plane registers are excluded from this
105 * and they have a more specific mask (described below).
106 *
107 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
110 *
111 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112 * SPR_SURF/CURBASE are not included in this and instead are
113 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
115 *
116 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
118 *
119 * When unmasked PSR is blocked as long as the sprite
120 * plane is enabled. skl+ with their universal planes no
121 * longer have a mask bit like this, and no plane being
 * enabled blocks PSR.
123 *
124 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
126 *
 * When unmasked CURPOS writes trigger a PSR exit. On skl+
 * this bit no longer exists but CURPOS is included in the
129 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
130 *
131 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
133 *
134 * When unmasked PSR is blocked as long as vblank and/or vsync
135 * interrupt is unmasked in IMR *and* enabled in IER.
136 *
137 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
139 *
 * Selects whether PSR exit generates an extra vblank before
 * the first frame is transmitted. Also note the opposite polarity
 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 * unmasked==do not generate the extra vblank).
144 *
145 * With DC states enabled the extra vblank happens after link training,
 * with DC states disabled it happens immediately upon PSR exit trigger.
147 * No idea as of now why there is a difference. HSW/BDW (which don't
148 * even have DMC) always generate it after link training. Go figure.
149 *
150 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
151 * and thus won't latch until the first vblank. So with DC states
 * enabled the register effectively uses the reset value during DC5
153 * exit+PSR exit sequence, and thus the bit does nothing until
154 * latched by the vblank that it was trying to prevent from being
155 * generated in the first place. So we should probably call this
156 * one a chicken/egg bit instead on skl+.
157 *
158 * In standby mode (as opposed to link-off) this makes no difference
159 * as the timing generator keeps running the whole time generating
160 * normal periodic vblanks.
161 *
162 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163 * and doing so makes the behaviour match the skl+ reset value.
164 *
165 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
167 *
 * On BDW without this bit set no vblanks whatsoever are
 * generated after PSR exit. On HSW this has no apparent effect.
170 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
171 *
172 * The rest of the bits are more self-explanatory and/or
173 * irrelevant for normal operation.
174 */
175
176#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
177 (intel_dp)->psr.source_support)
178
179#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
180 (intel_dp)->psr.source_panel_replay_support)
181
182bool intel_encoder_can_psr(struct intel_encoder *encoder)
183{
184 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
185 return CAN_PSR(enc_to_intel_dp(encoder)) ||
186 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
187 else
188 return false;
189}
190
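/*
 * Resolve the effective PSR enable setting: a debugfs PSR debug override
 * takes precedence, then the enable_psr module parameter, and with the
 * parameter left at its default (-1) the VBT setting of the panel decides.
 */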
191static bool psr_global_enabled(struct intel_dp *intel_dp)
192{
193 struct intel_connector *connector = intel_dp->attached_connector;
194 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
195
196 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
197 case I915_PSR_DEBUG_DEFAULT:
198 if (i915->display.params.enable_psr == -1)
199 return connector->panel.vbt.psr.enable;
200 return i915->display.params.enable_psr;
201 case I915_PSR_DEBUG_DISABLE:
202 return false;
203 default:
204 return true;
205 }
206}
207
208static bool psr2_global_enabled(struct intel_dp *intel_dp)
209{
210 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
211
212 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
213 case I915_PSR_DEBUG_DISABLE:
214 case I915_PSR_DEBUG_FORCE_PSR1:
215 return false;
216 default:
217 if (i915->display.params.enable_psr == 1)
218 return false;
219 return true;
220 }
221}
222
223static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
224{
225 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
226
227 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
228 EDP_PSR_ERROR(intel_dp->psr.transcoder);
229}
230
231static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
232{
233 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
234
235 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
236 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
237}
238
239static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
240{
241 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
242
243 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
244 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
245}
246
247static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
248{
249 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
250
251 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
252 EDP_PSR_MASK(intel_dp->psr.transcoder);
253}
254
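/*
 * Register helpers: pick the per-transcoder register instance on platforms
 * that have one (see the DISPLAY_VER checks below) and fall back to the
 * single legacy HSW/EDP instance otherwise.
 */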
255static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
256 enum transcoder cpu_transcoder)
257{
258 if (DISPLAY_VER(dev_priv) >= 8)
259 return EDP_PSR_CTL(cpu_transcoder);
260 else
261 return HSW_SRD_CTL;
262}
263
264static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
265 enum transcoder cpu_transcoder)
266{
267 if (DISPLAY_VER(dev_priv) >= 8)
268 return EDP_PSR_DEBUG(cpu_transcoder);
269 else
270 return HSW_SRD_DEBUG;
271}
272
273static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
274 enum transcoder cpu_transcoder)
275{
276 if (DISPLAY_VER(dev_priv) >= 8)
277 return EDP_PSR_PERF_CNT(cpu_transcoder);
278 else
279 return HSW_SRD_PERF_CNT;
280}
281
282static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
283 enum transcoder cpu_transcoder)
284{
285 if (DISPLAY_VER(dev_priv) >= 8)
286 return EDP_PSR_STATUS(cpu_transcoder);
287 else
288 return HSW_SRD_STATUS;
289}
290
291static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
292 enum transcoder cpu_transcoder)
293{
294 if (DISPLAY_VER(dev_priv) >= 12)
295 return TRANS_PSR_IMR(cpu_transcoder);
296 else
297 return EDP_PSR_IMR;
298}
299
300static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
301 enum transcoder cpu_transcoder)
302{
303 if (DISPLAY_VER(dev_priv) >= 12)
304 return TRANS_PSR_IIR(cpu_transcoder);
305 else
306 return EDP_PSR_IIR;
307}
308
309static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
310 enum transcoder cpu_transcoder)
311{
312 if (DISPLAY_VER(dev_priv) >= 8)
313 return EDP_PSR_AUX_CTL(cpu_transcoder);
314 else
315 return HSW_SRD_AUX_CTL;
316}
317
318static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
319 enum transcoder cpu_transcoder, int i)
320{
321 if (DISPLAY_VER(dev_priv) >= 8)
322 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
323 else
324 return HSW_SRD_AUX_DATA(i);
325}
326
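/*
 * The PSR error interrupt is always unmasked; pre-entry and post-exit
 * interrupts are only unmasked when requested via the I915_PSR_DEBUG_IRQ
 * debug flag.
 */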
327static void psr_irq_control(struct intel_dp *intel_dp)
328{
329 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
330 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
331 u32 mask;
332
333 mask = psr_irq_psr_error_bit_get(intel_dp);
334 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
335 mask |= psr_irq_post_exit_bit_get(intel_dp) |
336 psr_irq_pre_entry_bit_get(intel_dp);
337
	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
		     psr_irq_mask_get(intel_dp), ~mask);
340}
341
342static void psr_event_print(struct drm_i915_private *i915,
343 u32 val, bool psr2_enabled)
344{
345 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
346 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
347 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
348 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
349 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
350 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
351 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
352 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
353 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
354 if (val & PSR_EVENT_GRAPHICS_RESET)
355 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
356 if (val & PSR_EVENT_PCH_INTERRUPT)
357 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
358 if (val & PSR_EVENT_MEMORY_UP)
359 drm_dbg_kms(&i915->drm, "\tMemory up\n");
360 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
361 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
362 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
363 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
364 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
365 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
366 if (val & PSR_EVENT_REGISTER_UPDATE)
367 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
368 if (val & PSR_EVENT_HDCP_ENABLE)
369 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
370 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
371 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
372 if (val & PSR_EVENT_VBI_ENABLE)
373 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
374 if (val & PSR_EVENT_LPSP_MODE_EXIT)
375 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
376 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
377 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
378}
379
380void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
381{
382 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
383 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
384 ktime_t time_ns = ktime_get();
385
386 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
387 intel_dp->psr.last_entry_attempt = time_ns;
388 drm_dbg_kms(&dev_priv->drm,
389 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
390 transcoder_name(cpu_transcoder));
391 }
392
393 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
394 intel_dp->psr.last_exit = time_ns;
395 drm_dbg_kms(&dev_priv->drm,
396 "[transcoder %s] PSR exit completed\n",
397 transcoder_name(cpu_transcoder));
398
399 if (DISPLAY_VER(dev_priv) >= 9) {
400 u32 val;
401
			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);

			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
405 }
406 }
407
408 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
409 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
410 transcoder_name(cpu_transcoder));
411
412 intel_dp->psr.irq_aux_error = true;
413
		/*
		 * If this interrupt is not masked it will keep firing so
		 * fast that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again,
		 * so we don't care about unmasking the interrupt or
		 * clearing irq_aux_error.
		 */
		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
426 }
427}
428
429static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
430{
431 u8 alpm_caps = 0;
432
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
435 return false;
436 return alpm_caps & DP_ALPM_CAP;
437}
438
439static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
440{
441 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
442 u8 val = 8; /* assume the worst if we can't read the value */
443
	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
446 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
447 else
448 drm_dbg_kms(&i915->drm,
449 "Unable to get sink synchronization latency, assuming 8 frames\n");
450 return val;
451}
452
453static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
454{
455 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
456 ssize_t r;
457 u16 w;
458 u8 y;
459
	/* If the sink doesn't have specific granularity requirements, set legacy ones */
461 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
462 /* As PSR2 HW sends full lines, we do not care about x granularity */
463 w = 4;
464 y = 4;
465 goto exit;
466 }
467
	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
469 if (r != 2)
470 drm_dbg_kms(&i915->drm,
471 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
472 /*
473 * Spec says that if the value read is 0 the default granularity should
474 * be used instead.
475 */
476 if (r != 2 || w == 0)
477 w = 4;
478
	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
480 if (r != 1) {
481 drm_dbg_kms(&i915->drm,
482 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
483 y = 4;
484 }
485 if (y == 0)
486 y = 1;
487
488exit:
489 intel_dp->psr.su_w_granularity = w;
490 intel_dp->psr.su_y_granularity = y;
491}
492
493static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
494{
495 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
496 u8 pr_dpcd = 0;
497
498 intel_dp->psr.sink_panel_replay_support = false;
	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
500
501 if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
502 drm_dbg_kms(&i915->drm,
503 "Panel replay is not supported by panel\n");
504 return;
505 }
506
507 drm_dbg_kms(&i915->drm,
508 "Panel replay is supported by panel\n");
509 intel_dp->psr.sink_panel_replay_support = true;
510}
511
512static void _psr_init_dpcd(struct intel_dp *intel_dp)
513{
514 struct drm_i915_private *i915 =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
516
517 drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
518 intel_dp->psr_dpcd[0]);
519
	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
521 drm_dbg_kms(&i915->drm,
522 "PSR support not currently available for this panel\n");
523 return;
524 }
525
526 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
527 drm_dbg_kms(&i915->drm,
528 "Panel lacks power state control, PSR cannot be enabled\n");
529 return;
530 }
531
532 intel_dp->psr.sink_support = true;
533 intel_dp->psr.sink_sync_latency =
534 intel_dp_get_sink_sync_latency(intel_dp);
535
536 if (DISPLAY_VER(i915) >= 9 &&
537 intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
538 bool y_req = intel_dp->psr_dpcd[1] &
539 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
540 bool alpm = intel_dp_get_alpm_status(intel_dp);
541
542 /*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
548 *
549 * To support PSR version 02h and PSR version 03h without
550 * Y-coordinate requirement panels we would need to enable
551 * GTC first.
552 */
553 intel_dp->psr.sink_psr2_support = y_req && alpm;
554 drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
555 intel_dp->psr.sink_psr2_support ? "" : "not ");
556 }
557}
558
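/**
 * intel_psr_init_dpcd - read out the sink's PSR and Panel Replay capabilities
 * @intel_dp: Intel DP struct
 *
 * Reads the Panel Replay capability and the PSR capability block from the
 * sink's DPCD and caches what the sink supports, including the selective
 * update granularity when PSR2 is supported.
 */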
559void intel_psr_init_dpcd(struct intel_dp *intel_dp)
560{
561 _panel_replay_init_dpcd(intel_dp);
562
	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));
565
566 if (intel_dp->psr_dpcd[0])
567 _psr_init_dpcd(intel_dp);
568
569 if (intel_dp->psr.sink_psr2_support)
570 intel_dp_get_su_granularity(intel_dp);
571}
572
573static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
574{
575 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
576 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
577 u32 aux_clock_divider, aux_ctl;
578 /* write DP_SET_POWER=D0 */
579 static const u8 aux_msg[] = {
580 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
581 [1] = (DP_SET_POWER >> 8) & 0xff,
582 [2] = DP_SET_POWER & 0xff,
583 [3] = 1 - 1,
584 [4] = DP_SET_POWER_D0,
585 };
586 int i;
587
588 BUILD_BUG_ON(sizeof(aux_msg) > 20);
589 for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
593
594 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
595
596 /* Start with bits set for DDI_AUX_CTL register */
597 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
598 aux_clock_divider);
599
600 /* Select only valid bits for SRD_AUX_CTL */
601 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
602 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
603 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
604 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
605
	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
		       aux_ctl);
608}
609
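/*
 * Early Transport of the selective update region is only considered on
 * display version 20+, when the sink reports PSR2 with Y-coordinate and
 * Early Transport support, and when it hasn't been disabled via the PSR
 * debug interface.
 */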
610static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
611{
612 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
613
614 if (DISPLAY_VER(i915) >= 20 &&
615 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
616 !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
617 return true;
618
619 return false;
620}
621
622static void intel_psr_enable_sink(struct intel_dp *intel_dp)
623{
624 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
625 u8 dpcd_val = DP_PSR_ENABLE;
626
627 if (intel_dp->psr.panel_replay_enabled)
628 return;
629
630 if (intel_dp->psr.psr2_enabled) {
631 /* Enable ALPM at sink for psr2 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
633 DP_ALPM_ENABLE |
634 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
635
636 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
637 if (psr2_su_region_et_valid(intel_dp))
638 dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
639 } else {
640 if (intel_dp->psr.link_standby)
641 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
642
643 if (DISPLAY_VER(dev_priv) >= 8)
644 dpcd_val |= DP_PSR_CRC_VERIFICATION;
645 }
646
647 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
648 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
649
650 if (intel_dp->psr.entry_setup_frames > 0)
651 dpcd_val |= DP_PSR_FRAME_CAPTURE;
652
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
656}
657
658static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
659{
660 struct intel_connector *connector = intel_dp->attached_connector;
661 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
662 u32 val = 0;
663
664 if (DISPLAY_VER(dev_priv) >= 11)
665 val |= EDP_PSR_TP4_TIME_0us;
666
667 if (dev_priv->display.params.psr_safest_params) {
668 val |= EDP_PSR_TP1_TIME_2500us;
669 val |= EDP_PSR_TP2_TP3_TIME_2500us;
670 goto check_tp3_sel;
671 }
672
673 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
674 val |= EDP_PSR_TP1_TIME_0us;
675 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
676 val |= EDP_PSR_TP1_TIME_100us;
677 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
678 val |= EDP_PSR_TP1_TIME_500us;
679 else
680 val |= EDP_PSR_TP1_TIME_2500us;
681
682 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
683 val |= EDP_PSR_TP2_TP3_TIME_0us;
684 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
685 val |= EDP_PSR_TP2_TP3_TIME_100us;
686 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
687 val |= EDP_PSR_TP2_TP3_TIME_500us;
688 else
689 val |= EDP_PSR_TP2_TP3_TIME_2500us;
690
691 /*
692 * WA 0479: hsw,bdw
693 * "Do not skip both TP1 and TP2/TP3"
694 */
695 if (DISPLAY_VER(dev_priv) < 9 &&
696 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
697 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
698 val |= EDP_PSR_TP2_TP3_TIME_100us;
699
700check_tp3_sel:
	if (intel_dp_source_supports_tps3(dev_priv) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
703 val |= EDP_PSR_TP_TP1_TP3;
704 else
705 val |= EDP_PSR_TP_TP1_TP2;
706
707 return val;
708}
709
710static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
711{
712 struct intel_connector *connector = intel_dp->attached_connector;
713 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
714 int idle_frames;
715
716 /* Let's use 6 as the minimum to cover all known cases including the
717 * off-by-one issue that HW has in some cases.
718 */
719 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
720 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
721
722 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
723 idle_frames = 0xf;
724
725 return idle_frames;
726}
727
728static void hsw_activate_psr1(struct intel_dp *intel_dp)
729{
730 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732 u32 max_sleep_time = 0x1f;
733 u32 val = EDP_PSR_ENABLE;
734
735 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
736
737 if (DISPLAY_VER(dev_priv) < 20)
738 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
739
740 if (IS_HASWELL(dev_priv))
741 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
742
743 if (intel_dp->psr.link_standby)
744 val |= EDP_PSR_LINK_STANDBY;
745
746 val |= intel_psr1_get_tp_time(intel_dp);
747
748 if (DISPLAY_VER(dev_priv) >= 8)
749 val |= EDP_PSR_CRC_ENABLE;
750
751 if (DISPLAY_VER(dev_priv) >= 20)
752 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
753
	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
756}
757
758static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
759{
760 struct intel_connector *connector = intel_dp->attached_connector;
761 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
762 u32 val = 0;
763
764 if (dev_priv->display.params.psr_safest_params)
765 return EDP_PSR2_TP2_TIME_2500us;
766
767 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
768 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
769 val |= EDP_PSR2_TP2_TIME_50us;
770 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
771 val |= EDP_PSR2_TP2_TIME_100us;
772 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
773 val |= EDP_PSR2_TP2_TIME_500us;
774 else
775 val |= EDP_PSR2_TP2_TIME_2500us;
776
777 return val;
778}
779
780static int psr2_block_count_lines(struct intel_dp *intel_dp)
781{
782 return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
783 intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
784}
785
786static int psr2_block_count(struct intel_dp *intel_dp)
787{
788 return psr2_block_count_lines(intel_dp) / 4;
789}
790
791static u8 frames_before_su_entry(struct intel_dp *intel_dp)
792{
793 u8 frames_before_su_entry;
794
795 frames_before_su_entry = max_t(u8,
796 intel_dp->psr.sink_sync_latency + 1,
797 2);
798
799 /* Entry setup frames must be at least 1 less than frames before SU entry */
800 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
801 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
802
803 return frames_before_su_entry;
804}
805
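/*
 * Panel Replay reuses the PSR2 manual tracking machinery: request continuous
 * full frame fetches and then set the Panel Replay enable bit in
 * TRANS_DP2_CTL.
 */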
806static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
807{
808 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
809
	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);

	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
		     TRANS_DP2_PANEL_REPLAY_ENABLE);
815}
816
817static void hsw_activate_psr2(struct intel_dp *intel_dp)
818{
819 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
820 struct intel_psr *psr = &intel_dp->psr;
821 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
822 u32 val = EDP_PSR2_ENABLE;
823 u32 psr_val = 0;
824
825 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
826
827 if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
828 val |= EDP_SU_TRACK_ENABLE;
829
830 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
831 val |= EDP_Y_COORDINATE_ENABLE;
832
833 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
834
835 val |= intel_psr2_get_tp_time(intel_dp);
836
837 if (DISPLAY_VER(dev_priv) >= 12) {
838 if (psr2_block_count(intel_dp) > 2)
839 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
840 else
841 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
842 }
843
844 /* Wa_22012278275:adl-p */
845 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
846 static const u8 map[] = {
847 2, /* 5 lines */
848 1, /* 6 lines */
849 0, /* 7 lines */
850 3, /* 8 lines */
851 6, /* 9 lines */
852 5, /* 10 lines */
853 4, /* 11 lines */
854 7, /* 12 lines */
855 };
856 /*
857 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
859 */
860 int tmp;
861
862 tmp = map[psr->alpm_parameters.io_wake_lines -
863 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
864 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
865
866 tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
867 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
868 } else if (DISPLAY_VER(dev_priv) >= 12) {
869 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
870 val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
871 } else if (DISPLAY_VER(dev_priv) >= 9) {
872 val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
873 val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
874 }
875
876 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
877 val |= EDP_PSR2_SU_SDP_SCANLINE;
878
879 if (DISPLAY_VER(dev_priv) >= 20)
880 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
881
882 if (intel_dp->psr.psr2_sel_fetch_enabled) {
883 u32 tmp;
884
		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
886 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
887 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
889 }
890
891 if (psr2_su_region_et_valid(intel_dp))
892 val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
893
894 /*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);

	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
901}
902
903static bool
904transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
905{
906 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
907 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
908 else if (DISPLAY_VER(dev_priv) >= 12)
909 return cpu_transcoder == TRANSCODER_A;
910 else if (DISPLAY_VER(dev_priv) >= 9)
911 return cpu_transcoder == TRANSCODER_EDP;
912 else
913 return false;
914}
915
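/*
 * Frame time in us for the given CRTC state (0 if inactive); e.g. a 60 Hz
 * mode gives DIV_ROUND_UP(1000000, 60) = 16667 us.
 */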
916static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
917{
918 if (!crtc_state->hw.active)
919 return 0;
920
921 return DIV_ROUND_UP(1000 * 1000,
922 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
923}
924
925static void psr2_program_idle_frames(struct intel_dp *intel_dp,
926 u32 idle_frames)
927{
928 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
929 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
930
	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
932 EDP_PSR2_IDLE_FRAMES_MASK,
933 EDP_PSR2_IDLE_FRAMES(idle_frames));
934}
935
936static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
937{
938 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
939
	psr2_program_idle_frames(intel_dp, 0);
941 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
942}
943
944static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
945{
946 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
947
948 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
950}
951
952static void tgl_dc3co_disable_work(struct work_struct *work)
953{
954 struct intel_dp *intel_dp =
955 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
956
957 mutex_lock(&intel_dp->psr.lock);
958 /* If delayed work is pending, it is not idle */
959 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
960 goto unlock;
961
962 tgl_psr2_disable_dc3co(intel_dp);
963unlock:
	mutex_unlock(&intel_dp->psr.lock);
965}
966
967static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
968{
969 if (!intel_dp->psr.dc3co_exitline)
970 return;
971
	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
974 tgl_psr2_disable_dc3co(intel_dp);
975}
976
977static bool
978dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
979 struct intel_crtc_state *crtc_state)
980{
981 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
982 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
983 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
984 enum port port = dig_port->base.port;
985
986 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
987 return pipe <= PIPE_B && port <= PORT_B;
988 else
989 return pipe == PIPE_A && port == PORT_A;
990}
991
992static void
993tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
994 struct intel_crtc_state *crtc_state)
995{
996 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
997 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
998 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
999 u32 exit_scanlines;
1000
1001 /*
1002 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1003 * disable DC3CO until the changed dc3co activating/deactivating sequence
1004 * is applied. B.Specs:49196
1005 */
1006 return;
1007
1008 /*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1010 * TODO: when the issue is addressed, this restriction should be removed.
1011 */
1012 if (crtc_state->enable_psr2_sel_fetch)
1013 return;
1014
1015 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1016 return;
1017
1018 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1019 return;
1020
1021 /* Wa_16011303918:adl-p */
1022 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1023 return;
1024
1025 /*
1026 * DC3CO Exit time 200us B.Spec 49196
1027 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1028 */
1029 exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1031
1032 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1033 return;
1034
1035 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1036}
1037
1038static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1039 struct intel_crtc_state *crtc_state)
1040{
1041 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1042
1043 if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1044 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1045 drm_dbg_kms(&dev_priv->drm,
1046 "PSR2 sel fetch not enabled, disabled by parameter\n");
1047 return false;
1048 }
1049
1050 if (crtc_state->uapi.async_flip) {
1051 drm_dbg_kms(&dev_priv->drm,
1052 "PSR2 sel fetch not enabled, async flip enabled\n");
1053 return false;
1054 }
1055
1056 if (psr2_su_region_et_valid(intel_dp))
1057 crtc_state->enable_psr2_su_region_et = true;
1058
1059 return crtc_state->enable_psr2_sel_fetch = true;
1060}
1061
1062static bool psr2_granularity_check(struct intel_dp *intel_dp,
1063 struct intel_crtc_state *crtc_state)
1064{
1065 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1066 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1067 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1068 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1069 u16 y_granularity = 0;
1070
1071 /* PSR2 HW only send full lines so we only need to validate the width */
1072 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1073 return false;
1074
1075 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1076 return false;
1077
1078 /* HW tracking is only aligned to 4 lines */
1079 if (!crtc_state->enable_psr2_sel_fetch)
1080 return intel_dp->psr.su_y_granularity == 4;
1081
1082 /*
1083 * adl_p and mtl platforms have 1 line granularity.
1084 * For other platforms with SW tracking we can adjust the y coordinates
1085 * to match sink requirement if multiple of 4.
1086 */
1087 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1088 y_granularity = intel_dp->psr.su_y_granularity;
1089 else if (intel_dp->psr.su_y_granularity <= 2)
1090 y_granularity = 4;
1091 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1092 y_granularity = intel_dp->psr.su_y_granularity;
1093
1094 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1095 return false;
1096
1097 if (crtc_state->dsc.compression_enable &&
1098 vdsc_cfg->slice_height % y_granularity)
1099 return false;
1100
1101 crtc_state->su_y_granularity = y_granularity;
1102 return true;
1103}
1104
1105static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1106 struct intel_crtc_state *crtc_state)
1107{
1108 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1109 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1110 u32 hblank_total, hblank_ns, req_ns;
1111
1112 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1114
1115 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1116 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
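	/* e.g. 4 lanes at port_clock 270000 (kHz): ((60 / 4) + 11) * 1000 / 270 = 96 ns */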
1117
1118 if ((hblank_ns - req_ns) > 100)
1119 return true;
1120
1121 /* Not supported <13 / Wa_22012279113:adl-p */
1122 if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1123 return false;
1124
1125 crtc_state->req_psr2_sdp_prior_scanline = true;
1126 return true;
1127}
1128
1129static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
1130 struct intel_crtc_state *crtc_state)
1131{
1132 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1133 int check_entry_lines;
1134
1135 if (DISPLAY_VER(i915) < 20)
1136 return true;
1137
1138 /* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
1139 check_entry_lines = 2 +
		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
1141
1142 if (check_entry_lines > 15)
1143 return false;
1144
1145 if (i915->display.params.psr_safest_params)
1146 check_entry_lines = 15;
1147
1148 intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
1149
1150 return true;
1151}
1152
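/*
 * Convert the IO wake and fast wake times (in us, per Bspec, with the fast
 * wake time bumped to 45 us on display 12+ based on testing) into scanlines
 * and reject PSR2 if they don't fit the IO_BUFFER_WAKE/FAST_WAKE fields.
 */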
1153static bool _compute_alpm_params(struct intel_dp *intel_dp,
1154 struct intel_crtc_state *crtc_state)
1155{
1156 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1157 int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1158 u8 max_wake_lines;
1159
1160 if (DISPLAY_VER(i915) >= 12) {
1161 io_wake_time = 42;
1162 /*
1163 * According to Bspec it's 42us, but based on testing
1164 * it is not enough -> use 45 us.
1165 */
1166 fast_wake_time = 45;
1167
1168 /* TODO: Check how we can use ALPM_CTL fast wake extended field */
1169 max_wake_lines = 12;
1170 } else {
1171 io_wake_time = 50;
1172 fast_wake_time = 32;
1173 max_wake_lines = 8;
1174 }
1175
	io_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, io_wake_time);
	fast_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, fast_wake_time);
1180
1181 if (io_wake_lines > max_wake_lines ||
1182 fast_wake_lines > max_wake_lines)
1183 return false;
1184
1185 if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
1186 return false;
1187
1188 if (i915->display.params.psr_safest_params)
1189 io_wake_lines = fast_wake_lines = max_wake_lines;
1190
1191 /* According to Bspec lower limit should be set as 7 lines. */
1192 intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
1193 intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
1194
1195 return true;
1196}
1197
1198static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1199 const struct drm_display_mode *adjusted_mode)
1200{
1201 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1203 int entry_setup_frames = 0;
1204
1205 if (psr_setup_time < 0) {
1206 drm_dbg_kms(&i915->drm,
1207 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1208 intel_dp->psr_dpcd[1]);
1209 return -ETIME;
1210 }
1211
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1213 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1214 if (DISPLAY_VER(i915) >= 20) {
1215 /* setup entry frames can be up to 3 frames */
1216 entry_setup_frames = 1;
1217 drm_dbg_kms(&i915->drm,
1218 "PSR setup entry frames %d\n",
1219 entry_setup_frames);
1220 } else {
1221 drm_dbg_kms(&i915->drm,
1222 "PSR condition failed: PSR setup time (%d us) too long\n",
1223 psr_setup_time);
1224 return -ETIME;
1225 }
1226 }
1227
1228 return entry_setup_frames;
1229}
1230
1231static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1232 struct intel_crtc_state *crtc_state)
1233{
1234 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1235 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1236 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1237 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1238
1239 if (!intel_dp->psr.sink_psr2_support)
1240 return false;
1241
1242 /* JSL and EHL only supports eDP 1.3 */
1243 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1244 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1245 return false;
1246 }
1247
1248 /* Wa_16011181250 */
1249 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1250 IS_DG2(dev_priv)) {
1251 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1252 return false;
1253 }
1254
1255 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1256 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1257 return false;
1258 }
1259
	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1261 drm_dbg_kms(&dev_priv->drm,
1262 "PSR2 not supported in transcoder %s\n",
1263 transcoder_name(crtc_state->cpu_transcoder));
1264 return false;
1265 }
1266
1267 if (!psr2_global_enabled(intel_dp)) {
1268 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1269 return false;
1270 }
1271
1272 /*
1273 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1274 * resolution requires DSC to be enabled, priority is given to DSC
1275 * over PSR2.
1276 */
1277 if (crtc_state->dsc.compression_enable &&
1278 (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1279 drm_dbg_kms(&dev_priv->drm,
1280 "PSR2 cannot be enabled since DSC is enabled\n");
1281 return false;
1282 }
1283
1284 if (crtc_state->crc_enabled) {
1285 drm_dbg_kms(&dev_priv->drm,
1286 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1287 return false;
1288 }
1289
1290 if (DISPLAY_VER(dev_priv) >= 12) {
1291 psr_max_h = 5120;
1292 psr_max_v = 3200;
1293 max_bpp = 30;
1294 } else if (DISPLAY_VER(dev_priv) >= 10) {
1295 psr_max_h = 4096;
1296 psr_max_v = 2304;
1297 max_bpp = 24;
1298 } else if (DISPLAY_VER(dev_priv) == 9) {
1299 psr_max_h = 3640;
1300 psr_max_v = 2304;
1301 max_bpp = 24;
1302 }
1303
1304 if (crtc_state->pipe_bpp > max_bpp) {
1305 drm_dbg_kms(&dev_priv->drm,
1306 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1307 crtc_state->pipe_bpp, max_bpp);
1308 return false;
1309 }
1310
1311 /* Wa_16011303918:adl-p */
1312 if (crtc_state->vrr.enable &&
1313 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1314 drm_dbg_kms(&dev_priv->drm,
1315 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1316 return false;
1317 }
1318
1319 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1320 drm_dbg_kms(&dev_priv->drm,
1321 "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1322 return false;
1323 }
1324
1325 if (!_compute_alpm_params(intel_dp, crtc_state)) {
1326 drm_dbg_kms(&dev_priv->drm,
1327 "PSR2 not enabled, Unable to use long enough wake times\n");
1328 return false;
1329 }
1330
1331 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1332 if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1333 crtc_state->hw.adjusted_mode.crtc_vblank_start <
1334 psr2_block_count_lines(intel_dp)) {
1335 drm_dbg_kms(&dev_priv->drm,
1336 "PSR2 not enabled, too short vblank time\n");
1337 return false;
1338 }
1339
1340 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1341 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1342 !HAS_PSR_HW_TRACKING(dev_priv)) {
1343 drm_dbg_kms(&dev_priv->drm,
1344 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1345 return false;
1346 }
1347 }
1348
1349 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1350 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1351 goto unsupported;
1352 }
1353
1354 if (!crtc_state->enable_psr2_sel_fetch &&
1355 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1356 drm_dbg_kms(&dev_priv->drm,
1357 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1358 crtc_hdisplay, crtc_vdisplay,
1359 psr_max_h, psr_max_v);
1360 goto unsupported;
1361 }
1362
1363 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1364 return true;
1365
1366unsupported:
1367 crtc_state->enable_psr2_sel_fetch = false;
1368 return false;
1369}
1370
1371static bool _psr_compute_config(struct intel_dp *intel_dp,
1372 struct intel_crtc_state *crtc_state)
1373{
1374 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1376 int entry_setup_frames;
1377
1378 /*
1379 * Current PSR panels don't work reliably with VRR enabled
1380 * So if VRR is enabled, do not enable PSR.
1381 */
1382 if (crtc_state->vrr.enable)
1383 return false;
1384
1385 if (!CAN_PSR(intel_dp))
1386 return false;
1387
1388 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1389
1390 if (entry_setup_frames >= 0) {
1391 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1392 } else {
1393 drm_dbg_kms(&dev_priv->drm,
1394 "PSR condition failed: PSR setup timing not met\n");
1395 return false;
1396 }
1397
1398 return true;
1399}
1400
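/**
 * intel_psr_compute_config - compute the PSR/Panel Replay state for a CRTC
 * @intel_dp: Intel DP struct
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 *
 * Checks the global enable knobs and mode restrictions, then fills in
 * crtc_state->has_panel_replay or crtc_state->has_psr and, when applicable,
 * crtc_state->has_psr2 and the related PSR2 parameters.
 */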
1401void intel_psr_compute_config(struct intel_dp *intel_dp,
1402 struct intel_crtc_state *crtc_state,
1403 struct drm_connector_state *conn_state)
1404{
1405 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1406 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1407
1408 if (!psr_global_enabled(intel_dp)) {
1409 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1410 return;
1411 }
1412
1413 if (intel_dp->psr.sink_not_reliable) {
1414 drm_dbg_kms(&dev_priv->drm,
1415 "PSR sink implementation is not reliable\n");
1416 return;
1417 }
1418
1419 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1420 drm_dbg_kms(&dev_priv->drm,
1421 "PSR condition failed: Interlaced mode enabled\n");
1422 return;
1423 }
1424
1425 /*
1426 * FIXME figure out what is wrong with PSR+bigjoiner and
1427 * fix it. Presumably something related to the fact that
1428 * PSR is a transcoder level feature.
1429 */
1430 if (crtc_state->bigjoiner_pipes) {
1431 drm_dbg_kms(&dev_priv->drm,
1432 "PSR disabled due to bigjoiner\n");
1433 return;
1434 }
1435
1436 if (CAN_PANEL_REPLAY(intel_dp))
1437 crtc_state->has_panel_replay = true;
1438 else
1439 crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
1440
1441 if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
1442 return;
1443
1444 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1445}
1446
1447void intel_psr_get_config(struct intel_encoder *encoder,
1448 struct intel_crtc_state *pipe_config)
1449{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1451 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1452 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1453 struct intel_dp *intel_dp;
1454 u32 val;
1455
1456 if (!dig_port)
1457 return;
1458
1459 intel_dp = &dig_port->dp;
1460 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1461 return;
1462
1463 mutex_lock(&intel_dp->psr.lock);
1464 if (!intel_dp->psr.enabled)
1465 goto unlock;
1466
1467 if (intel_dp->psr.panel_replay_enabled) {
1468 pipe_config->has_panel_replay = true;
1469 } else {
1470 /*
1471 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1472 * enabled/disabled because of frontbuffer tracking and others.
1473 */
1474 pipe_config->has_psr = true;
1475 }
1476
1477 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1478 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1479
1480 if (!intel_dp->psr.psr2_enabled)
1481 goto unlock;
1482
1483 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1485 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1486 pipe_config->enable_psr2_sel_fetch = true;
1487 }
1488
1489 if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1491 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1492 }
1493unlock:
	mutex_unlock(&intel_dp->psr.lock);
1495}
1496
1497static void intel_psr_activate(struct intel_dp *intel_dp)
1498{
1499 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1500 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1501
1502 drm_WARN_ON(&dev_priv->drm,
1503 transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1504 intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1505
1506 drm_WARN_ON(&dev_priv->drm,
1507 intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1508
1509 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1510
1511 lockdep_assert_held(&intel_dp->psr.lock);
1512
1513 /* psr1, psr2 and panel-replay are mutually exclusive.*/
1514 if (intel_dp->psr.panel_replay_enabled)
1515 dg2_activate_panel_replay(intel_dp);
1516 else if (intel_dp->psr.psr2_enabled)
1517 hsw_activate_psr2(intel_dp);
1518 else
1519 hsw_activate_psr1(intel_dp);
1520
1521 intel_dp->psr.active = true;
1522}
1523
1524static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1525{
1526 switch (intel_dp->psr.pipe) {
1527 case PIPE_A:
1528 return LATENCY_REPORTING_REMOVED_PIPE_A;
1529 case PIPE_B:
1530 return LATENCY_REPORTING_REMOVED_PIPE_B;
1531 case PIPE_C:
1532 return LATENCY_REPORTING_REMOVED_PIPE_C;
1533 case PIPE_D:
1534 return LATENCY_REPORTING_REMOVED_PIPE_D;
1535 default:
1536 MISSING_CASE(intel_dp->psr.pipe);
1537 return 0;
1538 }
1539}
1540
1541/*
1542 * Wa_16013835468
1543 * Wa_14015648006
1544 */
1545static void wm_optimization_wa(struct intel_dp *intel_dp,
1546 const struct intel_crtc_state *crtc_state)
1547{
1548 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1549 bool set_wa_bit = false;
1550
1551 /* Wa_14015648006 */
1552 if (IS_DISPLAY_VER(dev_priv, 11, 14))
1553 set_wa_bit |= crtc_state->wm_level_disabled;
1554
1555 /* Wa_16013835468 */
1556 if (DISPLAY_VER(dev_priv) == 12)
1557 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1558 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1559
1560 if (set_wa_bit)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, wa_16013835468_bit_get(intel_dp));
	else
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);
1566}
1567
1568static void lnl_alpm_configure(struct intel_dp *intel_dp)
1569{
1570 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1571 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1572 struct intel_psr *psr = &intel_dp->psr;
1573
1574 if (DISPLAY_VER(dev_priv) < 20)
1575 return;
1576
	intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
1578 ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
1579 ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
1580 ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
1581}
1582
1583static void intel_psr_enable_source(struct intel_dp *intel_dp,
1584 const struct intel_crtc_state *crtc_state)
1585{
1586 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1587 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1588 u32 mask;
1589
1590 /*
	 * Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
1593 */
1594 if (DISPLAY_VER(dev_priv) < 9)
1595 hsw_psr_setup_aux(intel_dp);
1596
1597 /*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm. Besides preventing other hw tracking issues, now we
	 * can rely on frontbuffer tracking.
1602 */
1603 mask = EDP_PSR_DEBUG_MASK_MEMUP |
1604 EDP_PSR_DEBUG_MASK_HPD;
1605
1606 /*
1607 * For some unknown reason on HSW non-ULT (or at least on
1608 * Dell Latitude E6540) external displays start to flicker
1609 * when PSR is enabled on the eDP. SR/PC6 residency is much
1610 * higher than should be possible with an external display.
1611 * As a workaround leave LPSP unmasked to prevent PSR entry
1612 * when external displays are active.
1613 */
1614 if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1615 mask |= EDP_PSR_DEBUG_MASK_LPSP;
1616
1617 if (DISPLAY_VER(dev_priv) < 20)
1618 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1619
1620 /*
1621 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1622 * registers in order to keep the CURSURFLIVE tricks working :(
1623 */
1624 if (IS_DISPLAY_VER(dev_priv, 9, 10))
1625 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1626
1627 /* allow PSR with sprite enabled */
1628 if (IS_HASWELL(dev_priv))
1629 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1630
	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1632
1633 psr_irq_control(intel_dp);
1634
1635 /*
1636 * TODO: if future platforms supports DC3CO in more than one
1637 * transcoder, EXITLINE will need to be unset when disabling PSR
1638 */
1639 if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1642
1643 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
1646 IGNORE_PSR2_HW_TRACKING : 0);
1647
1648 lnl_alpm_configure(intel_dp);
1649
1650 /*
1651 * Wa_16013835468
1652 * Wa_14015648006
1653 */
1654 wm_optimization_wa(intel_dp, crtc_state);
1655
1656 if (intel_dp->psr.psr2_enabled) {
1657 if (DISPLAY_VER(dev_priv) == 9)
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1659 PSR2_VSC_ENABLE_PROG_HEADER |
1660 PSR2_ADD_VERTICAL_LINE_COUNT);
1661
1662 /*
1663 * Wa_16014451276:adlp,mtl[a0,b0]
1664	 * All supported adlp panels have 1-based X granularity; this may
1665	 * cause issues if unsupported panels are used.
1666 */
1667 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1668 IS_ALDERLAKE_P(dev_priv))
1669 intel_de_rmw(i915: dev_priv, reg: hsw_chicken_trans_reg(i915: dev_priv, cpu_transcoder),
1670 clear: 0, ADLP_1_BASED_X_GRANULARITY);
1671
1672 /* Wa_16012604467:adlp,mtl[a0,b0] */
1673 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1674 intel_de_rmw(i915: dev_priv,
1675 MTL_CLKGATE_DIS_TRANS(cpu_transcoder), clear: 0,
1676 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1677 else if (IS_ALDERLAKE_P(dev_priv))
1678 intel_de_rmw(i915: dev_priv, CLKGATE_DIS_MISC, clear: 0,
1679 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1680 }
1681}
1682
1683static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1684{
1685 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1686 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1687 u32 val;
1688
1689 /*
1690 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1691 * will still keep the error set even after the reset done in the
1692 * irq_preinstall and irq_uninstall hooks.
1693	 * Enabling PSR in this situation causes the screen to freeze the
1694	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1695	 * to avoid any rendering problems.
1696 */
1697 val = intel_de_read(i915: dev_priv, reg: psr_iir_reg(dev_priv, cpu_transcoder));
1698 val &= psr_irq_psr_error_bit_get(intel_dp);
1699 if (val) {
1700 intel_dp->psr.sink_not_reliable = true;
1701 drm_dbg_kms(&dev_priv->drm,
1702 "PSR interruption error set, not enabling PSR\n");
1703 return false;
1704 }
1705
1706 return true;
1707}
1708
1709static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1710 const struct intel_crtc_state *crtc_state)
1711{
1712 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1713 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1714 enum phy phy = intel_port_to_phy(i915: dev_priv, port: dig_port->base.port);
1715 u32 val;
1716
1717 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1718
1719 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1720 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1721 intel_dp->psr.busy_frontbuffer_bits = 0;
1722 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1723 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1724 /* DC5/DC6 requires at least 6 idle frames */
1725 val = usecs_to_jiffies(u: intel_get_frame_time_us(crtc_state) * 6);
1726 intel_dp->psr.dc3co_exit_delay = val;
1727 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1728 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1729 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1730 intel_dp->psr.req_psr2_sdp_prior_scanline =
1731 crtc_state->req_psr2_sdp_prior_scanline;
1732
1733 if (!psr_interrupt_error_check(intel_dp))
1734 return;
1735
1736 if (intel_dp->psr.panel_replay_enabled)
1737 drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1738 else
1739 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1740 intel_dp->psr.psr2_enabled ? "2" : "1");
1741
1742 intel_snps_phy_update_psr_power_state(dev_priv, phy, enable: true);
1743 intel_psr_enable_sink(intel_dp);
1744 intel_psr_enable_source(intel_dp, crtc_state);
1745 intel_dp->psr.enabled = true;
1746 intel_dp->psr.paused = false;
1747
1748 intel_psr_activate(intel_dp);
1749}
1750
1751static void intel_psr_exit(struct intel_dp *intel_dp)
1752{
1753 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1754 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1755 u32 val;
1756
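	/* If PSR was never activated, just sanity check that the HW agrees. */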
1757 if (!intel_dp->psr.active) {
1758 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1759 val = intel_de_read(i915: dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1760 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1761 }
1762
1763 val = intel_de_read(i915: dev_priv, reg: psr_ctl_reg(dev_priv, cpu_transcoder));
1764 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1765
1766 return;
1767 }
1768
1769 if (intel_dp->psr.panel_replay_enabled) {
1770 intel_de_rmw(i915: dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1771 TRANS_DP2_PANEL_REPLAY_ENABLE, set: 0);
1772 } else if (intel_dp->psr.psr2_enabled) {
1773 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1774
1775 val = intel_de_rmw(i915: dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1776 EDP_PSR2_ENABLE, set: 0);
1777
1778 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1779 } else {
1780 val = intel_de_rmw(i915: dev_priv, reg: psr_ctl_reg(dev_priv, cpu_transcoder),
1781 EDP_PSR_ENABLE, set: 0);
1782
1783 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1784 }
1785 intel_dp->psr.active = false;
1786}
1787
1788static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1789{
1790 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1791 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1792 i915_reg_t psr_status;
1793 u32 psr_status_mask;
1794
1795 if (intel_dp->psr.psr2_enabled) {
1796 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1797 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1798 } else {
1799 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1800 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1801 }
1802
1803 /* Wait till PSR is idle */
1804 if (intel_de_wait_for_clear(i915: dev_priv, reg: psr_status,
1805 mask: psr_status_mask, timeout: 2000))
1806 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1807}
1808
1809static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1810{
1811 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1812 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1813 enum phy phy = intel_port_to_phy(i915: dev_priv,
1814 port: dp_to_dig_port(intel_dp)->base.port);
1815
1816 lockdep_assert_held(&intel_dp->psr.lock);
1817
1818 if (!intel_dp->psr.enabled)
1819 return;
1820
1821 if (intel_dp->psr.panel_replay_enabled)
1822 drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1823 else
1824 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1825 intel_dp->psr.psr2_enabled ? "2" : "1");
1826
1827 intel_psr_exit(intel_dp);
1828 intel_psr_wait_exit_locked(intel_dp);
1829
1830 /*
1831 * Wa_16013835468
1832 * Wa_14015648006
1833 */
1834 if (DISPLAY_VER(dev_priv) >= 11)
1835 intel_de_rmw(i915: dev_priv, GEN8_CHICKEN_DCPR_1,
1836 clear: wa_16013835468_bit_get(intel_dp), set: 0);
1837
1838 if (intel_dp->psr.psr2_enabled) {
1839 /* Wa_16012604467:adlp,mtl[a0,b0] */
1840 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1841 intel_de_rmw(i915: dev_priv,
1842 MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1843 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, set: 0);
1844 else if (IS_ALDERLAKE_P(dev_priv))
1845 intel_de_rmw(i915: dev_priv, CLKGATE_DIS_MISC,
1846 CLKGATE_DIS_MISC_DMASC_GATING_DIS, set: 0);
1847 }
1848
1849 intel_snps_phy_update_psr_power_state(dev_priv, phy, enable: false);
1850
1851 /* Disable PSR on Sink */
1852 drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_PSR_EN_CFG, value: 0);
1853
1854 if (intel_dp->psr.psr2_enabled)
1855 drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, value: 0);
1856
1857 intel_dp->psr.enabled = false;
1858 intel_dp->psr.panel_replay_enabled = false;
1859 intel_dp->psr.psr2_enabled = false;
1860 intel_dp->psr.psr2_sel_fetch_enabled = false;
1861 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1862}
1863
1864/**
1865 * intel_psr_disable - Disable PSR
1866 * @intel_dp: Intel DP
1867 * @old_crtc_state: old CRTC state
1868 *
1869 * This function needs to be called before disabling the pipe.
1870 */
1871void intel_psr_disable(struct intel_dp *intel_dp,
1872 const struct intel_crtc_state *old_crtc_state)
1873{
1874 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1875
1876 if (!old_crtc_state->has_psr)
1877 return;
1878
1879 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1880 return;
1881
1882 mutex_lock(&intel_dp->psr.lock);
1883
1884 intel_psr_disable_locked(intel_dp);
1885
1886 mutex_unlock(lock: &intel_dp->psr.lock);
1887 cancel_work_sync(work: &intel_dp->psr.work);
1888 cancel_delayed_work_sync(dwork: &intel_dp->psr.dc3co_work);
1889}
1890
1891/**
1892 * intel_psr_pause - Pause PSR
1893 * @intel_dp: Intel DP
1894 *
1895 * This function needs to be called after enabling PSR.
1896 */
1897void intel_psr_pause(struct intel_dp *intel_dp)
1898{
1899 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1900 struct intel_psr *psr = &intel_dp->psr;
1901
1902 if (!CAN_PSR(intel_dp))
1903 return;
1904
1905 mutex_lock(&psr->lock);
1906
1907 if (!psr->enabled) {
1908 mutex_unlock(lock: &psr->lock);
1909 return;
1910 }
1911
1912 /* If we ever hit this, we will need to add refcount to pause/resume */
1913 drm_WARN_ON(&dev_priv->drm, psr->paused);
1914
1915 intel_psr_exit(intel_dp);
1916 intel_psr_wait_exit_locked(intel_dp);
1917 psr->paused = true;
1918
1919 mutex_unlock(lock: &psr->lock);
1920
1921 cancel_work_sync(work: &psr->work);
1922 cancel_delayed_work_sync(dwork: &psr->dc3co_work);
1923}
1924
1925/**
1926 * intel_psr_resume - Resume PSR
1927 * @intel_dp: Intel DP
1928 *
1929 * This function needs to be called after pausing PSR.
1930 */
1931void intel_psr_resume(struct intel_dp *intel_dp)
1932{
1933 struct intel_psr *psr = &intel_dp->psr;
1934
1935 if (!CAN_PSR(intel_dp))
1936 return;
1937
1938 mutex_lock(&psr->lock);
1939
1940 if (!psr->paused)
1941 goto unlock;
1942
1943 psr->paused = false;
1944 intel_psr_activate(intel_dp);
1945
1946unlock:
1947 mutex_unlock(lock: &psr->lock);
1948}
1949
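/*
 * The PSR2_MAN_TRK_CTL bit layout differs between platforms: ADL-P and
 * display 14+ have no dedicated enable bit and use different positions for
 * the single/continuous full frame and partial frame bits, hence these
 * platform-specific helpers.
 */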
1950static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1951{
1952 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1953 PSR2_MAN_TRK_CTL_ENABLE;
1954}
1955
1956static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1957{
1958 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1959 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1960 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1961}
1962
1963static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1964{
1965 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1966 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1967 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1968}
1969
1970static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1971{
1972 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1973 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1974 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1975}
1976
1977static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1978{
1979 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1980 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1981
1982 if (intel_dp->psr.psr2_sel_fetch_enabled)
1983 intel_de_write(i915: dev_priv,
1984 PSR2_MAN_TRK_CTL(cpu_transcoder),
1985 val: man_trk_ctl_enable_bit_get(dev_priv) |
1986 man_trk_ctl_partial_frame_bit_get(dev_priv) |
1987 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1988 man_trk_ctl_continuos_full_frame(dev_priv));
1989
1990 /*
1991 * Display WA #0884: skl+
1992 * This documented WA for bxt can be safely applied
1993 * broadly so we can force HW tracking to exit PSR
1994 * instead of disabling and re-enabling.
1995	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1996	 * but it makes more sense to write to the currently
1997	 * active pipe.
1998	 *
1999	 * This workaround is not documented for platforms with display version 10
2000	 * or newer, but testing proved that it works up to display version 13;
2001	 * for anything newer, further testing will be needed.
2002 */
2003 intel_de_write(i915: dev_priv, CURSURFLIVE(intel_dp->psr.pipe), val: 0);
2004}
2005
2006void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2007{
2008 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2009 struct drm_i915_private *dev_priv = to_i915(dev: crtc_state->uapi.crtc->dev);
2010 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2011 struct intel_encoder *encoder;
2012
2013 if (!crtc_state->enable_psr2_sel_fetch)
2014 return;
2015
2016 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2017 crtc_state->uapi.encoder_mask) {
2018 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2019
2020 lockdep_assert_held(&intel_dp->psr.lock);
2021 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2022 return;
2023 break;
2024 }
2025
2026 intel_de_write(i915: dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2027 val: crtc_state->psr2_man_track_ctl);
2028
2029 if (!crtc_state->enable_psr2_su_region_et)
2030 return;
2031
2032 intel_de_write(i915: dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2033 val: crtc_state->pipe_srcsz_early_tpt);
2034}
2035
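/*
 * Translate the computed SU area into a PSR2_MAN_TRK_CTL value: request a
 * single + continuous full frame for full updates, otherwise program the SU
 * region start/end addresses (expressed in 4-line blocks on older platforms).
 */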
2036static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2037 bool full_update)
2038{
2039 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2040 struct drm_i915_private *dev_priv = to_i915(dev: crtc->base.dev);
2041 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2042
2043 /* SF partial frame enable has to be set even on full update */
2044 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2045
2046 if (full_update) {
2047 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2048 val |= man_trk_ctl_continuos_full_frame(dev_priv);
2049 goto exit;
2050 }
2051
2052 if (crtc_state->psr2_su_area.y1 == -1)
2053 goto exit;
2054
2055 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2056 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2057 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2058 } else {
2059 drm_WARN_ON(crtc_state->uapi.crtc->dev,
2060 crtc_state->psr2_su_area.y1 % 4 ||
2061 crtc_state->psr2_su_area.y2 % 4);
2062
2063 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2064 crtc_state->psr2_su_area.y1 / 4 + 1);
2065 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2066 crtc_state->psr2_su_area.y2 / 4 + 1);
2067 }
2068exit:
2069 crtc_state->psr2_man_track_ctl = val;
2070}
2071
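/*
 * For early transport PIPE_SRCSZ_ERLY_TPT is programmed with the SU area
 * size; return 0 (nothing to program) when early transport is not enabled
 * or a full frame update is being done.
 */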
2072static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2073 bool full_update)
2074{
2075 int width, height;
2076
2077 if (!crtc_state->enable_psr2_su_region_et || full_update)
2078 return 0;
2079
2080 width = drm_rect_width(r: &crtc_state->psr2_su_area);
2081 height = drm_rect_height(r: &crtc_state->psr2_su_area);
2082
2083 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2084}
2085
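/*
 * Merge a damage rectangle into the overlapping damage area (the SU area
 * union), clipping it against the pipe source first. A y1 of -1 means the
 * union is still empty.
 */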
2086static void clip_area_update(struct drm_rect *overlap_damage_area,
2087 struct drm_rect *damage_area,
2088 struct drm_rect *pipe_src)
2089{
2090 if (!drm_rect_intersect(r: damage_area, clip: pipe_src))
2091 return;
2092
2093 if (overlap_damage_area->y1 == -1) {
2094 overlap_damage_area->y1 = damage_area->y1;
2095 overlap_damage_area->y2 = damage_area->y2;
2096 return;
2097 }
2098
2099 if (damage_area->y1 < overlap_damage_area->y1)
2100 overlap_damage_area->y1 = damage_area->y1;
2101
2102 if (damage_area->y2 > overlap_damage_area->y2)
2103 overlap_damage_area->y2 = damage_area->y2;
2104}
2105
2106static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2107{
2108 struct drm_i915_private *dev_priv = to_i915(dev: crtc_state->uapi.crtc->dev);
2109 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2110 u16 y_alignment;
2111
2112 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2113 if (crtc_state->dsc.compression_enable &&
2114 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2115 y_alignment = vdsc_cfg->slice_height;
2116 else
2117 y_alignment = crtc_state->su_y_granularity;
2118
2119 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2120 if (crtc_state->psr2_su_area.y2 % y_alignment)
2121 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2122 y_alignment) + 1) * y_alignment;
2123}
2124
2125/*
2126 * When early transport is in use we need to extend the SU area to cover
2127 * the cursor fully when the cursor is in the SU area.
2128 */
2129static void
2130intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2131 struct intel_crtc *crtc)
2132{
2133 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2134 struct intel_plane_state *new_plane_state;
2135 struct intel_plane *plane;
2136 int i;
2137
2138 if (!crtc_state->enable_psr2_su_region_et)
2139 return;
2140
2141 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2142 struct drm_rect inter;
2143
2144 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2145 continue;
2146
2147 if (plane->id != PLANE_CURSOR)
2148 continue;
2149
2150 if (!new_plane_state->uapi.visible)
2151 continue;
2152
2153 inter = crtc_state->psr2_su_area;
2154 if (!drm_rect_intersect(r: &inter, clip: &new_plane_state->uapi.dst))
2155 continue;
2156
2157 clip_area_update(overlap_damage_area: &crtc_state->psr2_su_area, damage_area: &new_plane_state->uapi.dst,
2158 pipe_src: &crtc_state->pipe_src);
2159 }
2160}
2161
2162/*
2163 * TODO: It is not clear how to handle planes with a negative position;
2164 * planes are also not updated if they have a negative X
2165 * position, so for now do a full update in these cases.
2166 *
2167 * Plane scaling and rotation are not supported by selective fetch and both
2168 * properties can change without a modeset, so they need to be checked at
2169 * every atomic commit.
2170 */
2171static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2172{
2173 if (plane_state->uapi.dst.y1 < 0 ||
2174 plane_state->uapi.dst.x1 < 0 ||
2175 plane_state->scaler_id >= 0 ||
2176 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2177 return false;
2178
2179 return true;
2180}
2181
2182/*
2183 * Check for pipe properties that are not supported by selective fetch.
2184 *
2185 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2186 * after intel_psr_compute_config(), so for now keep PSR2 selective fetch
2187 * enabled and take the full update path.
2188 */
2189static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2190{
2191 if (crtc_state->scaler_state.scaler_id >= 0)
2192 return false;
2193
2194 return true;
2195}
2196
2197int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2198 struct intel_crtc *crtc)
2199{
2200 struct drm_i915_private *dev_priv = to_i915(dev: state->base.dev);
2201 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2202 struct intel_plane_state *new_plane_state, *old_plane_state;
2203 struct intel_plane *plane;
2204 bool full_update = false;
2205 int i, ret;
2206
2207 if (!crtc_state->enable_psr2_sel_fetch)
2208 return 0;
2209
2210 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2211 full_update = true;
2212 goto skip_sel_fetch_set_loop;
2213 }
2214
2215 crtc_state->psr2_su_area.x1 = 0;
2216 crtc_state->psr2_su_area.y1 = -1;
2217 crtc_state->psr2_su_area.x2 = INT_MAX;
2218 crtc_state->psr2_su_area.y2 = -1;
2219
2220 /*
2221	 * Calculate the minimal selective fetch area of each plane and calculate
2222	 * the pipe damaged area.
2223	 * In the next loop the plane selective fetch area will actually be set
2224	 * using the whole pipe damaged area.
2225 */
2226 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2227 new_plane_state, i) {
2228 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2229 .x2 = INT_MAX };
2230
2231 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2232 continue;
2233
2234 if (!new_plane_state->uapi.visible &&
2235 !old_plane_state->uapi.visible)
2236 continue;
2237
2238 if (!psr2_sel_fetch_plane_state_supported(plane_state: new_plane_state)) {
2239 full_update = true;
2240 break;
2241 }
2242
2243 /*
2244		 * If the visibility changed or the plane moved, mark the whole plane
2245		 * area as damaged as it needs to be completely redrawn in both the
2246		 * old and new positions.
2247 */
2248 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2249 !drm_rect_equals(r1: &new_plane_state->uapi.dst,
2250 r2: &old_plane_state->uapi.dst)) {
2251 if (old_plane_state->uapi.visible) {
2252 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2253 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2254 clip_area_update(overlap_damage_area: &crtc_state->psr2_su_area, damage_area: &damaged_area,
2255 pipe_src: &crtc_state->pipe_src);
2256 }
2257
2258 if (new_plane_state->uapi.visible) {
2259 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2260 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2261 clip_area_update(overlap_damage_area: &crtc_state->psr2_su_area, damage_area: &damaged_area,
2262 pipe_src: &crtc_state->pipe_src);
2263 }
2264 continue;
2265 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2266 /* If alpha changed mark the whole plane area as damaged */
2267 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2268 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2269 clip_area_update(overlap_damage_area: &crtc_state->psr2_su_area, damage_area: &damaged_area,
2270 pipe_src: &crtc_state->pipe_src);
2271 continue;
2272 }
2273
2274 src = drm_plane_state_src(state: &new_plane_state->uapi);
2275 drm_rect_fp_to_int(dst: &src, src: &src);
2276
2277 if (!drm_atomic_helper_damage_merged(old_state: &old_plane_state->uapi,
2278 state: &new_plane_state->uapi, rect: &damaged_area))
2279 continue;
2280
2281 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2282 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2283 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2284 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2285
2286 clip_area_update(overlap_damage_area: &crtc_state->psr2_su_area, damage_area: &damaged_area, pipe_src: &crtc_state->pipe_src);
2287 }
2288
2289 /*
2290 * TODO: For now we are just using full update in case
2291 * selective fetch area calculation fails. To optimize this we
2292 * should identify cases where this happens and fix the area
2293 * calculation for those.
2294 */
2295 if (crtc_state->psr2_su_area.y1 == -1) {
2296 drm_info_once(&dev_priv->drm,
2297 "Selective fetch area calculation failed in pipe %c\n",
2298 pipe_name(crtc->pipe));
2299 full_update = true;
2300 }
2301
2302 if (full_update)
2303 goto skip_sel_fetch_set_loop;
2304
2305 /* Wa_14014971492 */
2306 if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2307 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2308 crtc_state->splitter.enable)
2309 crtc_state->psr2_su_area.y1 = 0;
2310
2311 ret = drm_atomic_add_affected_planes(state: &state->base, crtc: &crtc->base);
2312 if (ret)
2313 return ret;
2314
2315 /*
2316	 * Adjust the SU area to fully cover the cursor as necessary (early
2317	 * transport). This needs to be done after
2318	 * drm_atomic_add_affected_planes to ensure a visible cursor is added to the
2319	 * affected planes even when the cursor is not updated by itself.
2320 */
2321 intel_psr2_sel_fetch_et_alignment(state, crtc);
2322
2323 intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2324
2325 /*
2326	 * Now that we have the pipe damaged area, check if it intersects with
2327	 * each plane; if it does, set the plane selective fetch area.
2328 */
2329 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2330 new_plane_state, i) {
2331 struct drm_rect *sel_fetch_area, inter;
2332 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2333
2334 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2335 !new_plane_state->uapi.visible)
2336 continue;
2337
2338 inter = crtc_state->psr2_su_area;
2339 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2340 if (!drm_rect_intersect(r: &inter, clip: &new_plane_state->uapi.dst)) {
2341 sel_fetch_area->y1 = -1;
2342 sel_fetch_area->y2 = -1;
2343 /*
2344 * if plane sel fetch was previously enabled ->
2345 * disable it
2346 */
2347 if (drm_rect_height(r: &old_plane_state->psr2_sel_fetch_area) > 0)
2348 crtc_state->update_planes |= BIT(plane->id);
2349
2350 continue;
2351 }
2352
2353 if (!psr2_sel_fetch_plane_state_supported(plane_state: new_plane_state)) {
2354 full_update = true;
2355 break;
2356 }
2357
2358 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2359 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2360 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2361 crtc_state->update_planes |= BIT(plane->id);
2362
2363 /*
2364 * Sel_fetch_area is calculated for UV plane. Use
2365 * same area for Y plane as well.
2366 */
2367 if (linked) {
2368 struct intel_plane_state *linked_new_plane_state;
2369 struct drm_rect *linked_sel_fetch_area;
2370
2371 linked_new_plane_state = intel_atomic_get_plane_state(state, plane: linked);
2372 if (IS_ERR(ptr: linked_new_plane_state))
2373 return PTR_ERR(ptr: linked_new_plane_state);
2374
2375 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2376 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2377 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2378 crtc_state->update_planes |= BIT(linked->id);
2379 }
2380 }
2381
2382skip_sel_fetch_set_loop:
2383 psr2_man_trk_ctl_calc(crtc_state, full_update);
2384 crtc_state->pipe_srcsz_early_tpt =
2385 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
2386 return 0;
2387}
2388
2389void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2390 struct intel_crtc *crtc)
2391{
2392 struct drm_i915_private *i915 = to_i915(dev: state->base.dev);
2393 const struct intel_crtc_state *old_crtc_state =
2394 intel_atomic_get_old_crtc_state(state, crtc);
2395 const struct intel_crtc_state *new_crtc_state =
2396 intel_atomic_get_new_crtc_state(state, crtc);
2397 struct intel_encoder *encoder;
2398
2399 if (!HAS_PSR(i915))
2400 return;
2401
2402 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2403 old_crtc_state->uapi.encoder_mask) {
2404 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2405 struct intel_psr *psr = &intel_dp->psr;
2406 bool needs_to_disable = false;
2407
2408 mutex_lock(&psr->lock);
2409
2410 /*
2411 * Reasons to disable:
2412 * - PSR disabled in new state
2413 * - All planes will go inactive
2414 * - Changing between PSR versions
2415 * - Display WA #1136: skl, bxt
2416 */
2417 needs_to_disable |= intel_crtc_needs_modeset(crtc_state: new_crtc_state);
2418 needs_to_disable |= !new_crtc_state->has_psr;
2419 needs_to_disable |= !new_crtc_state->active_planes;
2420 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2421 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2422 new_crtc_state->wm_level_disabled;
2423
2424 if (psr->enabled && needs_to_disable)
2425 intel_psr_disable_locked(intel_dp);
2426 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2427 /* Wa_14015648006 */
2428 wm_optimization_wa(intel_dp, crtc_state: new_crtc_state);
2429
2430 mutex_unlock(lock: &psr->lock);
2431 }
2432}
2433
2434void intel_psr_post_plane_update(struct intel_atomic_state *state,
2435 struct intel_crtc *crtc)
2436{
2437 struct drm_i915_private *dev_priv = to_i915(dev: state->base.dev);
2438 const struct intel_crtc_state *crtc_state =
2439 intel_atomic_get_new_crtc_state(state, crtc);
2440 struct intel_encoder *encoder;
2441
2442 if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2443 return;
2444
2445 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2446 crtc_state->uapi.encoder_mask) {
2447 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2448 struct intel_psr *psr = &intel_dp->psr;
2449 bool keep_disabled = false;
2450
2451 mutex_lock(&psr->lock);
2452
2453 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2454
2455 keep_disabled |= psr->sink_not_reliable;
2456 keep_disabled |= !crtc_state->active_planes;
2457
2458 /* Display WA #1136: skl, bxt */
2459 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2460 crtc_state->wm_level_disabled;
2461
2462 if (!psr->enabled && !keep_disabled)
2463 intel_psr_enable_locked(intel_dp, crtc_state);
2464 else if (psr->enabled && !crtc_state->wm_level_disabled)
2465 /* Wa_14015648006 */
2466 wm_optimization_wa(intel_dp, crtc_state);
2467
2468 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2469 if (crtc_state->crc_enabled && psr->enabled)
2470 psr_force_hw_tracking_exit(intel_dp);
2471
2472 /*
2473 * Clear possible busy bits in case we have
2474 * invalidate -> flip -> flush sequence.
2475 */
2476 intel_dp->psr.busy_frontbuffer_bits = 0;
2477
2478 mutex_unlock(lock: &psr->lock);
2479 }
2480}
2481
2482static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2483{
2484 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2485 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2486
2487 /*
2488 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2489	 * As all higher states have bit 4 of the PSR2 state set, we can just wait
2490	 * for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2491 */
2492 return intel_de_wait_for_clear(i915: dev_priv,
2493 EDP_PSR2_STATUS(cpu_transcoder),
2494 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, timeout: 50);
2495}
2496
2497static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2498{
2499 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2500 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2501
2502 /*
2503 * From bspec: Panel Self Refresh (BDW+)
2504 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2505 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2506 * defensive enough to cover everything.
2507 */
2508 return intel_de_wait_for_clear(i915: dev_priv,
2509 reg: psr_status_reg(dev_priv, cpu_transcoder),
2510 EDP_PSR_STATUS_STATE_MASK, timeout: 50);
2511}
2512
2513/**
2514 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2515 * @new_crtc_state: new CRTC state
2516 *
2517 * This function is expected to be called from pipe_update_start() where it is
2518 * not expected to race with PSR enable or disable.
2519 */
2520void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2521{
2522 struct drm_i915_private *dev_priv = to_i915(dev: new_crtc_state->uapi.crtc->dev);
2523 struct intel_encoder *encoder;
2524
2525 if (!new_crtc_state->has_psr)
2526 return;
2527
2528 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2529 new_crtc_state->uapi.encoder_mask) {
2530 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2531 int ret;
2532
2533 lockdep_assert_held(&intel_dp->psr.lock);
2534
2535 if (!intel_dp->psr.enabled)
2536 continue;
2537
2538 if (intel_dp->psr.psr2_enabled)
2539 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2540 else
2541 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2542
2543 if (ret)
2544 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2545 }
2546}
2547
2548static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2549{
2550 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2551 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2552 i915_reg_t reg;
2553 u32 mask;
2554 int err;
2555
2556 if (!intel_dp->psr.enabled)
2557 return false;
2558
2559 if (intel_dp->psr.psr2_enabled) {
2560 reg = EDP_PSR2_STATUS(cpu_transcoder);
2561 mask = EDP_PSR2_STATUS_STATE_MASK;
2562 } else {
2563 reg = psr_status_reg(dev_priv, cpu_transcoder);
2564 mask = EDP_PSR_STATUS_STATE_MASK;
2565 }
2566
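	/* Wait without holding the PSR lock, then re-check under the lock. */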
2567 mutex_unlock(lock: &intel_dp->psr.lock);
2568
2569 err = intel_de_wait_for_clear(i915: dev_priv, reg, mask, timeout: 50);
2570 if (err)
2571 drm_err(&dev_priv->drm,
2572 "Timed out waiting for PSR Idle for re-enable\n");
2573
2574 /* After the unlocked wait, verify that PSR is still wanted! */
2575 mutex_lock(&intel_dp->psr.lock);
2576 return err == 0 && intel_dp->psr.enabled;
2577}
2578
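/*
 * Force a commit on every eDP connector by marking the connected CRTC mode
 * as changed, so that a changed PSR debug mode takes effect.
 */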
2579static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2580{
2581 struct drm_connector_list_iter conn_iter;
2582 struct drm_modeset_acquire_ctx ctx;
2583 struct drm_atomic_state *state;
2584 struct drm_connector *conn;
2585 int err = 0;
2586
2587 state = drm_atomic_state_alloc(dev: &dev_priv->drm);
2588 if (!state)
2589 return -ENOMEM;
2590
2591 drm_modeset_acquire_init(ctx: &ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2592
2593 state->acquire_ctx = &ctx;
2594 to_intel_atomic_state(state)->internal = true;
2595
2596retry:
2597 drm_connector_list_iter_begin(dev: &dev_priv->drm, iter: &conn_iter);
2598 drm_for_each_connector_iter(conn, &conn_iter) {
2599 struct drm_connector_state *conn_state;
2600 struct drm_crtc_state *crtc_state;
2601
2602 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2603 continue;
2604
2605 conn_state = drm_atomic_get_connector_state(state, connector: conn);
2606 if (IS_ERR(ptr: conn_state)) {
2607 err = PTR_ERR(ptr: conn_state);
2608 break;
2609 }
2610
2611 if (!conn_state->crtc)
2612 continue;
2613
2614 crtc_state = drm_atomic_get_crtc_state(state, crtc: conn_state->crtc);
2615 if (IS_ERR(ptr: crtc_state)) {
2616 err = PTR_ERR(ptr: crtc_state);
2617 break;
2618 }
2619
2620 /* Mark mode as changed to trigger a pipe->update() */
2621 crtc_state->mode_changed = true;
2622 }
2623 drm_connector_list_iter_end(iter: &conn_iter);
2624
2625 if (err == 0)
2626 err = drm_atomic_commit(state);
2627
2628 if (err == -EDEADLK) {
2629 drm_atomic_state_clear(state);
2630 err = drm_modeset_backoff(ctx: &ctx);
2631 if (!err)
2632 goto retry;
2633 }
2634
2635 drm_modeset_drop_locks(ctx: &ctx);
2636 drm_modeset_acquire_fini(ctx: &ctx);
2637 drm_atomic_state_put(state);
2638
2639 return err;
2640}
2641
2642int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2643{
2644 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2645 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2646 u32 old_mode;
2647 int ret;
2648
2649 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2650 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2651 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2652 return -EINVAL;
2653 }
2654
2655 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2656 if (ret)
2657 return ret;
2658
2659 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2660 intel_dp->psr.debug = val;
2661
2662 /*
2663 * Do it right away if it's already enabled, otherwise it will be done
2664 * when enabling the source.
2665 */
2666 if (intel_dp->psr.enabled)
2667 psr_irq_control(intel_dp);
2668
2669 mutex_unlock(lock: &intel_dp->psr.lock);
2670
2671 if (old_mode != mode)
2672 ret = intel_psr_fastset_force(dev_priv);
2673
2674 return ret;
2675}
2676
2677static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2678{
2679 struct intel_psr *psr = &intel_dp->psr;
2680
2681 intel_psr_disable_locked(intel_dp);
2682 psr->sink_not_reliable = true;
2683	/* let's make sure that the sink is awake */
2684 drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2685}
2686
2687static void intel_psr_work(struct work_struct *work)
2688{
2689 struct intel_dp *intel_dp =
2690 container_of(work, typeof(*intel_dp), psr.work);
2691
2692 mutex_lock(&intel_dp->psr.lock);
2693
2694 if (!intel_dp->psr.enabled)
2695 goto unlock;
2696
2697 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2698 intel_psr_handle_irq(intel_dp);
2699
2700 /*
2701	 * We have to make sure PSR is ready for re-enable,
2702	 * otherwise it stays disabled until the next full enable/disable cycle.
2703	 * PSR might take some time to get fully disabled
2704	 * and be ready for re-enable.
2705 */
2706 if (!__psr_wait_for_idle_locked(intel_dp))
2707 goto unlock;
2708
2709 /*
2710 * The delayed work can race with an invalidate hence we need to
2711 * recheck. Since psr_flush first clears this and then reschedules we
2712 * won't ever miss a flush when bailing out here.
2713 */
2714 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2715 goto unlock;
2716
2717 intel_psr_activate(intel_dp);
2718unlock:
2719 mutex_unlock(lock: &intel_dp->psr.lock);
2720}
2721
2722static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2723{
2724 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2725 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2726
2727 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2728 u32 val;
2729
2730 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2731			/* Send one update, otherwise lag is observed on the screen */
2732 intel_de_write(i915: dev_priv, CURSURFLIVE(intel_dp->psr.pipe), val: 0);
2733 return;
2734 }
2735
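		/*
		 * Switch to continuous full frame fetches while the
		 * frontbuffer is dirty.
		 */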
2736 val = man_trk_ctl_enable_bit_get(dev_priv) |
2737 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2738 man_trk_ctl_continuos_full_frame(dev_priv);
2739 intel_de_write(i915: dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2740 intel_de_write(i915: dev_priv, CURSURFLIVE(intel_dp->psr.pipe), val: 0);
2741 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2742 } else {
2743 intel_psr_exit(intel_dp);
2744 }
2745}
2746
2747/**
2748 * intel_psr_invalidate - Invalidate PSR
2749 * @dev_priv: i915 device
2750 * @frontbuffer_bits: frontbuffer plane tracking bits
2751 * @origin: which operation caused the invalidate
2752 *
2753 * Since the hardware frontbuffer tracking has gaps we need to integrate
2754 * with the software frontbuffer tracking. This function gets called every
2755 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2756 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2757 *
2758 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2759 */
2760void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2761 unsigned frontbuffer_bits, enum fb_op_origin origin)
2762{
2763 struct intel_encoder *encoder;
2764
2765 if (origin == ORIGIN_FLIP)
2766 return;
2767
2768 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2769 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2770 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2771
2772 mutex_lock(&intel_dp->psr.lock);
2773 if (!intel_dp->psr.enabled) {
2774 mutex_unlock(lock: &intel_dp->psr.lock);
2775 continue;
2776 }
2777
2778 pipe_frontbuffer_bits &=
2779 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2780 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2781
2782 if (pipe_frontbuffer_bits)
2783 _psr_invalidate_handle(intel_dp);
2784
2785 mutex_unlock(lock: &intel_dp->psr.lock);
2786 }
2787}
2788/*
2789 * Once we completely rely on PSR2 S/W tracking in the future,
2790 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2791 * event as well; therefore tgl_dc3co_flush_locked() will need to be
2792 * changed accordingly.
2793 */
2794static void
2795tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2796 enum fb_op_origin origin)
2797{
2798 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2799
2800 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2801 !intel_dp->psr.active)
2802 return;
2803
2804 /*
2805 * The delay of the delayed work is pushed back at every frontbuffer
2806 * flush/flip event, so when it finally runs the display has been idle.
2807 */
2808 if (!(frontbuffer_bits &
2809 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2810 return;
2811
2812 tgl_psr2_enable_dc3co(intel_dp);
2813 mod_delayed_work(wq: i915->unordered_wq, dwork: &intel_dp->psr.dc3co_work,
2814 delay: intel_dp->psr.dc3co_exit_delay);
2815}
2816
2817static void _psr_flush_handle(struct intel_dp *intel_dp)
2818{
2819 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2820 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2821
2822 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2823 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2824 /* can we turn CFF off? */
2825 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2826 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2827 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2828 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2829 man_trk_ctl_continuos_full_frame(dev_priv);
2830
2831 /*
2832				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2833				 * updates. Still keep the CFF bit enabled as we don't have a proper
2834				 * SU configuration in case an update is sent for any reason after
2835				 * the SFF bit gets cleared by the HW on the next vblank.
2836 */
2837 intel_de_write(i915: dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2838 val);
2839 intel_de_write(i915: dev_priv, CURSURFLIVE(intel_dp->psr.pipe), val: 0);
2840 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2841 }
2842 } else {
2843 /*
2844 * continuous full frame is disabled, only a single full
2845 * frame is required
2846 */
2847 psr_force_hw_tracking_exit(intel_dp);
2848 }
2849 } else {
2850 psr_force_hw_tracking_exit(intel_dp);
2851
2852 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2853 queue_work(wq: dev_priv->unordered_wq, work: &intel_dp->psr.work);
2854 }
2855}
2856
2857/**
2858 * intel_psr_flush - Flush PSR
2859 * @dev_priv: i915 device
2860 * @frontbuffer_bits: frontbuffer plane tracking bits
2861 * @origin: which operation caused the flush
2862 *
2863 * Since the hardware frontbuffer tracking has gaps we need to integrate
2864 * with the software frontbuffer tracking. This function gets called every
2865 * time frontbuffer rendering has completed and flushed out to memory. PSR
2866 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2867 *
2868 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2869 */
2870void intel_psr_flush(struct drm_i915_private *dev_priv,
2871 unsigned frontbuffer_bits, enum fb_op_origin origin)
2872{
2873 struct intel_encoder *encoder;
2874
2875 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2876 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2877 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2878
2879 mutex_lock(&intel_dp->psr.lock);
2880 if (!intel_dp->psr.enabled) {
2881 mutex_unlock(lock: &intel_dp->psr.lock);
2882 continue;
2883 }
2884
2885 pipe_frontbuffer_bits &=
2886 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2887 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2888
2889 /*
2890		 * If the PSR is paused by an explicit intel_psr_pause() call,
2891 * we have to ensure that the PSR is not activated until
2892 * intel_psr_resume() is called.
2893 */
2894 if (intel_dp->psr.paused)
2895 goto unlock;
2896
2897 if (origin == ORIGIN_FLIP ||
2898 (origin == ORIGIN_CURSOR_UPDATE &&
2899 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2900 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2901 goto unlock;
2902 }
2903
2904 if (pipe_frontbuffer_bits == 0)
2905 goto unlock;
2906
2907 /* By definition flush = invalidate + flush */
2908 _psr_flush_handle(intel_dp);
2909unlock:
2910 mutex_unlock(lock: &intel_dp->psr.lock);
2911 }
2912}
2913
2914/**
2915 * intel_psr_init - Init basic PSR work and mutex.
2916 * @intel_dp: Intel DP
2917 *
2918 * This function is called after initializing the connector
2919 * (connector initialization handles the connector capabilities),
2920 * and it initializes basic PSR state for each DP encoder.
2921 */
2922void intel_psr_init(struct intel_dp *intel_dp)
2923{
2924 struct intel_connector *connector = intel_dp->attached_connector;
2925 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2926 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2927
2928 if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2929 return;
2930
2931 /*
2932 * HSW spec explicitly says PSR is tied to port A.
2933 * BDW+ platforms have an instance of PSR registers per transcoder, but
2934 * BDW, GEN9 and GEN11 are not validated by the HW team on transcoders
2935 * other than the eDP one.
2936 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
2937 * so let's keep it hardcoded to PORT_A for those platforms.
2938 * GEN12, however, supports an instance of PSR registers per transcoder.
2939 */
2940 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2941 drm_dbg_kms(&dev_priv->drm,
2942 "PSR condition failed: Port not supported\n");
2943 return;
2944 }
2945
2946 if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2947 intel_dp->psr.source_panel_replay_support = true;
2948 else
2949 intel_dp->psr.source_support = true;
2950
2951 /* Disable early transport for now */
2952 intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
2953
2954	/* Set the link_standby / link_off defaults */
2955 if (DISPLAY_VER(dev_priv) < 12)
2956		/* For new platforms up to TGL let's respect the VBT again */
2957 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2958
2959 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2960 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2961 mutex_init(&intel_dp->psr.lock);
2962}
2963
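/*
 * Read the sink status and error status DPCD registers, using the Panel
 * Replay variants of both when Panel Replay is enabled.
 */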
2964static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2965 u8 *status, u8 *error_status)
2966{
2967 struct drm_dp_aux *aux = &intel_dp->aux;
2968 int ret;
2969 unsigned int offset;
2970
2971 offset = intel_dp->psr.panel_replay_enabled ?
2972 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2973
2974 ret = drm_dp_dpcd_readb(aux, offset, valuep: status);
2975 if (ret != 1)
2976 return ret;
2977
2978 offset = intel_dp->psr.panel_replay_enabled ?
2979 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2980
2981 ret = drm_dp_dpcd_readb(aux, offset, valuep: error_status);
2982 if (ret != 1)
2983 return ret;
2984
2985 *status = *status & DP_PSR_SINK_STATE_MASK;
2986
2987 return 0;
2988}
2989
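/*
 * Check the sink ALPM status and disable PSR (marking the sink as not
 * reliable) if an ALPM lock timeout error is reported.
 */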
2990static void psr_alpm_check(struct intel_dp *intel_dp)
2991{
2992 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2993 struct drm_dp_aux *aux = &intel_dp->aux;
2994 struct intel_psr *psr = &intel_dp->psr;
2995 u8 val;
2996 int r;
2997
2998 if (!psr->psr2_enabled)
2999 return;
3000
3001 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, valuep: &val);
3002 if (r != 1) {
3003 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
3004 return;
3005 }
3006
3007 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3008 intel_psr_disable_locked(intel_dp);
3009 psr->sink_not_reliable = true;
3010 drm_dbg_kms(&dev_priv->drm,
3011 "ALPM lock timeout error, disabling PSR\n");
3012
3013 /* Clearing error */
3014 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, value: val);
3015 }
3016}
3017
3018static void psr_capability_changed_check(struct intel_dp *intel_dp)
3019{
3020 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3021 struct intel_psr *psr = &intel_dp->psr;
3022 u8 val;
3023 int r;
3024
3025 r = drm_dp_dpcd_readb(aux: &intel_dp->aux, DP_PSR_ESI, valuep: &val);
3026 if (r != 1) {
3027 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3028 return;
3029 }
3030
3031 if (val & DP_PSR_CAPS_CHANGE) {
3032 intel_psr_disable_locked(intel_dp);
3033 psr->sink_not_reliable = true;
3034 drm_dbg_kms(&dev_priv->drm,
3035 "Sink PSR capability changed, disabling PSR\n");
3036
3037 /* Clearing it */
3038 drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_PSR_ESI, value: val);
3039 }
3040}
3041
3042void intel_psr_short_pulse(struct intel_dp *intel_dp)
3043{
3044 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3045 struct intel_psr *psr = &intel_dp->psr;
3046 u8 status, error_status;
3047 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3048 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3049 DP_PSR_LINK_CRC_ERROR;
3050
3051 if (!CAN_PSR(intel_dp))
3052 return;
3053
3054 mutex_lock(&psr->lock);
3055
3056 if (!psr->enabled)
3057 goto exit;
3058
3059 if (psr_get_status_and_error_status(intel_dp, status: &status, error_status: &error_status)) {
3060 drm_err(&dev_priv->drm,
3061 "Error reading PSR status or error status\n");
3062 goto exit;
3063 }
3064
3065 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
3066 intel_psr_disable_locked(intel_dp);
3067 psr->sink_not_reliable = true;
3068 }
3069
3070 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
3071 drm_dbg_kms(&dev_priv->drm,
3072 "PSR sink internal error, disabling PSR\n");
3073 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3074 drm_dbg_kms(&dev_priv->drm,
3075 "PSR RFB storage error, disabling PSR\n");
3076 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3077 drm_dbg_kms(&dev_priv->drm,
3078 "PSR VSC SDP uncorrectable error, disabling PSR\n");
3079 if (error_status & DP_PSR_LINK_CRC_ERROR)
3080 drm_dbg_kms(&dev_priv->drm,
3081 "PSR Link CRC error, disabling PSR\n");
3082
3083 if (error_status & ~errors)
3084 drm_err(&dev_priv->drm,
3085 "PSR_ERROR_STATUS unhandled errors %x\n",
3086 error_status & ~errors);
3087 /* clear status register */
3088 drm_dp_dpcd_writeb(aux: &intel_dp->aux, DP_PSR_ERROR_STATUS, value: error_status);
3089
3090 psr_alpm_check(intel_dp);
3091 psr_capability_changed_check(intel_dp);
3092
3093exit:
3094 mutex_unlock(lock: &psr->lock);
3095}
3096
3097bool intel_psr_enabled(struct intel_dp *intel_dp)
3098{
3099 bool ret;
3100
3101 if (!CAN_PSR(intel_dp))
3102 return false;
3103
3104 mutex_lock(&intel_dp->psr.lock);
3105 ret = intel_dp->psr.enabled;
3106 mutex_unlock(lock: &intel_dp->psr.lock);
3107
3108 return ret;
3109}
3110
3111/**
3112 * intel_psr_lock - grab PSR lock
3113 * @crtc_state: the crtc state
3114 *
3115 * This is initially meant to be used around the CRTC update, when
3116 * vblank-sensitive registers are updated and we need to grab the lock
3117 * before that to avoid vblank evasion.
3118 */
3119void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3120{
3121 struct drm_i915_private *i915 = to_i915(dev: crtc_state->uapi.crtc->dev);
3122 struct intel_encoder *encoder;
3123
3124 if (!crtc_state->has_psr)
3125 return;
3126
3127 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3128 crtc_state->uapi.encoder_mask) {
3129 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3130
3131 mutex_lock(&intel_dp->psr.lock);
3132 break;
3133 }
3134}
3135
3136/**
3137 * intel_psr_unlock - release PSR lock
3138 * @crtc_state: the crtc state
3139 *
3140 * Release the PSR lock that was held during pipe update.
3141 */
3142void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3143{
3144 struct drm_i915_private *i915 = to_i915(dev: crtc_state->uapi.crtc->dev);
3145 struct intel_encoder *encoder;
3146
3147 if (!crtc_state->has_psr)
3148 return;
3149
3150 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3151 crtc_state->uapi.encoder_mask) {
3152 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3153
3154 mutex_unlock(lock: &intel_dp->psr.lock);
3155 break;
3156 }
3157}
3158
3159static void
3160psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3161{
3162 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3163 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3164 const char *status = "unknown";
3165 u32 val, status_val;
3166
3167 if (intel_dp->psr.psr2_enabled) {
3168 static const char * const live_status[] = {
3169 "IDLE",
3170 "CAPTURE",
3171 "CAPTURE_FS",
3172 "SLEEP",
3173 "BUFON_FW",
3174 "ML_UP",
3175 "SU_STANDBY",
3176 "FAST_SLEEP",
3177 "DEEP_SLEEP",
3178 "BUF_ON",
3179 "TG_ON"
3180 };
3181 val = intel_de_read(i915: dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3182 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3183 if (status_val < ARRAY_SIZE(live_status))
3184 status = live_status[status_val];
3185 } else {
3186 static const char * const live_status[] = {
3187 "IDLE",
3188 "SRDONACK",
3189 "SRDENT",
3190 "BUFOFF",
3191 "BUFON",
3192 "AUXACK",
3193 "SRDOFFACK",
3194 "SRDENT_ON",
3195 };
3196 val = intel_de_read(i915: dev_priv, reg: psr_status_reg(dev_priv, cpu_transcoder));
3197 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3198 if (status_val < ARRAY_SIZE(live_status))
3199 status = live_status[status_val];
3200 }
3201
3202 seq_printf(m, fmt: "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3203}
3204
3205static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3206{
3207 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3208 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3209 struct intel_psr *psr = &intel_dp->psr;
3210 intel_wakeref_t wakeref;
3211 const char *status;
3212 bool enabled;
3213 u32 val;
3214
3215 seq_printf(m, fmt: "Sink support: PSR = %s",
3216 str_yes_no(v: psr->sink_support));
3217
3218 if (psr->sink_support)
3219 seq_printf(m, fmt: " [0x%02x]", intel_dp->psr_dpcd[0]);
3220 seq_printf(m, fmt: ", Panel Replay = %s\n", str_yes_no(v: psr->sink_panel_replay_support));
3221
3222 if (!(psr->sink_support || psr->sink_panel_replay_support))
3223 return 0;
3224
3225 wakeref = intel_runtime_pm_get(rpm: &dev_priv->runtime_pm);
3226 mutex_lock(&psr->lock);
3227
3228 if (psr->panel_replay_enabled)
3229 status = "Panel Replay Enabled";
3230 else if (psr->enabled)
3231 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3232 else
3233 status = "disabled";
3234 seq_printf(m, fmt: "PSR mode: %s\n", status);
3235
3236 if (!psr->enabled) {
3237 seq_printf(m, fmt: "PSR sink not reliable: %s\n",
3238 str_yes_no(v: psr->sink_not_reliable));
3239
3240 goto unlock;
3241 }
3242
3243 if (psr->panel_replay_enabled) {
3244 val = intel_de_read(i915: dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3245 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3246 } else if (psr->psr2_enabled) {
3247 val = intel_de_read(i915: dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3248 enabled = val & EDP_PSR2_ENABLE;
3249 } else {
3250 val = intel_de_read(i915: dev_priv, reg: psr_ctl_reg(dev_priv, cpu_transcoder));
3251 enabled = val & EDP_PSR_ENABLE;
3252 }
3253 seq_printf(m, fmt: "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3254 str_enabled_disabled(v: enabled), val);
3255 psr_source_status(intel_dp, m);
3256 seq_printf(m, fmt: "Busy frontbuffer bits: 0x%08x\n",
3257 psr->busy_frontbuffer_bits);
3258
3259 /*
3260	 * SKL+ perf counter is reset to 0 every time a DC state is entered
3261 */
3262 val = intel_de_read(i915: dev_priv, reg: psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3263 seq_printf(m, fmt: "Performance counter: %u\n",
3264 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3265
3266 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3267 seq_printf(m, fmt: "Last attempted entry at: %lld\n",
3268 psr->last_entry_attempt);
3269 seq_printf(m, fmt: "Last exit at: %lld\n", psr->last_exit);
3270 }
3271
3272 if (psr->psr2_enabled) {
3273 u32 su_frames_val[3];
3274 int frame;
3275
3276 /*
3277		 * Read all 3 registers beforehand to minimize crossing a
3278		 * frame boundary between register reads
3279 */
3280 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3281 val = intel_de_read(i915: dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3282 su_frames_val[frame / 3] = val;
3283 }
3284
3285 seq_puts(m, s: "Frame:\tPSR2 SU blocks:\n");
3286
3287 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3288 u32 su_blocks;
3289
3290 su_blocks = su_frames_val[frame / 3] &
3291 PSR2_SU_STATUS_MASK(frame);
3292 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3293 seq_printf(m, fmt: "%d\t%d\n", frame, su_blocks);
3294 }
3295
3296 seq_printf(m, fmt: "PSR2 selective fetch: %s\n",
3297 str_enabled_disabled(v: psr->psr2_sel_fetch_enabled));
3298 }
3299
3300unlock:
3301 mutex_unlock(lock: &psr->lock);
3302 intel_runtime_pm_put(rpm: &dev_priv->runtime_pm, wref: wakeref);
3303
3304 return 0;
3305}
3306
3307static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3308{
3309 struct drm_i915_private *dev_priv = m->private;
3310 struct intel_dp *intel_dp = NULL;
3311 struct intel_encoder *encoder;
3312
3313 if (!HAS_PSR(dev_priv))
3314 return -ENODEV;
3315
3316 /* Find the first EDP which supports PSR */
3317 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3318 intel_dp = enc_to_intel_dp(encoder);
3319 break;
3320 }
3321
3322 if (!intel_dp)
3323 return -ENODEV;
3324
3325 return intel_psr_status(m, intel_dp);
3326}
3327DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3328
3329static int
3330i915_edp_psr_debug_set(void *data, u64 val)
3331{
3332 struct drm_i915_private *dev_priv = data;
3333 struct intel_encoder *encoder;
3334 intel_wakeref_t wakeref;
3335 int ret = -ENODEV;
3336
3337 if (!HAS_PSR(dev_priv))
3338 return ret;
3339
3340 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3341 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3342
3343 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3344
3345 wakeref = intel_runtime_pm_get(rpm: &dev_priv->runtime_pm);
3346
3347 // TODO: split to each transcoder's PSR debug state
3348 ret = intel_psr_debug_set(intel_dp, val);
3349
3350 intel_runtime_pm_put(rpm: &dev_priv->runtime_pm, wref: wakeref);
3351 }
3352
3353 return ret;
3354}
3355
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

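/* Register the device-wide PSR debugfs files under the primary DRM minor. */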
void intel_psr_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
			    i915, &i915_edp_psr_debug_fops);

	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
			    i915, &i915_edp_psr_status_fops);
}

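/* Human-readable name of the currently enabled self-refresh mode. */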
static const char *psr_mode_str(struct intel_dp *intel_dp)
{
	if (intel_dp->psr.panel_replay_enabled)
		return "PANEL-REPLAY";
	else if (intel_dp->psr.enabled)
		return "PSR";

	return "unknown";
}

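/*
 * Per-connector debugfs entry (i915_psr_sink_status): decode and print the
 * PSR/Panel Replay status and error status reported by the sink device.
 */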
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	static const char * const panel_replay_status[] = {
		"Sink device frame is locked to the Source device",
		"Sink device is coasting, using the VTotal target",
		"Sink device is governing the frame rate (frame rate unlock is granted)",
		"Sink device in the process of re-locking with the Source device",
	};
	const char *str;
	int ret;
	u8 status, error_status;
	u32 idx;

	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
		return -ENODEV;
	}

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
	if (ret)
		return ret;

	str = "unknown";
	if (intel_dp->psr.panel_replay_enabled) {
		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
		if (idx < ARRAY_SIZE(panel_replay_status))
			str = panel_replay_status[idx];
	} else if (intel_dp->psr.enabled) {
		idx = status & DP_PSR_SINK_STATE_MASK;
		if (idx < ARRAY_SIZE(sink_status))
			str = sink_status[idx];
	}

	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			    DP_PSR_LINK_CRC_ERROR))
		seq_puts(m, ":\n");
	else
		seq_puts(m, "\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

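/*
 * Per-connector debugfs entry (i915_psr_status): report source-side PSR
 * status for the DP encoder attached to this connector.
 */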
static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

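/* Add the per-connector PSR debugfs files for eDP and non-MST DP connectors. */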
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct dentry *root = connector->base.debugfs_entry;

	/* TODO: Add support for MST connectors as well. */
	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
	    connector->mst_port)
		return;

	debugfs_create_file("i915_psr_sink_status", 0444, root,
			    connector, &i915_psr_sink_status_fops);

	if (HAS_PSR(i915) || HAS_DP20(i915))
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
}
