/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_display_irq.h"
#include "display/intel_display_types.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */
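
/*
 * Illustrative only (a sketch, not driver code): per the kerneldoc on
 * intel_irq_init() and intel_irq_install() at the bottom of this file,
 * the expected load-time sequence is, with error handling elided:
 *
 *	intel_irq_init(i915);
 *	ret = intel_irq_install(i915);
 *	...
 *	intel_irq_uninstall(i915);
 */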

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
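
/*
 * Illustrative only: a hypothetical reader of this counter would pair the
 * WRITE_ONCE() above with READ_ONCE() when sampling it, e.g.:
 *
 *	u64 irqs = READ_ONCE(i915->pmu.irq_count);
 */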

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
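
/*
 * Illustrative only: most callers below reach the helpers above through the
 * GEN3_IRQ_RESET()/GEN3_IRQ_INIT() wrappers rather than calling them
 * directly. Assuming the macro definitions in i915_irq.h, they expand
 * roughly as:
 *
 *	GEN3_IRQ_RESET(uncore, SDE);
 *		=> gen3_irq_reset(uncore, SDEIMR, SDEIIR, SDEIER);
 *	GEN3_IRQ_INIT(uncore, SDE, imr_val, ier_val);
 *		=> gen3_irq_init(uncore, SDEIMR, imr_val,
 *				 SDEIER, ier_val, SDEIIR);
 */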

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg,
				   GEN7_PARITY_ERROR_VALID |
				   GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);

		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
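
/*
 * Illustrative only: gen8_irq_handler() below, and the gen11/dg1 handlers
 * after it, all wrap their per-bank processing in the same
 * disable/sample/handle/re-enable bracket:
 *
 *	master_ctl = genX_master_intr_disable(regs);
 *	if (!master_ctl) {
 *		genX_master_intr_enable(regs);
 *		return IRQ_NONE;
 *	}
 *	... ack and handle each source reported in master_ctl ...
 *	genX_master_intr_enable(regs);
 */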

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

/* drm_dma.h hooks */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(dev_priv);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	ilk_de_irq_postinstall(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(dev_priv);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore);
	dev_priv->irq_mask = ~0u;
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *  table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(i915))
		return ~I915_ERROR_MEMORY_REFRESH;
	else
		return ~(I915_ERROR_PAGE_TABLE |
			 I915_ERROR_MEMORY_REFRESH);
}
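
/*
 * Illustrative only: the value returned above is written straight into EMR
 * by the postinstall hooks below, and a bit set in EMR masks the
 * corresponding error source. E.g. on an FBC-less gen3 part:
 *
 *	EMR = ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)
 *
 * i.e. everything is masked except page table and memory refresh errors.
 */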

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);
	intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 *
	 * i965 FBC no longer generates spurious GTT errors,
	 * so we can always enable the page table errors.
	 */
	if (IS_G4X(i915))
		return ~(GM45_ERROR_PAGE_TABLE |
			 GM45_ERROR_MEM_PRIV |
			 GM45_ERROR_CP_PRIV |
			 I915_ERROR_MEMORY_REFRESH);
	else
		return ~(I915_ERROR_PAGE_TABLE |
			 I915_ERROR_MEMORY_REFRESH);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_display_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}