1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright © 2008-2018 Intel Corporation |
4 | */ |
5 | |
6 | #include <linux/sched/mm.h> |
7 | #include <linux/stop_machine.h> |
8 | #include <linux/string_helpers.h> |
9 | |
10 | #include "display/intel_display_reset.h" |
11 | #include "display/intel_overlay.h" |
12 | |
13 | #include "gem/i915_gem_context.h" |
14 | |
15 | #include "gt/intel_gt_regs.h" |
16 | |
17 | #include "gt/uc/intel_gsc_fw.h" |
18 | |
19 | #include "i915_drv.h" |
20 | #include "i915_file_private.h" |
21 | #include "i915_gpu_error.h" |
22 | #include "i915_irq.h" |
23 | #include "i915_reg.h" |
24 | #include "intel_breadcrumbs.h" |
25 | #include "intel_engine_pm.h" |
26 | #include "intel_engine_regs.h" |
27 | #include "intel_gt.h" |
28 | #include "intel_gt_pm.h" |
29 | #include "intel_gt_print.h" |
30 | #include "intel_gt_requests.h" |
31 | #include "intel_mchbar_regs.h" |
32 | #include "intel_pci_config.h" |
33 | #include "intel_reset.h" |
34 | |
35 | #include "uc/intel_guc.h" |
36 | |
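/*
 * Upper bound on how many times a full GPU reset is re-attempted (see
 * __intel_gt_reset() and do_reset() below) before we give up and wedge.
 */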
37 | #define RESET_MAX_RETRIES 3 |
38 | |
39 | static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) |
40 | { |
41 | struct drm_i915_file_private *file_priv = ctx->file_priv; |
42 | unsigned long prev_hang; |
43 | unsigned int score; |
44 | |
45 | if (IS_ERR_OR_NULL(file_priv)) |
46 | return; |
47 | |
48 | score = 0; |
49 | if (banned) |
50 | score = I915_CLIENT_SCORE_CONTEXT_BAN; |
51 | |
52 | prev_hang = xchg(&file_priv->hang_timestamp, jiffies); |
53 | if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) |
54 | score += I915_CLIENT_SCORE_HANG_FAST; |
55 | |
56 | if (score) { |
57 | atomic_add(score, &file_priv->ban_score); |
58 | |
59 | drm_dbg(&ctx->i915->drm, |
60 | "client %s: gained %u ban score, now %u\n" , |
61 | ctx->name, score, |
62 | atomic_read(&file_priv->ban_score)); |
63 | } |
64 | } |
65 | |
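/*
 * Decide whether the context that owned the hanging request should be
 * banned: the guilty count is always incremented, and a ban is applied
 * when the context is non-recoverable or hangs again within
 * CONTEXT_FAST_HANG_JIFFIES. Returns true if the context was banned.
 */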
66 | static bool mark_guilty(struct i915_request *rq) |
67 | { |
68 | struct i915_gem_context *ctx; |
69 | unsigned long prev_hang; |
70 | bool banned; |
71 | int i; |
72 | |
73 | if (intel_context_is_closed(rq->context)) |
74 | return true; |
75 | |
76 | rcu_read_lock(); |
77 | ctx = rcu_dereference(rq->context->gem_context); |
78 | if (ctx && !kref_get_unless_zero(&ctx->ref)) |
79 | ctx = NULL; |
80 | rcu_read_unlock(); |
81 | if (!ctx) |
82 | return intel_context_is_banned(rq->context); |
83 | |
84 | atomic_inc(&ctx->guilty_count); |
85 | |
86 | /* Cool contexts are too cool to be banned! (Used for reset testing.) */ |
87 | if (!i915_gem_context_is_bannable(ctx)) { |
88 | banned = false; |
89 | goto out; |
90 | } |
91 | |
92 | drm_notice(&ctx->i915->drm, |
93 | "%s context reset due to GPU hang\n" , |
94 | ctx->name); |
95 | |
96 | /* Record the timestamp for the last N hangs */ |
97 | prev_hang = ctx->hang_timestamp[0]; |
98 | for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++) |
99 | ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1]; |
100 | ctx->hang_timestamp[i] = jiffies; |
101 | |
102 | /* If we have hung N+1 times in rapid succession, we ban the context! */ |
103 | banned = !i915_gem_context_is_recoverable(ctx); |
104 | if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES)) |
105 | banned = true; |
106 | if (banned) |
107 | drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n" , |
108 | ctx->name, atomic_read(&ctx->guilty_count)); |
109 | |
110 | client_mark_guilty(ctx, banned); |
111 | |
112 | out: |
113 | i915_gem_context_put(ctx); |
114 | return banned; |
115 | } |
116 | |
117 | static void mark_innocent(struct i915_request *rq) |
118 | { |
119 | struct i915_gem_context *ctx; |
120 | |
121 | rcu_read_lock(); |
122 | ctx = rcu_dereference(rq->context->gem_context); |
123 | if (ctx) |
124 | atomic_inc(&ctx->active_count); |
125 | rcu_read_unlock(); |
126 | } |
127 | |
128 | void __i915_request_reset(struct i915_request *rq, bool guilty) |
129 | { |
130 | bool banned = false; |
131 | |
132 | RQ_TRACE(rq, "guilty? %s\n" , str_yes_no(guilty)); |
133 | GEM_BUG_ON(__i915_request_is_complete(rq)); |
134 | |
135 | rcu_read_lock(); /* protect the GEM context */ |
136 | if (guilty) { |
137 | i915_request_set_error_once(rq, -EIO); |
138 | __i915_request_skip(rq); |
139 | banned = mark_guilty(rq); |
140 | } else { |
141 | i915_request_set_error_once(rq, -EAGAIN); |
142 | mark_innocent(rq); |
143 | } |
144 | rcu_read_unlock(); |
145 | |
146 | if (banned) |
147 | intel_context_ban(rq->context, rq); |
148 | } |
149 | |
150 | static bool i915_in_reset(struct pci_dev *pdev) |
151 | { |
152 | u8 gdrst; |
153 | |
154 | pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
155 | return gdrst & GRDOM_RESET_STATUS; |
156 | } |
157 | |
158 | static int i915_do_reset(struct intel_gt *gt, |
159 | intel_engine_mask_t engine_mask, |
160 | unsigned int retry) |
161 | { |
162 | struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); |
163 | int err; |
164 | |
165 | /* Assert reset for at least 50 usec, and wait for acknowledgement. */ |
166 | pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
167 | udelay(50); |
168 | err = _wait_for_atomic(i915_in_reset(pdev), 50000, 0); |
169 | |
170 | /* Clear the reset request. */ |
171 | pci_write_config_byte(pdev, I915_GDRST, 0); |
172 | udelay(50); |
173 | if (!err) |
174 | err = _wait_for_atomic(!i915_in_reset(pdev), 50000, 0); |
175 | |
176 | return err; |
177 | } |
178 | |
179 | static bool g4x_reset_complete(struct pci_dev *pdev) |
180 | { |
181 | u8 gdrst; |
182 | |
183 | pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
184 | return (gdrst & GRDOM_RESET_ENABLE) == 0; |
185 | } |
186 | |
187 | static int g33_do_reset(struct intel_gt *gt, |
188 | intel_engine_mask_t engine_mask, |
189 | unsigned int retry) |
190 | { |
191 | struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); |
192 | |
193 | pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
194 | return _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0); |
195 | } |
196 | |
197 | static int g4x_do_reset(struct intel_gt *gt, |
198 | intel_engine_mask_t engine_mask, |
199 | unsigned int retry) |
200 | { |
201 | struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); |
202 | struct intel_uncore *uncore = gt->uncore; |
203 | int ret; |
204 | |
205 | /* WaVcpClkGateDisableForMediaReset:ctg,elk */ |
206 | intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE); |
207 | intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D); |
208 | |
209 | pci_write_config_byte(pdev, I915_GDRST, |
210 | GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
211 | ret = _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0); |
212 | if (ret) { |
213 | GT_TRACE(gt, "Wait for media reset failed\n" ); |
214 | goto out; |
215 | } |
216 | |
217 | pci_write_config_byte(pdev, I915_GDRST, |
218 | GRDOM_RENDER | GRDOM_RESET_ENABLE); |
219 | ret = _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0); |
220 | if (ret) { |
221 | GT_TRACE(gt, "Wait for render reset failed\n" ); |
222 | goto out; |
223 | } |
224 | |
225 | out: |
226 | pci_write_config_byte(pdev, I915_GDRST, 0); |
227 | |
228 | intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0); |
229 | intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D); |
230 | |
231 | return ret; |
232 | } |
233 | |
234 | static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, |
235 | unsigned int retry) |
236 | { |
237 | struct intel_uncore *uncore = gt->uncore; |
238 | int ret; |
239 | |
240 | intel_uncore_write_fw(uncore, ILK_GDSR, |
241 | ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); |
242 | ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, |
243 | ILK_GRDOM_RESET_ENABLE, 0, |
244 | 5000, 0, |
245 | NULL); |
246 | if (ret) { |
247 | GT_TRACE(gt, "Wait for render reset failed\n" ); |
248 | goto out; |
249 | } |
250 | |
251 | intel_uncore_write_fw(uncore, ILK_GDSR, |
252 | ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); |
253 | ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, |
254 | ILK_GRDOM_RESET_ENABLE, 0, |
255 | 5000, 0, |
256 | NULL); |
257 | if (ret) { |
258 | GT_TRACE(gt, "Wait for media reset failed\n" ); |
259 | goto out; |
260 | } |
261 | |
262 | out: |
263 | intel_uncore_write_fw(uncore, ILK_GDSR, 0); |
264 | intel_uncore_posting_read_fw(uncore, ILK_GDSR); |
265 | return ret; |
266 | } |
267 | |
268 | /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ |
269 | static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) |
270 | { |
271 | struct intel_uncore *uncore = gt->uncore; |
272 | int loops; |
273 | int err; |
274 | |
275 | /* |
276 | * On some platforms, e.g. Jasperlake, we see that the engine register |
277 | * state is not cleared until shortly after GDRST reports completion, |
278 | * causing a failure as we try to immediately resume while the internal |
279 | * state is still in flux. If we immediately repeat the reset, the |
280 | * second reset appears to serialise with the first, and since it is a |
281 | * no-op, the registers should retain their reset value. However, there |
282 | * is still a concern that upon leaving the second reset, the internal |
283 | * engine state is still in flux and not ready for resuming. |
284 | * |
285 | * Starting on MTL, there are some prep steps that we need to do when |
286 | * resetting some engines that need to be applied every time we write to |
287 | * GEN6_GDRST. As those are time consuming (tens of ms), we don't want |
288 | * to perform that twice, so, since the Jasperlake issue hasn't been |
289 | * observed on MTL, we avoid repeating the reset on newer platforms. |
290 | */ |
291 | loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1; |
292 | |
293 | /* |
294 | * GEN6_GDRST is not in the gt power well, no need to check |
295 | * for fifo space for the write or forcewake the chip for |
296 | * the read |
297 | */ |
298 | do { |
299 | intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); |
300 | |
301 | /* Wait for the device to ack the reset requests. */ |
302 | err = __intel_wait_for_register_fw(uncore, GEN6_GDRST, |
303 | hw_domain_mask, 0, |
304 | 2000, 0, |
305 | NULL); |
306 | } while (err == 0 && --loops); |
307 | if (err) |
308 | GT_TRACE(gt, |
309 | "Wait for 0x%08x engines reset failed\n" , |
310 | hw_domain_mask); |
311 | |
312 | /* |
313 | * As we have observed that the engine state is still volatile |
314 | * after GDRST is acked, impose a small delay to let everything settle. |
315 | */ |
316 | udelay(50); |
317 | |
318 | return err; |
319 | } |
320 | |
321 | static int __gen6_reset_engines(struct intel_gt *gt, |
322 | intel_engine_mask_t engine_mask, |
323 | unsigned int retry) |
324 | { |
325 | struct intel_engine_cs *engine; |
326 | u32 hw_mask; |
327 | |
328 | if (engine_mask == ALL_ENGINES) { |
329 | hw_mask = GEN6_GRDOM_FULL; |
330 | } else { |
331 | intel_engine_mask_t tmp; |
332 | |
333 | hw_mask = 0; |
334 | for_each_engine_masked(engine, gt, engine_mask, tmp) { |
335 | hw_mask |= engine->reset_domain; |
336 | } |
337 | } |
338 | |
339 | return gen6_hw_domain_reset(gt, hw_mask); |
340 | } |
341 | |
342 | static int gen6_reset_engines(struct intel_gt *gt, |
343 | intel_engine_mask_t engine_mask, |
344 | unsigned int retry) |
345 | { |
346 | unsigned long flags; |
347 | int ret; |
348 | |
349 | spin_lock_irqsave(&gt->uncore->lock, flags); |
350 | ret = __gen6_reset_engines(gt, engine_mask, retry); |
351 | spin_unlock_irqrestore(&gt->uncore->lock, flags); |
352 | |
353 | return ret; |
354 | } |
355 | |
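/*
 * Each SFC is shared between a pair of video engines; the VECS engine
 * paired with a given VCS instance is found by halving the instance
 * number (VCS0/VCS1 share VECS0, VCS2/VCS3 share VECS1, and so on).
 */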
356 | static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine) |
357 | { |
358 | int vecs_id; |
359 | |
360 | GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS); |
361 | |
362 | vecs_id = _VECS((engine->instance) / 2); |
363 | |
364 | return engine->gt->engine[vecs_id]; |
365 | } |
366 | |
367 | struct sfc_lock_data { |
368 | i915_reg_t lock_reg; |
369 | i915_reg_t ack_reg; |
370 | i915_reg_t usage_reg; |
371 | u32 lock_bit; |
372 | u32 ack_bit; |
373 | u32 usage_bit; |
374 | u32 reset_bit; |
375 | }; |
376 | |
377 | static void get_sfc_forced_lock_data(struct intel_engine_cs *engine, |
378 | struct sfc_lock_data *sfc_lock) |
379 | { |
380 | switch (engine->class) { |
381 | default: |
382 | MISSING_CASE(engine->class); |
383 | fallthrough; |
384 | case VIDEO_DECODE_CLASS: |
385 | sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base); |
386 | sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; |
387 | |
388 | sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base); |
389 | sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; |
390 | |
391 | sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base); |
392 | sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT; |
393 | sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); |
394 | |
395 | break; |
396 | case VIDEO_ENHANCEMENT_CLASS: |
397 | sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base); |
398 | sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; |
399 | |
400 | sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base); |
401 | sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; |
402 | |
403 | sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base); |
404 | sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT; |
405 | sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); |
406 | |
407 | break; |
408 | } |
409 | } |
410 | |
411 | static int gen11_lock_sfc(struct intel_engine_cs *engine, |
412 | u32 *reset_mask, |
413 | u32 *unlock_mask) |
414 | { |
415 | struct intel_uncore *uncore = engine->uncore; |
416 | u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; |
417 | struct sfc_lock_data sfc_lock; |
418 | bool lock_obtained, lock_to_other = false; |
419 | int ret; |
420 | |
421 | switch (engine->class) { |
422 | case VIDEO_DECODE_CLASS: |
423 | if ((BIT(engine->instance) & vdbox_sfc_access) == 0) |
424 | return 0; |
425 | |
426 | fallthrough; |
427 | case VIDEO_ENHANCEMENT_CLASS: |
428 | get_sfc_forced_lock_data(engine, &sfc_lock); |
429 | |
430 | break; |
431 | default: |
432 | return 0; |
433 | } |
434 | |
435 | if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) { |
436 | struct intel_engine_cs *paired_vecs; |
437 | |
438 | if (engine->class != VIDEO_DECODE_CLASS || |
439 | GRAPHICS_VER(engine->i915) != 12) |
440 | return 0; |
441 | |
442 | /* |
443 | * Wa_14010733141 |
444 | * |
445 | * If the VCS-MFX isn't using the SFC, we also need to check |
446 | * whether VCS-HCP is using it. If so, we need to issue a *VE* |
447 | * forced lock on the VE engine that shares the same SFC. |
448 | */ |
449 | if (!(intel_uncore_read_fw(uncore, |
450 | GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) & |
451 | GEN12_HCP_SFC_USAGE_BIT)) |
452 | return 0; |
453 | |
454 | paired_vecs = find_sfc_paired_vecs_engine(engine); |
455 | get_sfc_forced_lock_data(paired_vecs, &sfc_lock); |
456 | lock_to_other = true; |
457 | *unlock_mask |= paired_vecs->mask; |
458 | } else { |
459 | *unlock_mask |= engine->mask; |
460 | } |
461 | |
462 | /* |
463 | * If the engine is using an SFC, tell the engine that a software reset |
464 | * is going to happen. The engine will then try to force lock the SFC. |
465 | * If SFC ends up being locked to the engine we want to reset, we have |
466 | * to reset it as well (we will unlock it once the reset sequence is |
467 | * completed). |
468 | */ |
469 | intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit); |
470 | |
471 | ret = __intel_wait_for_register_fw(uncore, |
472 | sfc_lock.ack_reg, |
473 | sfc_lock.ack_bit, |
474 | sfc_lock.ack_bit, |
475 | 1000, 0, NULL); |
476 | |
477 | /* |
478 | * Was the SFC released while we were trying to lock it? |
479 | * |
480 | * We should reset both the engine and the SFC if: |
481 | * - We were locking the SFC to this engine and the lock succeeded |
482 | * OR |
483 | * - We were locking the SFC to a different engine (Wa_14010733141) |
484 | * but the SFC was released before the lock was obtained. |
485 | * |
486 | * Otherwise we need only reset the engine by itself and we can |
487 | * leave the SFC alone. |
488 | */ |
489 | lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & |
490 | sfc_lock.usage_bit) != 0; |
491 | if (lock_obtained == lock_to_other) |
492 | return 0; |
493 | |
494 | if (ret) { |
495 | ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n" ); |
496 | return ret; |
497 | } |
498 | |
499 | *reset_mask |= sfc_lock.reset_bit; |
500 | return 0; |
501 | } |
502 | |
503 | static void gen11_unlock_sfc(struct intel_engine_cs *engine) |
504 | { |
505 | struct intel_uncore *uncore = engine->uncore; |
506 | u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; |
507 | struct sfc_lock_data sfc_lock = {}; |
508 | |
509 | if (engine->class != VIDEO_DECODE_CLASS && |
510 | engine->class != VIDEO_ENHANCEMENT_CLASS) |
511 | return; |
512 | |
513 | if (engine->class == VIDEO_DECODE_CLASS && |
514 | (BIT(engine->instance) & vdbox_sfc_access) == 0) |
515 | return; |
516 | |
517 | get_sfc_forced_lock_data(engine, &sfc_lock); |
518 | |
519 | intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0); |
520 | } |
521 | |
522 | static int __gen11_reset_engines(struct intel_gt *gt, |
523 | intel_engine_mask_t engine_mask, |
524 | unsigned int retry) |
525 | { |
526 | struct intel_engine_cs *engine; |
527 | intel_engine_mask_t tmp; |
528 | u32 reset_mask, unlock_mask = 0; |
529 | int ret; |
530 | |
531 | if (engine_mask == ALL_ENGINES) { |
532 | reset_mask = GEN11_GRDOM_FULL; |
533 | } else { |
534 | reset_mask = 0; |
535 | for_each_engine_masked(engine, gt, engine_mask, tmp) { |
536 | reset_mask |= engine->reset_domain; |
537 | ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask); |
538 | if (ret) |
539 | goto sfc_unlock; |
540 | } |
541 | } |
542 | |
543 | ret = gen6_hw_domain_reset(gt, reset_mask); |
544 | |
545 | sfc_unlock: |
546 | /* |
547 | * We unlock the SFC based on the lock status and not the result of |
548 | * gen11_lock_sfc to make sure that we clean properly if something |
549 | * wrong happened during the lock (e.g. lock acquired after timeout |
550 | * expiration). |
551 | * |
552 | * Due to Wa_14010733141, we may have locked an SFC to an engine that |
553 | * wasn't being reset. So instead of calling gen11_unlock_sfc() |
554 | * on engine_mask, we instead call it on the mask of engines that our |
555 | * gen11_lock_sfc() calls told us actually had locks attempted. |
556 | */ |
557 | for_each_engine_masked(engine, gt, unlock_mask, tmp) |
558 | gen11_unlock_sfc(engine); |
559 | |
560 | return ret; |
561 | } |
562 | |
563 | static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) |
564 | { |
565 | struct intel_uncore *uncore = engine->uncore; |
566 | const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base); |
567 | u32 request, mask, ack; |
568 | int ret; |
569 | |
570 | if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1))) |
571 | return -ETIMEDOUT; |
572 | |
573 | ack = intel_uncore_read_fw(uncore, reg); |
574 | if (ack & RESET_CTL_CAT_ERROR) { |
575 | /* |
576 | * For catastrophic errors, ready-for-reset sequence |
577 | * needs to be bypassed: HAS#396813 |
578 | */ |
579 | request = RESET_CTL_CAT_ERROR; |
580 | mask = RESET_CTL_CAT_ERROR; |
581 | |
582 | /* Catastrophic errors need to be cleared by HW */ |
583 | ack = 0; |
584 | } else if (!(ack & RESET_CTL_READY_TO_RESET)) { |
585 | request = RESET_CTL_REQUEST_RESET; |
586 | mask = RESET_CTL_READY_TO_RESET; |
587 | ack = RESET_CTL_READY_TO_RESET; |
588 | } else { |
589 | return 0; |
590 | } |
591 | |
592 | intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request)); |
593 | ret = __intel_wait_for_register_fw(uncore, reg, mask, ack, |
594 | 700, 0, NULL); |
595 | if (ret) |
596 | gt_err(engine->gt, |
597 | "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n" , |
598 | engine->name, request, |
599 | intel_uncore_read_fw(uncore, reg)); |
600 | |
601 | return ret; |
602 | } |
603 | |
604 | static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) |
605 | { |
606 | intel_uncore_write_fw(engine->uncore, |
607 | RING_RESET_CTL(engine->mmio_base), |
608 | _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); |
609 | } |
610 | |
611 | static int gen8_reset_engines(struct intel_gt *gt, |
612 | intel_engine_mask_t engine_mask, |
613 | unsigned int retry) |
614 | { |
615 | struct intel_engine_cs *engine; |
616 | const bool reset_non_ready = retry >= 1; |
617 | intel_engine_mask_t tmp; |
618 | unsigned long flags; |
619 | int ret; |
620 | |
621 | spin_lock_irqsave(&gt->uncore->lock, flags); |
622 | |
623 | for_each_engine_masked(engine, gt, engine_mask, tmp) { |
624 | ret = gen8_engine_reset_prepare(engine); |
625 | if (ret && !reset_non_ready) |
626 | goto skip_reset; |
627 | |
628 | /* |
629 | * If this is not the first failed attempt to prepare, |
630 | * we decide to proceed anyway. |
631 | * |
632 | * By doing so we risk context corruption and with |
633 | * some gens (kbl), possible system hang if reset |
634 | * happens during active bb execution. |
635 | * |
636 | * We rather take context corruption instead of |
637 | * failed reset with a wedged driver/gpu. And |
638 | * active bb execution case should be covered by |
639 | * stop_engines() we have before the reset. |
640 | */ |
641 | } |
642 | |
643 | /* |
644 | * Wa_22011100796:dg2, whenever Full soft reset is required, |
645 | * reset all individual engines firstly, and then do a full soft reset. |
646 | * |
647 | * This is best effort, so ignore any error from the initial reset. |
648 | */ |
649 | if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES) |
650 | __gen11_reset_engines(gt, gt->info.engine_mask, 0); |
651 | |
652 | if (GRAPHICS_VER(gt->i915) >= 11) |
653 | ret = __gen11_reset_engines(gt, engine_mask, retry); |
654 | else |
655 | ret = __gen6_reset_engines(gt, engine_mask, retry); |
656 | |
657 | skip_reset: |
658 | for_each_engine_masked(engine, gt, engine_mask, tmp) |
659 | gen8_engine_reset_cancel(engine); |
660 | |
661 | spin_unlock_irqrestore(&gt->uncore->lock, flags); |
662 | |
663 | return ret; |
664 | } |
665 | |
666 | static int mock_reset(struct intel_gt *gt, |
667 | intel_engine_mask_t mask, |
668 | unsigned int retry) |
669 | { |
670 | return 0; |
671 | } |
672 | |
673 | typedef int (*reset_func)(struct intel_gt *, |
674 | intel_engine_mask_t engine_mask, |
675 | unsigned int retry); |
676 | |
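/*
 * Pick the platform-appropriate reset backend. Every backend shares the
 * reset_func signature above, so callers can invoke the returned pointer
 * uniformly, e.g. (illustrative sketch of the pattern used in
 * __intel_gt_reset() below):
 *
 *	reset_func reset = intel_get_gpu_reset(gt);
 *
 *	if (reset)
 *		err = reset(gt, ALL_ENGINES, 0);
 */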
677 | static reset_func intel_get_gpu_reset(const struct intel_gt *gt) |
678 | { |
679 | struct drm_i915_private *i915 = gt->i915; |
680 | |
681 | if (is_mock_gt(gt)) |
682 | return mock_reset; |
683 | else if (GRAPHICS_VER(i915) >= 8) |
684 | return gen8_reset_engines; |
685 | else if (GRAPHICS_VER(i915) >= 6) |
686 | return gen6_reset_engines; |
687 | else if (GRAPHICS_VER(i915) >= 5) |
688 | return ilk_do_reset; |
689 | else if (IS_G4X(i915)) |
690 | return g4x_do_reset; |
691 | else if (IS_G33(i915) || IS_PINEVIEW(i915)) |
692 | return g33_do_reset; |
693 | else if (GRAPHICS_VER(i915) >= 3) |
694 | return i915_do_reset; |
695 | else |
696 | return NULL; |
697 | } |
698 | |
699 | static int __reset_guc(struct intel_gt *gt) |
700 | { |
701 | u32 guc_domain = |
702 | GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; |
703 | |
704 | return gen6_hw_domain_reset(gt, guc_domain); |
705 | } |
706 | |
707 | static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask) |
708 | { |
709 | if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0)) |
710 | return false; |
711 | |
712 | if (!__HAS_ENGINE(engine_mask, GSC0)) |
713 | return false; |
714 | |
715 | return intel_gsc_uc_fw_init_done(&gt->uc.gsc); |
716 | } |
717 | |
718 | static intel_engine_mask_t |
719 | wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first) |
720 | { |
721 | if (!needs_wa_14015076503(gt, engine_mask)) |
722 | return engine_mask; |
723 | |
724 | /* |
725 | * wa_14015076503: if the GSC FW is loaded, we need to alert it that |
726 | * we're going to do a GSC engine reset and then wait for 200ms for the |
727 | * FW to get ready for it. However, if this is the first ALL_ENGINES |
728 | * reset attempt and the GSC is not busy, we can try to instead reset |
729 | * the GuC and all the other engines individually to avoid the 200ms |
730 | * wait. |
731 | * Skipping the GSC engine is safe because, differently from other |
732 | * engines, the GSCCS only role is to forward the commands to the GSC |
733 | * FW, so it doesn't have any HW outside of the CS itself and therefore |
734 | * it has no state that we don't explicitly re-init on resume or on |
735 | * context switch (LRC or power context). The HW for the GSC uC is |
736 | * managed by the GSC FW so we don't need to care about that. |
737 | */ |
738 | if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) { |
739 | __reset_guc(gt); |
740 | engine_mask = gt->info.engine_mask & ~BIT(GSC0); |
741 | } else { |
742 | intel_uncore_rmw(gt->uncore, |
743 | HECI_H_GS1(MTL_GSC_HECI2_BASE), |
744 | 0, HECI_H_GS1_ER_PREP); |
745 | |
746 | /* make sure the reset bit is clear when writing the CSR reg */ |
747 | intel_uncore_rmw(gt->uncore, |
748 | HECI_H_CSR(MTL_GSC_HECI2_BASE), |
749 | HECI_H_CSR_RST, HECI_H_CSR_IG); |
750 | msleep(200); |
751 | } |
752 | |
753 | return engine_mask; |
754 | } |
755 | |
756 | static void |
757 | wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask) |
758 | { |
759 | if (!needs_wa_14015076503(gt, engine_mask)) |
760 | return; |
761 | |
762 | intel_uncore_rmw(gt->uncore, |
763 | HECI_H_GS1(MTL_GSC_HECI2_BASE), |
764 | HECI_H_GS1_ER_PREP, 0); |
765 | } |
766 | |
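/*
 * __intel_gt_reset() is the low-level entry point: it holds forcewake so
 * the reset request cannot be dropped by a sleeping power well, applies
 * Wa_14015076503 around the reset when needed, and retries a full
 * (ALL_ENGINES) reset up to RESET_MAX_RETRIES times on timeout.
 */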
767 | int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) |
768 | { |
769 | const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1; |
770 | reset_func reset; |
771 | int ret = -ETIMEDOUT; |
772 | int retry; |
773 | |
774 | reset = intel_get_gpu_reset(gt); |
775 | if (!reset) |
776 | return -ENODEV; |
777 | |
778 | /* |
779 | * If the power well sleeps during the reset, the reset |
780 | * request may be dropped and never completes (causing -EIO). |
781 | */ |
782 | intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); |
783 | for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { |
784 | intel_engine_mask_t reset_mask; |
785 | |
786 | reset_mask = wa_14015076503_start(gt, engine_mask, !retry); |
787 | |
788 | GT_TRACE(gt, "engine_mask=%x\n" , reset_mask); |
789 | ret = reset(gt, reset_mask, retry); |
790 | |
791 | wa_14015076503_end(gt, reset_mask); |
792 | } |
793 | intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); |
794 | |
795 | return ret; |
796 | } |
797 | |
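/*
 * Reset support is gated on the i915.reset module parameter: 0 disables
 * GPU reset entirely, and a value below 2 disables per-engine reset.
 */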
798 | bool intel_has_gpu_reset(const struct intel_gt *gt) |
799 | { |
800 | if (!gt->i915->params.reset) |
801 | return false; |
802 | |
803 | return intel_get_gpu_reset(gt); |
804 | } |
805 | |
806 | bool intel_has_reset_engine(const struct intel_gt *gt) |
807 | { |
808 | if (gt->i915->params.reset < 2) |
809 | return false; |
810 | |
811 | return INTEL_INFO(gt->i915)->has_reset_engine; |
812 | } |
813 | |
814 | int intel_reset_guc(struct intel_gt *gt) |
815 | { |
816 | int ret; |
817 | |
818 | GEM_BUG_ON(!HAS_GT_UC(gt->i915)); |
819 | |
820 | intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); |
821 | ret = __reset_guc(gt); |
822 | intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); |
823 | |
824 | return ret; |
825 | } |
826 | |
827 | /* |
828 | * Ensure irq handler finishes, and not run again. |
829 | * Also return the active request so that we only search for it once. |
830 | */ |
831 | static void reset_prepare_engine(struct intel_engine_cs *engine) |
832 | { |
833 | /* |
834 | * During the reset sequence, we must prevent the engine from |
835 | * entering RC6. As the context state is undefined until we restart |
836 | * the engine, if it does enter RC6 during the reset, the state |
837 | * written to the powercontext is undefined and so we may lose |
838 | * GPU state upon resume, i.e. fail to restart after a reset. |
839 | */ |
840 | intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); |
841 | if (engine->reset.prepare) |
842 | engine->reset.prepare(engine); |
843 | } |
844 | |
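/*
 * Drop any userspace CPU mmaps that rely on a GGTT fence register, so
 * that userspace refaults (and picks up the restored fence state) once
 * the reset has completed.
 */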
845 | static void revoke_mmaps(struct intel_gt *gt) |
846 | { |
847 | int i; |
848 | |
849 | for (i = 0; i < gt->ggtt->num_fences; i++) { |
850 | struct drm_vma_offset_node *node; |
851 | struct i915_vma *vma; |
852 | u64 vma_offset; |
853 | |
854 | vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); |
855 | if (!vma) |
856 | continue; |
857 | |
858 | if (!i915_vma_has_userfault(vma)) |
859 | continue; |
860 | |
861 | GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]); |
862 | |
863 | if (!vma->mmo) |
864 | continue; |
865 | |
866 | node = &vma->mmo->vma_node; |
867 | vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT; |
868 | |
869 | unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, |
870 | drm_vma_node_offset_addr(node) + vma_offset, |
871 | vma->size, |
872 | 1); |
873 | } |
874 | } |
875 | |
876 | static intel_engine_mask_t reset_prepare(struct intel_gt *gt) |
877 | { |
878 | struct intel_engine_cs *engine; |
879 | intel_engine_mask_t awake = 0; |
880 | enum intel_engine_id id; |
881 | |
882 | /* For GuC mode, ensure submission is disabled before stopping ring */ |
883 | intel_uc_reset_prepare(&gt->uc); |
884 | |
885 | for_each_engine(engine, gt, id) { |
886 | if (intel_engine_pm_get_if_awake(engine)) |
887 | awake |= engine->mask; |
888 | reset_prepare_engine(engine); |
889 | } |
890 | |
891 | return awake; |
892 | } |
893 | |
894 | static void gt_revoke(struct intel_gt *gt) |
895 | { |
896 | revoke_mmaps(gt); |
897 | } |
898 | |
899 | static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) |
900 | { |
901 | struct intel_engine_cs *engine; |
902 | enum intel_engine_id id; |
903 | int err; |
904 | |
905 | /* |
906 | * Everything depends on having the GTT running, so we need to start |
907 | * there. |
908 | */ |
909 | err = i915_ggtt_enable_hw(gt->i915); |
910 | if (err) |
911 | return err; |
912 | |
913 | local_bh_disable(); |
914 | for_each_engine(engine, gt, id) |
915 | __intel_engine_reset(engine, stalled_mask & engine->mask); |
916 | local_bh_enable(); |
917 | |
918 | intel_uc_reset(&gt->uc, ALL_ENGINES); |
919 | |
920 | intel_ggtt_restore_fences(gt->ggtt); |
921 | |
922 | return err; |
923 | } |
924 | |
925 | static void reset_finish_engine(struct intel_engine_cs *engine) |
926 | { |
927 | if (engine->reset.finish) |
928 | engine->reset.finish(engine); |
929 | intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); |
930 | |
931 | intel_engine_signal_breadcrumbs(engine); |
932 | } |
933 | |
934 | static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) |
935 | { |
936 | struct intel_engine_cs *engine; |
937 | enum intel_engine_id id; |
938 | |
939 | for_each_engine(engine, gt, id) { |
940 | reset_finish_engine(engine); |
941 | if (awake & engine->mask) |
942 | intel_engine_pm_put(engine); |
943 | } |
944 | |
945 | intel_uc_reset_finish(&gt->uc); |
946 | } |
947 | |
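/*
 * Once the GT is wedged, every engine's submit_request is redirected
 * here: the request is marked with -EIO and completed immediately
 * instead of being sent to the (dead) hardware.
 */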
948 | static void nop_submit_request(struct i915_request *request) |
949 | { |
950 | RQ_TRACE(request, "-EIO\n" ); |
951 | |
952 | request = i915_request_mark_eio(request); |
953 | if (request) { |
954 | i915_request_submit(request); |
955 | intel_engine_signal_breadcrumbs(request->engine); |
956 | |
957 | i915_request_put(request); |
958 | } |
959 | } |
960 | |
961 | static void __intel_gt_set_wedged(struct intel_gt *gt) |
962 | { |
963 | struct intel_engine_cs *engine; |
964 | intel_engine_mask_t awake; |
965 | enum intel_engine_id id; |
966 | |
967 | if (test_bit(I915_WEDGED, &gt->reset.flags)) |
968 | return; |
969 | |
970 | GT_TRACE(gt, "start\n" ); |
971 | |
972 | /* |
973 | * First, stop submission to hw, but do not yet complete requests by |
974 | * rolling the global seqno forward (since this would complete requests |
975 | * for which we haven't set the fence error to EIO yet). |
976 | */ |
977 | awake = reset_prepare(gt); |
978 | |
979 | /* Even if the GPU reset fails, it should still stop the engines */ |
980 | if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) |
981 | __intel_gt_reset(gt, ALL_ENGINES); |
982 | |
983 | for_each_engine(engine, gt, id) |
984 | engine->submit_request = nop_submit_request; |
985 | |
986 | /* |
987 | * Make sure no request can slip through without getting completed by |
988 | * either this call here to intel_engine_write_global_seqno, or the one |
989 | * in nop_submit_request. |
990 | */ |
991 | synchronize_rcu_expedited(); |
992 | set_bit(I915_WEDGED, &gt->reset.flags); |
993 | |
994 | /* Mark all executing requests as skipped */ |
995 | local_bh_disable(); |
996 | for_each_engine(engine, gt, id) |
997 | if (engine->reset.cancel) |
998 | engine->reset.cancel(engine); |
999 | intel_uc_cancel_requests(&gt->uc); |
1000 | local_bh_enable(); |
1001 | |
1002 | reset_finish(gt, awake); |
1003 | |
1004 | GT_TRACE(gt, "end\n" ); |
1005 | } |
1006 | |
1007 | void intel_gt_set_wedged(struct intel_gt *gt) |
1008 | { |
1009 | intel_wakeref_t wakeref; |
1010 | |
1011 | if (test_bit(I915_WEDGED, &gt->reset.flags)) |
1012 | return; |
1013 | |
1014 | wakeref = intel_runtime_pm_get(gt->uncore->rpm); |
1015 | mutex_lock(&gt->reset.mutex); |
1016 | |
1017 | if (GEM_SHOW_DEBUG()) { |
1018 | struct drm_printer p = drm_dbg_printer(&gt->i915->drm, |
1019 | DRM_UT_DRIVER, __func__); |
1020 | struct intel_engine_cs *engine; |
1021 | enum intel_engine_id id; |
1022 | |
1023 | drm_printf(&p, "called from %pS\n", (void *)_RET_IP_); |
1024 | for_each_engine(engine, gt, id) { |
1025 | if (intel_engine_is_idle(engine)) |
1026 | continue; |
1027 | |
1028 | intel_engine_dump(engine, &p, "%s\n", engine->name); |
1029 | } |
1030 | } |
1031 | |
1032 | __intel_gt_set_wedged(gt); |
1033 | |
1034 | mutex_unlock(&gt->reset.mutex); |
1035 | intel_runtime_pm_put(gt->uncore->rpm, wakeref); |
1036 | } |
1037 | |
1038 | static bool __intel_gt_unset_wedged(struct intel_gt *gt) |
1039 | { |
1040 | struct intel_gt_timelines *timelines = &gt->timelines; |
1041 | struct intel_timeline *tl; |
1042 | bool ok; |
1043 | |
1044 | if (!test_bit(I915_WEDGED, &gt->reset.flags)) |
1045 | return true; |
1046 | |
1047 | /* Never fully initialised, recovery impossible */ |
1048 | if (intel_gt_has_unrecoverable_error(gt)) |
1049 | return false; |
1050 | |
1051 | GT_TRACE(gt, "start\n" ); |
1052 | |
1053 | /* |
1054 | * Before unwedging, make sure that all pending operations |
1055 | * are flushed and errored out - we may have requests waiting upon |
1056 | * third party fences. We marked all inflight requests as EIO, and |
1057 | * every execbuf since returned EIO, for consistency we want all |
1058 | * the currently pending requests to also be marked as EIO, which |
1059 | * is done inside our nop_submit_request - and so we must wait. |
1060 | * |
1061 | * No more can be submitted until we reset the wedged bit. |
1062 | */ |
1063 | spin_lock(&timelines->lock); |
1064 | list_for_each_entry(tl, &timelines->active_list, link) { |
1065 | struct dma_fence *fence; |
1066 | |
1067 | fence = i915_active_fence_get(&tl->last_request); |
1068 | if (!fence) |
1069 | continue; |
1070 | |
1071 | spin_unlock(&timelines->lock); |
1072 | |
1073 | /* |
1074 | * All internal dependencies (i915_requests) will have |
1075 | * been flushed by the set-wedge, but we may be stuck waiting |
1076 | * for external fences. These should all be capped to 10s |
1077 | * (I915_FENCE_TIMEOUT) so this wait should not be unbounded |
1078 | * in the worst case. |
1079 | */ |
1080 | dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT); |
1081 | dma_fence_put(fence); |
1082 | |
1083 | /* Restart iteration after dropping lock */ |
1084 | spin_lock(&timelines->lock); |
1085 | tl = list_entry(&timelines->active_list, typeof(*tl), link); |
1086 | } |
1087 | spin_unlock(&timelines->lock); |
1088 | |
1089 | /* We must reset pending GPU events before restoring our submission */ |
1090 | ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ |
1091 | if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) |
1092 | ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; |
1093 | if (!ok) { |
1094 | /* |
1095 | * Warn CI about the unrecoverable wedged condition. |
1096 | * Time for a reboot. |
1097 | */ |
1098 | add_taint_for_CI(gt->i915, TAINT_WARN); |
1099 | return false; |
1100 | } |
1101 | |
1102 | /* |
1103 | * Undo nop_submit_request. We prevent all new i915 requests from |
1104 | * being queued (by disallowing execbuf whilst wedged) so having |
1105 | * waited for all active requests above, we know the system is idle |
1106 | * and do not have to worry about a thread being inside |
1107 | * engine->submit_request() as we swap over. So unlike installing |
1108 | * the nop_submit_request on reset, we can do this from normal |
1109 | * context and do not require stop_machine(). |
1110 | */ |
1111 | intel_engines_reset_default_submission(gt); |
1112 | |
1113 | GT_TRACE(gt, "end\n" ); |
1114 | |
1115 | smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ |
1116 | clear_bit(I915_WEDGED, &gt->reset.flags); |
1117 | |
1118 | return true; |
1119 | } |
1120 | |
1121 | bool intel_gt_unset_wedged(struct intel_gt *gt) |
1122 | { |
1123 | bool result; |
1124 | |
1125 | mutex_lock(&gt->reset.mutex); |
1126 | result = __intel_gt_unset_wedged(gt); |
1127 | mutex_unlock(&gt->reset.mutex); |
1128 | |
1129 | return result; |
1130 | } |
1131 | |
1132 | static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) |
1133 | { |
1134 | int err, i; |
1135 | |
1136 | err = __intel_gt_reset(gt, ALL_ENGINES); |
1137 | for (i = 0; err && i < RESET_MAX_RETRIES; i++) { |
1138 | msleep(10 * (i + 1)); |
1139 | err = __intel_gt_reset(gt, ALL_ENGINES); |
1140 | } |
1141 | if (err) |
1142 | return err; |
1143 | |
1144 | return gt_reset(gt, stalled_mask); |
1145 | } |
1146 | |
1147 | static int resume(struct intel_gt *gt) |
1148 | { |
1149 | struct intel_engine_cs *engine; |
1150 | enum intel_engine_id id; |
1151 | int ret; |
1152 | |
1153 | for_each_engine(engine, gt, id) { |
1154 | ret = intel_engine_resume(engine); |
1155 | if (ret) |
1156 | return ret; |
1157 | } |
1158 | |
1159 | return 0; |
1160 | } |
1161 | |
1162 | /** |
1163 | * intel_gt_reset - reset chip after a hang |
1164 | * @gt: #intel_gt to reset |
1165 | * @stalled_mask: mask of the stalled engines with the guilty requests |
1166 | * @reason: user error message for why we are resetting |
1167 | * |
1168 | * Reset the chip. Useful if a hang is detected. Marks the device as wedged |
1169 | * on failure. |
1170 | * |
1171 | * Procedure is fairly simple: |
1172 | * - reset the chip using the reset reg |
1173 | * - re-init context state |
1174 | * - re-init hardware status page |
1175 | * - re-init ring buffer |
1176 | * - re-init interrupt state |
1177 | * - re-init display |
1178 | */ |
1179 | void intel_gt_reset(struct intel_gt *gt, |
1180 | intel_engine_mask_t stalled_mask, |
1181 | const char *reason) |
1182 | { |
1183 | intel_engine_mask_t awake; |
1184 | int ret; |
1185 | |
1186 | GT_TRACE(gt, "flags=%lx\n" , gt->reset.flags); |
1187 | |
1188 | might_sleep(); |
1189 | GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); |
1190 | |
1191 | /* |
1192 | * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence |
1193 | * critical section like gpu reset. |
1194 | */ |
1195 | gt_revoke(gt); |
1196 | |
1197 | mutex_lock(&gt->reset.mutex); |
1198 | |
1199 | /* Clear any previous failed attempts at recovery. Time to try again. */ |
1200 | if (!__intel_gt_unset_wedged(gt)) |
1201 | goto unlock; |
1202 | |
1203 | if (reason) |
1204 | gt_notice(gt, "Resetting chip for %s\n", reason); |
1205 | atomic_inc(&gt->i915->gpu_error.reset_count); |
1206 | |
1207 | awake = reset_prepare(gt); |
1208 | |
1209 | if (!intel_has_gpu_reset(gt)) { |
1210 | if (gt->i915->params.reset) |
1211 | gt_err(gt, "GPU reset not supported\n" ); |
1212 | else |
1213 | gt_dbg(gt, "GPU reset disabled\n" ); |
1214 | goto error; |
1215 | } |
1216 | |
1217 | if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) |
1218 | intel_runtime_pm_disable_interrupts(gt->i915); |
1219 | |
1220 | if (do_reset(gt, stalled_mask)) { |
1221 | gt_err(gt, "Failed to reset chip\n" ); |
1222 | goto taint; |
1223 | } |
1224 | |
1225 | if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) |
1226 | intel_runtime_pm_enable_interrupts(gt->i915); |
1227 | |
1228 | intel_overlay_reset(gt->i915); |
1229 | |
1230 | /* |
1231 | * Next we need to restore the context, but we don't use those |
1232 | * yet either... |
1233 | * |
1234 | * Ring buffer needs to be re-initialized in the KMS case, or if X |
1235 | * was running at the time of the reset (i.e. we weren't VT |
1236 | * switched away). |
1237 | */ |
1238 | ret = intel_gt_init_hw(gt); |
1239 | if (ret) { |
1240 | gt_err(gt, "Failed to initialise HW following reset (%d)\n" , ret); |
1241 | goto taint; |
1242 | } |
1243 | |
1244 | ret = resume(gt); |
1245 | if (ret) |
1246 | goto taint; |
1247 | |
1248 | finish: |
1249 | reset_finish(gt, awake); |
1250 | unlock: |
1251 | mutex_unlock(&gt->reset.mutex); |
1252 | return; |
1253 | |
1254 | taint: |
1255 | /* |
1256 | * History tells us that if we cannot reset the GPU now, we |
1257 | * never will. This then impacts everything that is run |
1258 | * subsequently. On failing the reset, we mark the driver |
1259 | * as wedged, preventing further execution on the GPU. |
1260 | * We also want to go one step further and add a taint to the |
1261 | * kernel so that any subsequent faults can be traced back to |
1262 | * this failure. This is important for CI, where if the |
1263 | * GPU/driver fails we would like to reboot and restart testing |
1264 | * rather than continue on into oblivion. For everyone else, |
1265 | * the system should still plod along, but they have been warned! |
1266 | */ |
1267 | add_taint_for_CI(gt->i915, TAINT_WARN); |
1268 | error: |
1269 | __intel_gt_set_wedged(gt); |
1270 | goto finish; |
1271 | } |
1272 | |
1273 | static int intel_gt_reset_engine(struct intel_engine_cs *engine) |
1274 | { |
1275 | return __intel_gt_reset(engine->gt, engine->mask); |
1276 | } |
1277 | |
1278 | int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg) |
1279 | { |
1280 | struct intel_gt *gt = engine->gt; |
1281 | int ret; |
1282 | |
1283 | ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); |
1284 | GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); |
1285 | |
1286 | if (intel_engine_uses_guc(engine)) |
1287 | return -ENODEV; |
1288 | |
1289 | if (!intel_engine_pm_get_if_awake(engine)) |
1290 | return 0; |
1291 | |
1292 | reset_prepare_engine(engine); |
1293 | |
1294 | if (msg) |
1295 | drm_notice(&engine->i915->drm, |
1296 | "Resetting %s for %s\n" , engine->name, msg); |
1297 | i915_increase_reset_engine_count(error: &engine->i915->gpu_error, engine); |
1298 | |
1299 | ret = intel_gt_reset_engine(engine); |
1300 | if (ret) { |
1301 | /* If we fail here, we expect to fallback to a global reset */ |
1302 | ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n" , engine->name, ret); |
1303 | goto out; |
1304 | } |
1305 | |
1306 | /* |
1307 | * The request that caused the hang is stuck on elsp, we know the |
1308 | * active request and can drop it, adjust head to skip the offending |
1309 | * request to resume executing remaining requests in the queue. |
1310 | */ |
1311 | __intel_engine_reset(engine, true); |
1312 | |
1313 | /* |
1314 | * The engine and its registers (and workarounds in case of render) |
1315 | * have been reset to their default values. Follow the init_ring |
1316 | * process to program RING_MODE, HWSP and re-enable submission. |
1317 | */ |
1318 | ret = intel_engine_resume(engine); |
1319 | |
1320 | out: |
1321 | intel_engine_cancel_stop_cs(engine); |
1322 | reset_finish_engine(engine); |
1323 | intel_engine_pm_put_async(engine); |
1324 | return ret; |
1325 | } |
1326 | |
1327 | /** |
1328 | * intel_engine_reset - reset GPU engine to recover from a hang |
1329 | * @engine: engine to reset |
1330 | * @msg: reason for GPU reset; or NULL for no drm_notice() |
1331 | * |
1332 | * Reset a specific GPU engine. Useful if a hang is detected. |
1333 | * Returns zero on successful reset or otherwise an error code. |
1334 | * |
1335 | * Procedure is: |
1336 | * - identifies the request that caused the hang and it is dropped |
1337 | * - reset engine (which will force the engine to idle) |
1338 | * - re-init/configure engine |
1339 | */ |
1340 | int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) |
1341 | { |
1342 | int err; |
1343 | |
1344 | local_bh_disable(); |
1345 | err = __intel_engine_reset_bh(engine, msg); |
1346 | local_bh_enable(); |
1347 | |
1348 | return err; |
1349 | } |
1350 | |
1351 | static void intel_gt_reset_global(struct intel_gt *gt, |
1352 | u32 engine_mask, |
1353 | const char *reason) |
1354 | { |
1355 | struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj; |
1356 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
1357 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; |
1358 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; |
1359 | struct intel_wedge_me w; |
1360 | |
1361 | kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); |
1362 | |
1363 | GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask); |
1364 | kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); |
1365 | |
1366 | /* Use a watchdog to ensure that our reset completes */ |
1367 | intel_wedge_on_timeout(&w, gt, 60 * HZ) { |
1368 | intel_display_reset_prepare(gt->i915); |
1369 | |
1370 | intel_gt_reset(gt, engine_mask, reason); |
1371 | |
1372 | intel_display_reset_finish(gt->i915); |
1373 | } |
1374 | |
1375 | if (!test_bit(I915_WEDGED, &gt->reset.flags)) |
1376 | kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); |
1377 | } |
1378 | |
1379 | /** |
1380 | * intel_gt_handle_error - handle a gpu error |
1381 | * @gt: the intel_gt |
1382 | * @engine_mask: mask representing engines that are hung |
1383 | * @flags: control flags |
1384 | * @fmt: Error message format string |
1385 | * |
1386 | * Do some basic checking of register state at error time and |
1387 | * dump it to the syslog. Also call i915_capture_error_state() to make |
1388 | * sure we get a record and make it available in debugfs. Fire a uevent |
1389 | * so userspace knows something bad happened (should trigger collection |
1390 | * of a ring dump etc.). |
1391 | */ |
1392 | void intel_gt_handle_error(struct intel_gt *gt, |
1393 | intel_engine_mask_t engine_mask, |
1394 | unsigned long flags, |
1395 | const char *fmt, ...) |
1396 | { |
1397 | struct intel_engine_cs *engine; |
1398 | intel_wakeref_t wakeref; |
1399 | intel_engine_mask_t tmp; |
1400 | char error_msg[80]; |
1401 | char *msg = NULL; |
1402 | |
1403 | if (fmt) { |
1404 | va_list args; |
1405 | |
1406 | va_start(args, fmt); |
1407 | vscnprintf(error_msg, sizeof(error_msg), fmt, args); |
1408 | va_end(args); |
1409 | |
1410 | msg = error_msg; |
1411 | } |
1412 | |
1413 | /* |
1414 | * In most cases it's guaranteed that we get here with an RPM |
1415 | * reference held, for example because there is a pending GPU |
1416 | * request that won't finish until the reset is done. This |
1417 | * isn't the case at least when we get here by doing a |
1418 | * simulated reset via debugfs, so get an RPM reference. |
1419 | */ |
1420 | wakeref = intel_runtime_pm_get(gt->uncore->rpm); |
1421 | |
1422 | engine_mask &= gt->info.engine_mask; |
1423 | |
1424 | if (flags & I915_ERROR_CAPTURE) { |
1425 | i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE); |
1426 | intel_gt_clear_error_registers(gt, engine_mask); |
1427 | } |
1428 | |
1429 | /* |
1430 | * Try engine reset when available. We fall back to full reset if |
1431 | * single reset fails. |
1432 | */ |
1433 | if (!intel_uc_uses_guc_submission(&gt->uc) && |
1434 | intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { |
1435 | local_bh_disable(); |
1436 | for_each_engine_masked(engine, gt, engine_mask, tmp) { |
1437 | BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); |
1438 | if (test_and_set_bit(I915_RESET_ENGINE + engine->id, |
1439 | &gt->reset.flags)) |
1440 | continue; |
1441 | |
1442 | if (__intel_engine_reset_bh(engine, msg) == 0) |
1443 | engine_mask &= ~engine->mask; |
1444 | |
1445 | clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, |
1446 | &gt->reset.flags); |
1447 | } |
1448 | local_bh_enable(); |
1449 | } |
1450 | |
1451 | if (!engine_mask) |
1452 | goto out; |
1453 | |
1454 | /* Full reset needs the mutex, stop any other user trying to do so. */ |
1455 | if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { |
1456 | wait_event(gt->reset.queue, |
1457 | !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); |
1458 | goto out; /* piggy-back on the other reset */ |
1459 | } |
1460 | |
1461 | /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ |
1462 | synchronize_rcu_expedited(); |
1463 | |
1464 | /* |
1465 | * Prevent any other reset-engine attempt. We don't do this for GuC |
1466 | * submission, the GuC owns the per-engine reset, not the i915. |
1467 | */ |
1468 | if (!intel_uc_uses_guc_submission(&gt->uc)) { |
1469 | for_each_engine(engine, gt, tmp) { |
1470 | while (test_and_set_bit(I915_RESET_ENGINE + engine->id, |
1471 | &gt->reset.flags)) |
1472 | wait_on_bit(&gt->reset.flags, |
1473 | I915_RESET_ENGINE + engine->id, |
1474 | TASK_UNINTERRUPTIBLE); |
1475 | } |
1476 | } |
1477 | |
1478 | /* Flush everyone using a resource about to be clobbered */ |
1479 | synchronize_srcu_expedited(&gt->reset.backoff_srcu); |
1480 | |
1481 | intel_gt_reset_global(gt, engine_mask, msg); |
1482 | |
1483 | if (!intel_uc_uses_guc_submission(&gt->uc)) { |
1484 | for_each_engine(engine, gt, tmp) |
1485 | clear_bit_unlock(I915_RESET_ENGINE + engine->id, |
1486 | &gt->reset.flags); |
1487 | } |
1488 | clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags); |
1489 | smp_mb__after_atomic(); |
1490 | wake_up_all(&gt->reset.queue); |
1491 | |
1492 | out: |
1493 | intel_runtime_pm_put(gt->uncore->rpm, wakeref); |
1494 | } |
1495 | |
1496 | static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry) |
1497 | { |
1498 | might_lock(&gt->reset.backoff_srcu); |
1499 | if (retry) |
1500 | might_sleep(); |
1501 | |
1502 | rcu_read_lock(); |
1503 | while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { |
1504 | rcu_read_unlock(); |
1505 | |
1506 | if (!retry) |
1507 | return -EBUSY; |
1508 | |
1509 | if (wait_event_interruptible(gt->reset.queue, |
1510 | !test_bit(I915_RESET_BACKOFF, |
1511 | &gt->reset.flags))) |
1512 | return -EINTR; |
1513 | |
1514 | rcu_read_lock(); |
1515 | } |
1516 | *srcu = srcu_read_lock(&gt->reset.backoff_srcu); |
1517 | rcu_read_unlock(); |
1518 | |
1519 | return 0; |
1520 | } |
1521 | |
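/*
 * Callers bracket hardware access that must not race a full GT reset
 * with the backoff SRCU, e.g. (illustrative sketch of the usual pattern,
 * not a caller in this file):
 *
 *	int tag, err;
 *
 *	err = intel_gt_reset_trylock(gt, &tag);
 *	if (err)
 *		return err;
 *	... touch hardware ...
 *	intel_gt_reset_unlock(gt, tag);
 */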
1522 | int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) |
1523 | { |
1524 | return _intel_gt_reset_lock(gt, srcu, false); |
1525 | } |
1526 | |
1527 | int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu) |
1528 | { |
1529 | return _intel_gt_reset_lock(gt, srcu, true); |
1530 | } |
1531 | |
1532 | void intel_gt_reset_unlock(struct intel_gt *gt, int tag) |
1533 | __releases(&gt->reset.backoff_srcu) |
1534 | { |
1535 | srcu_read_unlock(&gt->reset.backoff_srcu, tag); |
1536 | } |
1537 | |
1538 | int intel_gt_terminally_wedged(struct intel_gt *gt) |
1539 | { |
1540 | might_sleep(); |
1541 | |
1542 | if (!intel_gt_is_wedged(gt)) |
1543 | return 0; |
1544 | |
1545 | if (intel_gt_has_unrecoverable_error(gt)) |
1546 | return -EIO; |
1547 | |
1548 | /* Reset still in progress? Maybe we will recover? */ |
1549 | if (wait_event_interruptible(gt->reset.queue, |
1550 | !test_bit(I915_RESET_BACKOFF, |
1551 | &gt->reset.flags))) |
1552 | return -EINTR; |
1553 | |
1554 | return intel_gt_is_wedged(gt) ? -EIO : 0; |
1555 | } |
1556 | |
1557 | void intel_gt_set_wedged_on_init(struct intel_gt *gt) |
1558 | { |
1559 | BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > |
1560 | I915_WEDGED_ON_INIT); |
1561 | intel_gt_set_wedged(gt); |
1562 | i915_disable_error_state(gt->i915, -ENODEV); |
1563 | set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags); |
1564 | |
1565 | /* Wedged on init is non-recoverable */ |
1566 | add_taint_for_CI(gt->i915, TAINT_WARN); |
1567 | } |
1568 | |
1569 | void intel_gt_set_wedged_on_fini(struct intel_gt *gt) |
1570 | { |
1571 | intel_gt_set_wedged(gt); |
1572 | i915_disable_error_state(gt->i915, -ENODEV); |
1573 | set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags); |
1574 | intel_gt_retire_requests(gt); /* cleanup any wedged requests */ |
1575 | } |
1576 | |
1577 | void intel_gt_init_reset(struct intel_gt *gt) |
1578 | { |
1579 | init_waitqueue_head(&gt->reset.queue); |
1580 | mutex_init(&gt->reset.mutex); |
1581 | init_srcu_struct(&gt->reset.backoff_srcu); |
1582 | |
1583 | /* |
1584 | * While undesirable to wait inside the shrinker, complain anyway. |
1585 | * |
1586 | * If we have to wait during shrinking, we guarantee forward progress |
1587 | * by forcing the reset. Therefore during the reset we must not |
1588 | * re-enter the shrinker. By declaring that we take the reset mutex |
1589 | * within the shrinker, we forbid ourselves from performing any |
1590 | * fs-reclaim or taking related locks during reset. |
1591 | */ |
1592 | i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex); |
1593 | |
1594 | /* no GPU until we are ready! */ |
1595 | __set_bit(I915_WEDGED, &gt->reset.flags); |
1596 | } |
1597 | |
1598 | void intel_gt_fini_reset(struct intel_gt *gt) |
1599 | { |
1600 | cleanup_srcu_struct(&gt->reset.backoff_srcu); |
1601 | } |
1602 | |
1603 | static void intel_wedge_me(struct work_struct *work) |
1604 | { |
1605 | struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); |
1606 | |
1607 | gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name); |
1608 | intel_gt_set_wedged(w->gt); |
1609 | } |
1610 | |
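/*
 * Arm a watchdog that wedges the GT if the caller's work does not finish
 * within @timeout. Normally used through intel_wedge_on_timeout(), e.g.
 * as in intel_gt_reset_global() above:
 *
 *	struct intel_wedge_me w;
 *
 *	intel_wedge_on_timeout(&w, gt, 60 * HZ) {
 *		... reset work that must complete in time ...
 *	}
 */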
1611 | void __intel_init_wedge(struct intel_wedge_me *w, |
1612 | struct intel_gt *gt, |
1613 | long timeout, |
1614 | const char *name) |
1615 | { |
1616 | w->gt = gt; |
1617 | w->name = name; |
1618 | |
1619 | INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me); |
1620 | queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout); |
1621 | } |
1622 | |
1623 | void __intel_fini_wedge(struct intel_wedge_me *w) |
1624 | { |
1625 | cancel_delayed_work_sync(&w->work); |
1626 | destroy_delayed_work_on_stack(&w->work); |
1627 | w->gt = NULL; |
1628 | } |
1629 | |
1630 | /* |
1631 | * Wa_22011802037 requires that we (or the GuC) ensure that no command |
1632 | * streamers are executing MI_FORCE_WAKE while an engine reset is initiated. |
1633 | */ |
1634 | bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt) |
1635 | { |
1636 | if (GRAPHICS_VER(gt->i915) < 11) |
1637 | return false; |
1638 | |
1639 | if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)) |
1640 | return true; |
1641 | |
1642 | if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) |
1643 | return false; |
1644 | |
1645 | return true; |
1646 | } |
1647 | |
1648 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
1649 | #include "selftest_reset.c" |
1650 | #include "selftest_hangcheck.c" |
1651 | #endif |
1652 | |