// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

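/*
 * Submit the request and synchronously wait (up to 200ms) for it to
 * complete, holding a reference across the wait; a timeout overrides
 * any incoming error with -EIO.
 */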
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

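/*
 * Submit the request and, if a spinner is attached, wait until the
 * spinner reports that it has started executing on the GPU.
 */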
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

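/*
 * Capture reference copies of the GT, engine and context workaround
 * lists so the live values can be compared against them after each
 * flavour of reset.
 */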
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global");
	gt_init_workarounds(gt, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, gt, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

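/*
 * Read back all RING_FORCE_TO_NONPRIV slots (each holds the offset of
 * a whitelisted register) into a freshly allocated scratch object,
 * using SRM commands emitted on the given context.
 */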
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

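/* Slots beyond the engine's whitelist are expected to hold RING_NOPID. */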
static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

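/*
 * Compare every RING_FORCE_TO_NONPRIV slot read back by read_nonprivs()
 * against the engine's whitelist, reporting -EINVAL on any mismatch.
 */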
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
	return err;
}

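/*
 * Three reset flavours are exercised below: a full GT reset, a direct
 * engine reset, and, under GuC submission, a reset performed by the
 * GuC itself, for which do_guc_reset() only needs to report success.
 */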
static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int do_guc_reset(struct intel_engine_cs *engine)
{
	/* Currently a no-op as the reset is handled by GuC */
	return 0;
}

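/*
 * Kick off a spinner on a throwaway context so that the engine is
 * busy in non-default state when the reset is triggered.
 */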
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
{
	struct intel_context *ce;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(*rq)) {
		spin = NULL;
		err = PTR_ERR(*rq);
		goto err;
	}

	err = request_add_spin(*rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

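/*
 * Verify the whitelist before the reset, again in the same context
 * after the reset, and finally in a fresh context created afterwards.
 */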
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin, &rq);
	if (err)
		goto out_spin;

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);
		err = -ETIMEDOUT;
		goto out_spin;
	}

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

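/*
 * Predict the value a register should hold after a write: rsvd is the
 * value read back after writing ~0, i.e. the mask of bits that accepted
 * the write. The special value 0x0000ffff denotes a masked register,
 * where the high 16 bits of a write select which low bits are updated.
 */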
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

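/*
 * Write-only registers cannot be verified by reading back, so skip
 * them: both slots marked as WR access and the per-platform quirks
 * listed in wo_registers[].
 */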
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
	switch (reg) {
	case 0x358:
	case 0x35c:
	case 0x3a8:
		return true;

	default:
		return false;
	}
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

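/* Count the whitelist slots that are writable from a user batch. */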
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

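/*
 * For each whitelisted register: record the original value with SRM,
 * write every test pattern (and its complement) with LRI from a user
 * batch, reading the register back after each write, then restore the
 * original with LRM so no garbage is left behind in the context. The
 * readbacks are checked against reg_write()'s prediction.
 */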
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;
	u32 *cs, *results;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = i915_vma_offset(scratch);
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		cs = NULL;
		err = i915_gem_object_lock(scratch->obj, &ww);
		if (!err)
			err = i915_gem_object_lock(batch->obj, &ww);
		if (!err)
			err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto out;

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_ctx;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;
		}

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (GRAPHICS_VER(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);
		cs = NULL;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unmap_scratch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = i915_vma_move_to_active(batch, rq, 0);
		if (err)
			goto err_request;

		err = i915_vma_move_to_active(scratch, rq,
					      EXEC_OBJECT_WRITE);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    i915_vma_offset(batch), PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unmap_scratch;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unmap_scratch:
		i915_gem_object_unpin_map(scratch->obj);
out_unmap_batch:
		if (cs)
			i915_gem_object_unpin_map(batch->obj);
out_ctx:
		intel_context_unpin(ce);
out:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				struct intel_selftest_saved_policy saved;
				int err2;

				err = intel_selftest_modify_policy(engine, &saved,
								   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
				if (err)
					goto out;

				err = check_whitelist_across_reset(engine,
								   do_guc_reset,
								   "guc");

				err2 = intel_selftest_restore_policy(engine, &saved);
				if (err == 0)
					err = err2;
			} else {
				err = check_whitelist_across_reset(engine,
								   do_engine_reset,
								   "engine");
			}

			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

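/*
 * Dump the current value of every whitelisted register into the
 * results buffer via SRM, after clearing the access-control bits
 * from each register offset.
 */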
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

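/*
 * From an unprivileged "user" batch, write 0xffffffff into every
 * writable whitelisted register; the isolation test then checks the
 * scrubbing is visible only to the context that performed it.
 */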
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	u8 graphics_ver;
};

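/* Match a register by mmio offset, but only on the given graphics version. */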
static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, 9 },
		{ _MMIO(0xb118), 9 }, /* GEN8_L3SQCREG4 */
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes cannot be read back */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

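/*
 * Compare two register dumps slot by slot with the supplied predicate
 * (equality for isolation, inequality for writability), skipping any
 * read-only entries.
 */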
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

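/*
 * Check the live GT, engine and context workaround values against the
 * reference lists captured by reference_lists_init().
 */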
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}

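/* Check that the workarounds are reapplied across a full GPU reset. */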
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

out:
	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	return ok ? 0 : -ESRCH;
}

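/*
 * As above, but for per-engine resets: the lists are verified around an
 * idle reset (direct resets only; GuC handles its own) and again after
 * resetting an engine that is busy running a spinner.
 */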
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);
		bool ok;
		int ret2;

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (ret)
			break;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto restore;
		}

		if (!using_guc) {
			ok = verify_wa_lists(gt, lists, "before reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}

			ret = intel_engine_reset(engine, "live_workarounds:idle");
			if (ret) {
				pr_err("%s: Reset failed while idle\n", engine->name);
				goto err;
			}

			ok = verify_wa_lists(gt, lists, "after idle reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {
			ret = -ETIMEDOUT;
			goto skip;
		}

		if (!using_guc) {
			ret = intel_engine_reset(engine, "live_workarounds:active");
			if (ret) {
				pr_err("%s: Reset failed on an active spinner\n",
				       engine->name);
				igt_spinner_fini(&spin);
				goto err;
			}
		}

		/* Ensure the reset happens and kills the engine */
		if (ret == 0)
			ret = intel_selftest_wait_for_rq(rq);

skip:
		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");
		if (!ok)
			ret = -ESRCH;

err:
		intel_context_put(ce);

restore:
		ret2 = intel_selftest_restore_policy(engine, &saved);
		if (ret == 0)
			ret = ret2;
		if (ret)
			break;
	}

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}