1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright © 2018 Intel Corporation |
4 | */ |
5 | |
6 | #include <linux/prime_numbers.h> |
7 | |
8 | #include "gem/i915_gem_internal.h" |
9 | #include "gem/i915_gem_pm.h" |
10 | #include "gt/intel_engine_heartbeat.h" |
11 | #include "gt/intel_reset.h" |
12 | #include "gt/selftest_engine_heartbeat.h" |
13 | |
14 | #include "i915_selftest.h" |
15 | #include "selftests/i915_random.h" |
16 | #include "selftests/igt_flush_test.h" |
17 | #include "selftests/igt_live_test.h" |
18 | #include "selftests/igt_spinner.h" |
19 | #include "selftests/lib_sw_fence.h" |
20 | |
21 | #include "gem/selftests/igt_gem_utils.h" |
22 | #include "gem/selftests/mock_context.h" |
23 | |
24 | #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) |
25 | #define NUM_GPR 16 |
26 | #define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ |
27 | |
28 | static bool is_active(struct i915_request *rq) |
29 | { |
30 | if (i915_request_is_active(rq)) |
31 | return true; |
32 | |
33 | if (i915_request_on_hold(rq)) |
34 | return true; |
35 | |
36 | if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) |
37 | return true; |
38 | |
39 | return false; |
40 | } |
41 | |
42 | static int wait_for_submit(struct intel_engine_cs *engine, |
43 | struct i915_request *rq, |
44 | unsigned long timeout) |
45 | { |
46 | /* Ignore our own attempts to suppress excess tasklets */ |
47 | tasklet_hi_schedule(t: &engine->sched_engine->tasklet); |
48 | |
49 | timeout += jiffies; |
50 | do { |
51 | bool done = time_after(jiffies, timeout); |
52 | |
53 | if (i915_request_completed(rq)) /* that was quick! */ |
54 | return 0; |
55 | |
56 | /* Wait until the HW has acknowleged the submission (or err) */ |
57 | intel_engine_flush_submission(engine); |
58 | if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) |
59 | return 0; |
60 | |
61 | if (done) |
62 | return -ETIME; |
63 | |
64 | cond_resched(); |
65 | } while (1); |
66 | } |
67 | |
68 | static int wait_for_reset(struct intel_engine_cs *engine, |
69 | struct i915_request *rq, |
70 | unsigned long timeout) |
71 | { |
72 | timeout += jiffies; |
73 | |
74 | do { |
75 | cond_resched(); |
76 | intel_engine_flush_submission(engine); |
77 | |
78 | if (READ_ONCE(engine->execlists.pending[0])) |
79 | continue; |
80 | |
81 | if (i915_request_completed(rq)) |
82 | break; |
83 | |
84 | if (READ_ONCE(rq->fence.error)) |
85 | break; |
86 | } while (time_before(jiffies, timeout)); |
87 | |
88 | if (rq->fence.error != -EIO) { |
89 | pr_err("%s: hanging request %llx:%lld not reset\n" , |
90 | engine->name, |
91 | rq->fence.context, |
92 | rq->fence.seqno); |
93 | return -EINVAL; |
94 | } |
95 | |
96 | /* Give the request a jiffie to complete after flushing the worker */ |
97 | if (i915_request_wait(rq, flags: 0, |
98 | max(0l, (long)(timeout - jiffies)) + 1) < 0) { |
99 | pr_err("%s: hanging request %llx:%lld did not complete\n" , |
100 | engine->name, |
101 | rq->fence.context, |
102 | rq->fence.seqno); |
103 | return -ETIME; |
104 | } |
105 | |
106 | return 0; |
107 | } |
108 | |
109 | static int live_sanitycheck(void *arg) |
110 | { |
111 | struct intel_gt *gt = arg; |
112 | struct intel_engine_cs *engine; |
113 | enum intel_engine_id id; |
114 | struct igt_spinner spin; |
115 | int err = 0; |
116 | |
117 | if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) |
118 | return 0; |
119 | |
120 | if (igt_spinner_init(spin: &spin, gt)) |
121 | return -ENOMEM; |
122 | |
123 | for_each_engine(engine, gt, id) { |
124 | struct intel_context *ce; |
125 | struct i915_request *rq; |
126 | |
127 | ce = intel_context_create(engine); |
128 | if (IS_ERR(ptr: ce)) { |
129 | err = PTR_ERR(ptr: ce); |
130 | break; |
131 | } |
132 | |
133 | rq = igt_spinner_create_request(spin: &spin, ce, MI_NOOP); |
134 | if (IS_ERR(ptr: rq)) { |
135 | err = PTR_ERR(ptr: rq); |
136 | goto out_ctx; |
137 | } |
138 | |
139 | i915_request_add(rq); |
140 | if (!igt_wait_for_spinner(spin: &spin, rq)) { |
141 | GEM_TRACE("spinner failed to start\n" ); |
142 | GEM_TRACE_DUMP(); |
143 | intel_gt_set_wedged(gt); |
144 | err = -EIO; |
145 | goto out_ctx; |
146 | } |
147 | |
148 | igt_spinner_end(spin: &spin); |
149 | if (igt_flush_test(i915: gt->i915)) { |
150 | err = -EIO; |
151 | goto out_ctx; |
152 | } |
153 | |
154 | out_ctx: |
155 | intel_context_put(ce); |
156 | if (err) |
157 | break; |
158 | } |
159 | |
160 | igt_spinner_fini(spin: &spin); |
161 | return err; |
162 | } |
163 | |
164 | static int live_unlite_restore(struct intel_gt *gt, int prio) |
165 | { |
166 | struct intel_engine_cs *engine; |
167 | enum intel_engine_id id; |
168 | struct igt_spinner spin; |
169 | int err = -ENOMEM; |
170 | |
171 | /* |
172 | * Check that we can correctly context switch between 2 instances |
173 | * on the same engine from the same parent context. |
174 | */ |
175 | |
176 | if (igt_spinner_init(spin: &spin, gt)) |
177 | return err; |
178 | |
179 | err = 0; |
180 | for_each_engine(engine, gt, id) { |
181 | struct intel_context *ce[2] = {}; |
182 | struct i915_request *rq[2]; |
183 | struct igt_live_test t; |
184 | int n; |
185 | |
186 | if (prio && !intel_engine_has_preemption(engine)) |
187 | continue; |
188 | |
189 | if (!intel_engine_can_store_dword(engine)) |
190 | continue; |
191 | |
192 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
193 | err = -EIO; |
194 | break; |
195 | } |
196 | st_engine_heartbeat_disable(engine); |
197 | |
198 | for (n = 0; n < ARRAY_SIZE(ce); n++) { |
199 | struct intel_context *tmp; |
200 | |
201 | tmp = intel_context_create(engine); |
202 | if (IS_ERR(ptr: tmp)) { |
203 | err = PTR_ERR(ptr: tmp); |
204 | goto err_ce; |
205 | } |
206 | |
207 | err = intel_context_pin(ce: tmp); |
208 | if (err) { |
209 | intel_context_put(ce: tmp); |
210 | goto err_ce; |
211 | } |
212 | |
213 | /* |
214 | * Setup the pair of contexts such that if we |
215 | * lite-restore using the RING_TAIL from ce[1] it |
216 | * will execute garbage from ce[0]->ring. |
217 | */ |
218 | memset(tmp->ring->vaddr, |
219 | POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */ |
220 | tmp->ring->vma->size); |
221 | |
222 | ce[n] = tmp; |
223 | } |
224 | GEM_BUG_ON(!ce[1]->ring->size); |
225 | intel_ring_reset(ring: ce[1]->ring, tail: ce[1]->ring->size / 2); |
226 | lrc_update_regs(ce: ce[1], engine, head: ce[1]->ring->head); |
227 | |
228 | rq[0] = igt_spinner_create_request(spin: &spin, ce: ce[0], MI_ARB_CHECK); |
229 | if (IS_ERR(ptr: rq[0])) { |
230 | err = PTR_ERR(ptr: rq[0]); |
231 | goto err_ce; |
232 | } |
233 | |
234 | i915_request_get(rq: rq[0]); |
235 | i915_request_add(rq: rq[0]); |
236 | GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); |
237 | |
238 | if (!igt_wait_for_spinner(spin: &spin, rq: rq[0])) { |
239 | i915_request_put(rq: rq[0]); |
240 | goto err_ce; |
241 | } |
242 | |
243 | rq[1] = i915_request_create(ce: ce[1]); |
244 | if (IS_ERR(ptr: rq[1])) { |
245 | err = PTR_ERR(ptr: rq[1]); |
246 | i915_request_put(rq: rq[0]); |
247 | goto err_ce; |
248 | } |
249 | |
250 | if (!prio) { |
251 | /* |
252 | * Ensure we do the switch to ce[1] on completion. |
253 | * |
254 | * rq[0] is already submitted, so this should reduce |
255 | * to a no-op (a wait on a request on the same engine |
256 | * uses the submit fence, not the completion fence), |
257 | * but it will install a dependency on rq[1] for rq[0] |
258 | * that will prevent the pair being reordered by |
259 | * timeslicing. |
260 | */ |
261 | i915_request_await_dma_fence(rq: rq[1], fence: &rq[0]->fence); |
262 | } |
263 | |
264 | i915_request_get(rq: rq[1]); |
265 | i915_request_add(rq: rq[1]); |
266 | GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); |
267 | i915_request_put(rq: rq[0]); |
268 | |
269 | if (prio) { |
270 | struct i915_sched_attr attr = { |
271 | .priority = prio, |
272 | }; |
273 | |
274 | /* Alternatively preempt the spinner with ce[1] */ |
275 | engine->sched_engine->schedule(rq[1], &attr); |
276 | } |
277 | |
278 | /* And switch back to ce[0] for good measure */ |
279 | rq[0] = i915_request_create(ce: ce[0]); |
280 | if (IS_ERR(ptr: rq[0])) { |
281 | err = PTR_ERR(ptr: rq[0]); |
282 | i915_request_put(rq: rq[1]); |
283 | goto err_ce; |
284 | } |
285 | |
286 | i915_request_await_dma_fence(rq: rq[0], fence: &rq[1]->fence); |
287 | i915_request_get(rq: rq[0]); |
288 | i915_request_add(rq: rq[0]); |
289 | GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); |
290 | i915_request_put(rq: rq[1]); |
291 | i915_request_put(rq: rq[0]); |
292 | |
293 | err_ce: |
294 | intel_engine_flush_submission(engine); |
295 | igt_spinner_end(spin: &spin); |
296 | for (n = 0; n < ARRAY_SIZE(ce); n++) { |
297 | if (IS_ERR_OR_NULL(ptr: ce[n])) |
298 | break; |
299 | |
300 | intel_context_unpin(ce: ce[n]); |
301 | intel_context_put(ce: ce[n]); |
302 | } |
303 | |
304 | st_engine_heartbeat_enable(engine); |
305 | if (igt_live_test_end(t: &t)) |
306 | err = -EIO; |
307 | if (err) |
308 | break; |
309 | } |
310 | |
311 | igt_spinner_fini(spin: &spin); |
312 | return err; |
313 | } |
314 | |
315 | static int live_unlite_switch(void *arg) |
316 | { |
317 | return live_unlite_restore(gt: arg, prio: 0); |
318 | } |
319 | |
320 | static int live_unlite_preempt(void *arg) |
321 | { |
322 | return live_unlite_restore(gt: arg, prio: I915_PRIORITY_MAX); |
323 | } |
324 | |
325 | static int live_unlite_ring(void *arg) |
326 | { |
327 | struct intel_gt *gt = arg; |
328 | struct intel_engine_cs *engine; |
329 | struct igt_spinner spin; |
330 | enum intel_engine_id id; |
331 | int err = 0; |
332 | |
333 | /* |
334 | * Setup a preemption event that will cause almost the entire ring |
335 | * to be unwound, potentially fooling our intel_ring_direction() |
336 | * into emitting a forward lite-restore instead of the rollback. |
337 | */ |
338 | |
339 | if (igt_spinner_init(spin: &spin, gt)) |
340 | return -ENOMEM; |
341 | |
342 | for_each_engine(engine, gt, id) { |
343 | struct intel_context *ce[2] = {}; |
344 | struct i915_request *rq; |
345 | struct igt_live_test t; |
346 | int n; |
347 | |
348 | if (!intel_engine_has_preemption(engine)) |
349 | continue; |
350 | |
351 | if (!intel_engine_can_store_dword(engine)) |
352 | continue; |
353 | |
354 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
355 | err = -EIO; |
356 | break; |
357 | } |
358 | st_engine_heartbeat_disable(engine); |
359 | |
360 | for (n = 0; n < ARRAY_SIZE(ce); n++) { |
361 | struct intel_context *tmp; |
362 | |
363 | tmp = intel_context_create(engine); |
364 | if (IS_ERR(ptr: tmp)) { |
365 | err = PTR_ERR(ptr: tmp); |
366 | goto err_ce; |
367 | } |
368 | |
369 | err = intel_context_pin(ce: tmp); |
370 | if (err) { |
371 | intel_context_put(ce: tmp); |
372 | goto err_ce; |
373 | } |
374 | |
375 | memset32(s: tmp->ring->vaddr, |
376 | v: 0xdeadbeef, /* trigger a hang if executed */ |
377 | n: tmp->ring->vma->size / sizeof(u32)); |
378 | |
379 | ce[n] = tmp; |
380 | } |
381 | |
382 | /* Create max prio spinner, followed by N low prio nops */ |
383 | rq = igt_spinner_create_request(spin: &spin, ce: ce[0], MI_ARB_CHECK); |
384 | if (IS_ERR(ptr: rq)) { |
385 | err = PTR_ERR(ptr: rq); |
386 | goto err_ce; |
387 | } |
388 | |
389 | i915_request_get(rq); |
390 | rq->sched.attr.priority = I915_PRIORITY_BARRIER; |
391 | i915_request_add(rq); |
392 | |
393 | if (!igt_wait_for_spinner(spin: &spin, rq)) { |
394 | intel_gt_set_wedged(gt); |
395 | i915_request_put(rq); |
396 | err = -ETIME; |
397 | goto err_ce; |
398 | } |
399 | |
400 | /* Fill the ring, until we will cause a wrap */ |
401 | n = 0; |
402 | while (intel_ring_direction(ring: ce[0]->ring, |
403 | next: rq->wa_tail, |
404 | prev: ce[0]->ring->tail) <= 0) { |
405 | struct i915_request *tmp; |
406 | |
407 | tmp = intel_context_create_request(ce: ce[0]); |
408 | if (IS_ERR(ptr: tmp)) { |
409 | err = PTR_ERR(ptr: tmp); |
410 | i915_request_put(rq); |
411 | goto err_ce; |
412 | } |
413 | |
414 | i915_request_add(rq: tmp); |
415 | intel_engine_flush_submission(engine); |
416 | n++; |
417 | } |
418 | intel_engine_flush_submission(engine); |
419 | pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n" , |
420 | engine->name, n, |
421 | ce[0]->ring->size, |
422 | ce[0]->ring->tail, |
423 | ce[0]->ring->emit, |
424 | rq->tail); |
425 | GEM_BUG_ON(intel_ring_direction(ce[0]->ring, |
426 | rq->tail, |
427 | ce[0]->ring->tail) <= 0); |
428 | i915_request_put(rq); |
429 | |
430 | /* Create a second ring to preempt the first ring after rq[0] */ |
431 | rq = intel_context_create_request(ce: ce[1]); |
432 | if (IS_ERR(ptr: rq)) { |
433 | err = PTR_ERR(ptr: rq); |
434 | goto err_ce; |
435 | } |
436 | |
437 | rq->sched.attr.priority = I915_PRIORITY_BARRIER; |
438 | i915_request_get(rq); |
439 | i915_request_add(rq); |
440 | |
441 | err = wait_for_submit(engine, rq, HZ / 2); |
442 | i915_request_put(rq); |
443 | if (err) { |
444 | pr_err("%s: preemption request was not submitted\n" , |
445 | engine->name); |
446 | err = -ETIME; |
447 | } |
448 | |
449 | pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n" , |
450 | engine->name, |
451 | ce[0]->ring->tail, ce[0]->ring->emit, |
452 | ce[1]->ring->tail, ce[1]->ring->emit); |
453 | |
454 | err_ce: |
455 | intel_engine_flush_submission(engine); |
456 | igt_spinner_end(spin: &spin); |
457 | for (n = 0; n < ARRAY_SIZE(ce); n++) { |
458 | if (IS_ERR_OR_NULL(ptr: ce[n])) |
459 | break; |
460 | |
461 | intel_context_unpin(ce: ce[n]); |
462 | intel_context_put(ce: ce[n]); |
463 | } |
464 | st_engine_heartbeat_enable(engine); |
465 | if (igt_live_test_end(t: &t)) |
466 | err = -EIO; |
467 | if (err) |
468 | break; |
469 | } |
470 | |
471 | igt_spinner_fini(spin: &spin); |
472 | return err; |
473 | } |
474 | |
475 | static int live_pin_rewind(void *arg) |
476 | { |
477 | struct intel_gt *gt = arg; |
478 | struct intel_engine_cs *engine; |
479 | enum intel_engine_id id; |
480 | int err = 0; |
481 | |
482 | /* |
483 | * We have to be careful not to trust intel_ring too much, for example |
484 | * ring->head is updated upon retire which is out of sync with pinning |
485 | * the context. Thus we cannot use ring->head to set CTX_RING_HEAD, |
486 | * or else we risk writing an older, stale value. |
487 | * |
488 | * To simulate this, let's apply a bit of deliberate sabotague. |
489 | */ |
490 | |
491 | for_each_engine(engine, gt, id) { |
492 | struct intel_context *ce; |
493 | struct i915_request *rq; |
494 | struct intel_ring *ring; |
495 | struct igt_live_test t; |
496 | |
497 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
498 | err = -EIO; |
499 | break; |
500 | } |
501 | |
502 | ce = intel_context_create(engine); |
503 | if (IS_ERR(ptr: ce)) { |
504 | err = PTR_ERR(ptr: ce); |
505 | break; |
506 | } |
507 | |
508 | err = intel_context_pin(ce); |
509 | if (err) { |
510 | intel_context_put(ce); |
511 | break; |
512 | } |
513 | |
514 | /* Keep the context awake while we play games */ |
515 | err = i915_active_acquire(ref: &ce->active); |
516 | if (err) { |
517 | intel_context_unpin(ce); |
518 | intel_context_put(ce); |
519 | break; |
520 | } |
521 | ring = ce->ring; |
522 | |
523 | /* Poison the ring, and offset the next request from HEAD */ |
524 | memset32(s: ring->vaddr, STACK_MAGIC, n: ring->size / sizeof(u32)); |
525 | ring->emit = ring->size / 2; |
526 | ring->tail = ring->emit; |
527 | GEM_BUG_ON(ring->head); |
528 | |
529 | intel_context_unpin(ce); |
530 | |
531 | /* Submit a simple nop request */ |
532 | GEM_BUG_ON(intel_context_is_pinned(ce)); |
533 | rq = intel_context_create_request(ce); |
534 | i915_active_release(ref: &ce->active); /* e.g. async retire */ |
535 | intel_context_put(ce); |
536 | if (IS_ERR(ptr: rq)) { |
537 | err = PTR_ERR(ptr: rq); |
538 | break; |
539 | } |
540 | GEM_BUG_ON(!rq->head); |
541 | i915_request_add(rq); |
542 | |
543 | /* Expect not to hang! */ |
544 | if (igt_live_test_end(t: &t)) { |
545 | err = -EIO; |
546 | break; |
547 | } |
548 | } |
549 | |
550 | return err; |
551 | } |
552 | |
553 | static int engine_lock_reset_tasklet(struct intel_engine_cs *engine) |
554 | { |
555 | tasklet_disable(t: &engine->sched_engine->tasklet); |
556 | local_bh_disable(); |
557 | |
558 | if (test_and_set_bit(I915_RESET_ENGINE + engine->id, |
559 | addr: &engine->gt->reset.flags)) { |
560 | local_bh_enable(); |
561 | tasklet_enable(t: &engine->sched_engine->tasklet); |
562 | |
563 | intel_gt_set_wedged(gt: engine->gt); |
564 | return -EBUSY; |
565 | } |
566 | |
567 | return 0; |
568 | } |
569 | |
570 | static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine) |
571 | { |
572 | clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, |
573 | word: &engine->gt->reset.flags); |
574 | |
575 | local_bh_enable(); |
576 | tasklet_enable(t: &engine->sched_engine->tasklet); |
577 | } |
578 | |
579 | static int live_hold_reset(void *arg) |
580 | { |
581 | struct intel_gt *gt = arg; |
582 | struct intel_engine_cs *engine; |
583 | enum intel_engine_id id; |
584 | struct igt_spinner spin; |
585 | int err = 0; |
586 | |
587 | /* |
588 | * In order to support offline error capture for fast preempt reset, |
589 | * we need to decouple the guilty request and ensure that it and its |
590 | * descendents are not executed while the capture is in progress. |
591 | */ |
592 | |
593 | if (!intel_has_reset_engine(gt)) |
594 | return 0; |
595 | |
596 | if (igt_spinner_init(spin: &spin, gt)) |
597 | return -ENOMEM; |
598 | |
599 | for_each_engine(engine, gt, id) { |
600 | struct intel_context *ce; |
601 | struct i915_request *rq; |
602 | |
603 | ce = intel_context_create(engine); |
604 | if (IS_ERR(ptr: ce)) { |
605 | err = PTR_ERR(ptr: ce); |
606 | break; |
607 | } |
608 | |
609 | st_engine_heartbeat_disable(engine); |
610 | |
611 | rq = igt_spinner_create_request(spin: &spin, ce, MI_ARB_CHECK); |
612 | if (IS_ERR(ptr: rq)) { |
613 | err = PTR_ERR(ptr: rq); |
614 | goto out; |
615 | } |
616 | i915_request_add(rq); |
617 | |
618 | if (!igt_wait_for_spinner(spin: &spin, rq)) { |
619 | intel_gt_set_wedged(gt); |
620 | err = -ETIME; |
621 | goto out; |
622 | } |
623 | |
624 | /* We have our request executing, now remove it and reset */ |
625 | |
626 | err = engine_lock_reset_tasklet(engine); |
627 | if (err) |
628 | goto out; |
629 | |
630 | engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet); |
631 | GEM_BUG_ON(execlists_active(&engine->execlists) != rq); |
632 | |
633 | i915_request_get(rq); |
634 | execlists_hold(engine, rq); |
635 | GEM_BUG_ON(!i915_request_on_hold(rq)); |
636 | |
637 | __intel_engine_reset_bh(engine, NULL); |
638 | GEM_BUG_ON(rq->fence.error != -EIO); |
639 | |
640 | engine_unlock_reset_tasklet(engine); |
641 | |
642 | /* Check that we do not resubmit the held request */ |
643 | if (!i915_request_wait(rq, flags: 0, HZ / 5)) { |
644 | pr_err("%s: on hold request completed!\n" , |
645 | engine->name); |
646 | i915_request_put(rq); |
647 | err = -EIO; |
648 | goto out; |
649 | } |
650 | GEM_BUG_ON(!i915_request_on_hold(rq)); |
651 | |
652 | /* But is resubmitted on release */ |
653 | execlists_unhold(engine, rq); |
654 | if (i915_request_wait(rq, flags: 0, HZ / 5) < 0) { |
655 | pr_err("%s: held request did not complete!\n" , |
656 | engine->name); |
657 | intel_gt_set_wedged(gt); |
658 | err = -ETIME; |
659 | } |
660 | i915_request_put(rq); |
661 | |
662 | out: |
663 | st_engine_heartbeat_enable(engine); |
664 | intel_context_put(ce); |
665 | if (err) |
666 | break; |
667 | } |
668 | |
669 | igt_spinner_fini(spin: &spin); |
670 | return err; |
671 | } |
672 | |
673 | static const char *error_repr(int err) |
674 | { |
675 | return err ? "bad" : "good" ; |
676 | } |
677 | |
678 | static int live_error_interrupt(void *arg) |
679 | { |
680 | static const struct error_phase { |
681 | enum { GOOD = 0, BAD = -EIO } error[2]; |
682 | } phases[] = { |
683 | { { BAD, GOOD } }, |
684 | { { BAD, BAD } }, |
685 | { { BAD, GOOD } }, |
686 | { { GOOD, GOOD } }, /* sentinel */ |
687 | }; |
688 | struct intel_gt *gt = arg; |
689 | struct intel_engine_cs *engine; |
690 | enum intel_engine_id id; |
691 | |
692 | /* |
693 | * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning |
694 | * of invalid commands in user batches that will cause a GPU hang. |
695 | * This is a faster mechanism than using hangcheck/heartbeats, but |
696 | * only detects problems the HW knows about -- it will not warn when |
697 | * we kill the HW! |
698 | * |
699 | * To verify our detection and reset, we throw some invalid commands |
700 | * at the HW and wait for the interrupt. |
701 | */ |
702 | |
703 | if (!intel_has_reset_engine(gt)) |
704 | return 0; |
705 | |
706 | for_each_engine(engine, gt, id) { |
707 | const struct error_phase *p; |
708 | int err = 0; |
709 | |
710 | st_engine_heartbeat_disable(engine); |
711 | |
712 | for (p = phases; p->error[0] != GOOD; p++) { |
713 | struct i915_request *client[ARRAY_SIZE(phases->error)]; |
714 | u32 *cs; |
715 | int i; |
716 | |
717 | memset(client, 0, sizeof(*client)); |
718 | for (i = 0; i < ARRAY_SIZE(client); i++) { |
719 | struct intel_context *ce; |
720 | struct i915_request *rq; |
721 | |
722 | ce = intel_context_create(engine); |
723 | if (IS_ERR(ptr: ce)) { |
724 | err = PTR_ERR(ptr: ce); |
725 | goto out; |
726 | } |
727 | |
728 | rq = intel_context_create_request(ce); |
729 | intel_context_put(ce); |
730 | if (IS_ERR(ptr: rq)) { |
731 | err = PTR_ERR(ptr: rq); |
732 | goto out; |
733 | } |
734 | |
735 | if (rq->engine->emit_init_breadcrumb) { |
736 | err = rq->engine->emit_init_breadcrumb(rq); |
737 | if (err) { |
738 | i915_request_add(rq); |
739 | goto out; |
740 | } |
741 | } |
742 | |
743 | cs = intel_ring_begin(rq, num_dwords: 2); |
744 | if (IS_ERR(ptr: cs)) { |
745 | i915_request_add(rq); |
746 | err = PTR_ERR(ptr: cs); |
747 | goto out; |
748 | } |
749 | |
750 | if (p->error[i]) { |
751 | *cs++ = 0xdeadbeef; |
752 | *cs++ = 0xdeadbeef; |
753 | } else { |
754 | *cs++ = MI_NOOP; |
755 | *cs++ = MI_NOOP; |
756 | } |
757 | |
758 | client[i] = i915_request_get(rq); |
759 | i915_request_add(rq); |
760 | } |
761 | |
762 | err = wait_for_submit(engine, rq: client[0], HZ / 2); |
763 | if (err) { |
764 | pr_err("%s: first request did not start within time!\n" , |
765 | engine->name); |
766 | err = -ETIME; |
767 | goto out; |
768 | } |
769 | |
770 | for (i = 0; i < ARRAY_SIZE(client); i++) { |
771 | if (i915_request_wait(rq: client[i], flags: 0, HZ / 5) < 0) |
772 | pr_debug("%s: %s request incomplete!\n" , |
773 | engine->name, |
774 | error_repr(p->error[i])); |
775 | |
776 | if (!i915_request_started(rq: client[i])) { |
777 | pr_err("%s: %s request not started!\n" , |
778 | engine->name, |
779 | error_repr(p->error[i])); |
780 | err = -ETIME; |
781 | goto out; |
782 | } |
783 | |
784 | /* Kick the tasklet to process the error */ |
785 | intel_engine_flush_submission(engine); |
786 | if (client[i]->fence.error != p->error[i]) { |
787 | pr_err("%s: %s request (%s) with wrong error code: %d\n" , |
788 | engine->name, |
789 | error_repr(p->error[i]), |
790 | i915_request_completed(client[i]) ? "completed" : "running" , |
791 | client[i]->fence.error); |
792 | err = -EINVAL; |
793 | goto out; |
794 | } |
795 | } |
796 | |
797 | out: |
798 | for (i = 0; i < ARRAY_SIZE(client); i++) |
799 | if (client[i]) |
800 | i915_request_put(rq: client[i]); |
801 | if (err) { |
802 | pr_err("%s: failed at phase[%zd] { %d, %d }\n" , |
803 | engine->name, p - phases, |
804 | p->error[0], p->error[1]); |
805 | break; |
806 | } |
807 | } |
808 | |
809 | st_engine_heartbeat_enable(engine); |
810 | if (err) { |
811 | intel_gt_set_wedged(gt); |
812 | return err; |
813 | } |
814 | } |
815 | |
816 | return 0; |
817 | } |
818 | |
819 | static int |
820 | emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) |
821 | { |
822 | u32 *cs; |
823 | |
824 | cs = intel_ring_begin(rq, num_dwords: 10); |
825 | if (IS_ERR(ptr: cs)) |
826 | return PTR_ERR(ptr: cs); |
827 | |
828 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
829 | |
830 | *cs++ = MI_SEMAPHORE_WAIT | |
831 | MI_SEMAPHORE_GLOBAL_GTT | |
832 | MI_SEMAPHORE_POLL | |
833 | MI_SEMAPHORE_SAD_NEQ_SDD; |
834 | *cs++ = 0; |
835 | *cs++ = i915_ggtt_offset(vma) + 4 * idx; |
836 | *cs++ = 0; |
837 | |
838 | if (idx > 0) { |
839 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
840 | *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); |
841 | *cs++ = 0; |
842 | *cs++ = 1; |
843 | } else { |
844 | *cs++ = MI_NOOP; |
845 | *cs++ = MI_NOOP; |
846 | *cs++ = MI_NOOP; |
847 | *cs++ = MI_NOOP; |
848 | } |
849 | |
850 | *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; |
851 | |
852 | intel_ring_advance(rq, cs); |
853 | return 0; |
854 | } |
855 | |
856 | static struct i915_request * |
857 | semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) |
858 | { |
859 | struct intel_context *ce; |
860 | struct i915_request *rq; |
861 | int err; |
862 | |
863 | ce = intel_context_create(engine); |
864 | if (IS_ERR(ptr: ce)) |
865 | return ERR_CAST(ptr: ce); |
866 | |
867 | rq = intel_context_create_request(ce); |
868 | if (IS_ERR(ptr: rq)) |
869 | goto out_ce; |
870 | |
871 | err = 0; |
872 | if (rq->engine->emit_init_breadcrumb) |
873 | err = rq->engine->emit_init_breadcrumb(rq); |
874 | if (err == 0) |
875 | err = emit_semaphore_chain(rq, vma, idx); |
876 | if (err == 0) |
877 | i915_request_get(rq); |
878 | i915_request_add(rq); |
879 | if (err) |
880 | rq = ERR_PTR(error: err); |
881 | |
882 | out_ce: |
883 | intel_context_put(ce); |
884 | return rq; |
885 | } |
886 | |
887 | static int |
888 | release_queue(struct intel_engine_cs *engine, |
889 | struct i915_vma *vma, |
890 | int idx, int prio) |
891 | { |
892 | struct i915_sched_attr attr = { |
893 | .priority = prio, |
894 | }; |
895 | struct i915_request *rq; |
896 | u32 *cs; |
897 | |
898 | rq = intel_engine_create_kernel_request(engine); |
899 | if (IS_ERR(ptr: rq)) |
900 | return PTR_ERR(ptr: rq); |
901 | |
902 | cs = intel_ring_begin(rq, num_dwords: 4); |
903 | if (IS_ERR(ptr: cs)) { |
904 | i915_request_add(rq); |
905 | return PTR_ERR(ptr: cs); |
906 | } |
907 | |
908 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
909 | *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); |
910 | *cs++ = 0; |
911 | *cs++ = 1; |
912 | |
913 | intel_ring_advance(rq, cs); |
914 | |
915 | i915_request_get(rq); |
916 | i915_request_add(rq); |
917 | |
918 | local_bh_disable(); |
919 | engine->sched_engine->schedule(rq, &attr); |
920 | local_bh_enable(); /* kick tasklet */ |
921 | |
922 | i915_request_put(rq); |
923 | |
924 | return 0; |
925 | } |
926 | |
927 | static int |
928 | slice_semaphore_queue(struct intel_engine_cs *outer, |
929 | struct i915_vma *vma, |
930 | int count) |
931 | { |
932 | struct intel_engine_cs *engine; |
933 | struct i915_request *head; |
934 | enum intel_engine_id id; |
935 | int err, i, n = 0; |
936 | |
937 | head = semaphore_queue(engine: outer, vma, idx: n++); |
938 | if (IS_ERR(ptr: head)) |
939 | return PTR_ERR(ptr: head); |
940 | |
941 | for_each_engine(engine, outer->gt, id) { |
942 | if (!intel_engine_has_preemption(engine)) |
943 | continue; |
944 | |
945 | for (i = 0; i < count; i++) { |
946 | struct i915_request *rq; |
947 | |
948 | rq = semaphore_queue(engine, vma, idx: n++); |
949 | if (IS_ERR(ptr: rq)) { |
950 | err = PTR_ERR(ptr: rq); |
951 | goto out; |
952 | } |
953 | |
954 | i915_request_put(rq); |
955 | } |
956 | } |
957 | |
958 | err = release_queue(engine: outer, vma, idx: n, I915_PRIORITY_BARRIER); |
959 | if (err) |
960 | goto out; |
961 | |
962 | if (i915_request_wait(rq: head, flags: 0, |
963 | timeout: 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { |
964 | pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n" , |
965 | outer->name, count, n); |
966 | GEM_TRACE_DUMP(); |
967 | intel_gt_set_wedged(gt: outer->gt); |
968 | err = -EIO; |
969 | } |
970 | |
971 | out: |
972 | i915_request_put(rq: head); |
973 | return err; |
974 | } |
975 | |
976 | static int live_timeslice_preempt(void *arg) |
977 | { |
978 | struct intel_gt *gt = arg; |
979 | struct drm_i915_gem_object *obj; |
980 | struct intel_engine_cs *engine; |
981 | enum intel_engine_id id; |
982 | struct i915_vma *vma; |
983 | void *vaddr; |
984 | int err = 0; |
985 | |
986 | /* |
987 | * If a request takes too long, we would like to give other users |
988 | * a fair go on the GPU. In particular, users may create batches |
989 | * that wait upon external input, where that input may even be |
990 | * supplied by another GPU job. To avoid blocking forever, we |
991 | * need to preempt the current task and replace it with another |
992 | * ready task. |
993 | */ |
994 | if (!CONFIG_DRM_I915_TIMESLICE_DURATION) |
995 | return 0; |
996 | |
997 | obj = i915_gem_object_create_internal(i915: gt->i915, PAGE_SIZE); |
998 | if (IS_ERR(ptr: obj)) |
999 | return PTR_ERR(ptr: obj); |
1000 | |
1001 | vma = i915_vma_instance(obj, vm: >->ggtt->vm, NULL); |
1002 | if (IS_ERR(ptr: vma)) { |
1003 | err = PTR_ERR(ptr: vma); |
1004 | goto err_obj; |
1005 | } |
1006 | |
1007 | vaddr = i915_gem_object_pin_map_unlocked(obj, type: I915_MAP_WC); |
1008 | if (IS_ERR(ptr: vaddr)) { |
1009 | err = PTR_ERR(ptr: vaddr); |
1010 | goto err_obj; |
1011 | } |
1012 | |
1013 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_GLOBAL); |
1014 | if (err) |
1015 | goto err_map; |
1016 | |
1017 | err = i915_vma_sync(vma); |
1018 | if (err) |
1019 | goto err_pin; |
1020 | |
1021 | for_each_engine(engine, gt, id) { |
1022 | if (!intel_engine_has_preemption(engine)) |
1023 | continue; |
1024 | |
1025 | memset(vaddr, 0, PAGE_SIZE); |
1026 | |
1027 | st_engine_heartbeat_disable(engine); |
1028 | err = slice_semaphore_queue(outer: engine, vma, count: 5); |
1029 | st_engine_heartbeat_enable(engine); |
1030 | if (err) |
1031 | goto err_pin; |
1032 | |
1033 | if (igt_flush_test(i915: gt->i915)) { |
1034 | err = -EIO; |
1035 | goto err_pin; |
1036 | } |
1037 | } |
1038 | |
1039 | err_pin: |
1040 | i915_vma_unpin(vma); |
1041 | err_map: |
1042 | i915_gem_object_unpin_map(obj); |
1043 | err_obj: |
1044 | i915_gem_object_put(obj); |
1045 | return err; |
1046 | } |
1047 | |
1048 | static struct i915_request * |
1049 | create_rewinder(struct intel_context *ce, |
1050 | struct i915_request *wait, |
1051 | void *slot, int idx) |
1052 | { |
1053 | const u32 offset = |
1054 | i915_ggtt_offset(vma: ce->engine->status_page.vma) + |
1055 | offset_in_page(slot); |
1056 | struct i915_request *rq; |
1057 | u32 *cs; |
1058 | int err; |
1059 | |
1060 | rq = intel_context_create_request(ce); |
1061 | if (IS_ERR(ptr: rq)) |
1062 | return rq; |
1063 | |
1064 | if (wait) { |
1065 | err = i915_request_await_dma_fence(rq, fence: &wait->fence); |
1066 | if (err) |
1067 | goto err; |
1068 | } |
1069 | |
1070 | cs = intel_ring_begin(rq, num_dwords: 14); |
1071 | if (IS_ERR(ptr: cs)) { |
1072 | err = PTR_ERR(ptr: cs); |
1073 | goto err; |
1074 | } |
1075 | |
1076 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
1077 | *cs++ = MI_NOOP; |
1078 | |
1079 | *cs++ = MI_SEMAPHORE_WAIT | |
1080 | MI_SEMAPHORE_GLOBAL_GTT | |
1081 | MI_SEMAPHORE_POLL | |
1082 | MI_SEMAPHORE_SAD_GTE_SDD; |
1083 | *cs++ = idx; |
1084 | *cs++ = offset; |
1085 | *cs++ = 0; |
1086 | |
1087 | *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; |
1088 | *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); |
1089 | *cs++ = offset + idx * sizeof(u32); |
1090 | *cs++ = 0; |
1091 | |
1092 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
1093 | *cs++ = offset; |
1094 | *cs++ = 0; |
1095 | *cs++ = idx + 1; |
1096 | |
1097 | intel_ring_advance(rq, cs); |
1098 | |
1099 | err = 0; |
1100 | err: |
1101 | i915_request_get(rq); |
1102 | i915_request_add(rq); |
1103 | if (err) { |
1104 | i915_request_put(rq); |
1105 | return ERR_PTR(error: err); |
1106 | } |
1107 | |
1108 | return rq; |
1109 | } |
1110 | |
1111 | static int live_timeslice_rewind(void *arg) |
1112 | { |
1113 | struct intel_gt *gt = arg; |
1114 | struct intel_engine_cs *engine; |
1115 | enum intel_engine_id id; |
1116 | |
1117 | /* |
1118 | * The usual presumption on timeslice expiration is that we replace |
1119 | * the active context with another. However, given a chain of |
1120 | * dependencies we may end up with replacing the context with itself, |
1121 | * but only a few of those requests, forcing us to rewind the |
1122 | * RING_TAIL of the original request. |
1123 | */ |
1124 | if (!CONFIG_DRM_I915_TIMESLICE_DURATION) |
1125 | return 0; |
1126 | |
1127 | for_each_engine(engine, gt, id) { |
1128 | enum { A1, A2, B1 }; |
1129 | enum { X = 1, Z, Y }; |
1130 | struct i915_request *rq[3] = {}; |
1131 | struct intel_context *ce; |
1132 | unsigned long timeslice; |
1133 | int i, err = 0; |
1134 | u32 *slot; |
1135 | |
1136 | if (!intel_engine_has_timeslices(engine)) |
1137 | continue; |
1138 | |
1139 | /* |
1140 | * A:rq1 -- semaphore wait, timestamp X |
1141 | * A:rq2 -- write timestamp Y |
1142 | * |
1143 | * B:rq1 [await A:rq1] -- write timestamp Z |
1144 | * |
1145 | * Force timeslice, release semaphore. |
1146 | * |
1147 | * Expect execution/evaluation order XZY |
1148 | */ |
1149 | |
1150 | st_engine_heartbeat_disable(engine); |
1151 | timeslice = xchg(&engine->props.timeslice_duration_ms, 1); |
1152 | |
1153 | slot = memset32(s: engine->status_page.addr + 1000, v: 0, n: 4); |
1154 | |
1155 | ce = intel_context_create(engine); |
1156 | if (IS_ERR(ptr: ce)) { |
1157 | err = PTR_ERR(ptr: ce); |
1158 | goto err; |
1159 | } |
1160 | |
1161 | rq[A1] = create_rewinder(ce, NULL, slot, idx: X); |
1162 | if (IS_ERR(ptr: rq[A1])) { |
1163 | intel_context_put(ce); |
1164 | goto err; |
1165 | } |
1166 | |
1167 | rq[A2] = create_rewinder(ce, NULL, slot, idx: Y); |
1168 | intel_context_put(ce); |
1169 | if (IS_ERR(ptr: rq[A2])) |
1170 | goto err; |
1171 | |
1172 | err = wait_for_submit(engine, rq: rq[A2], HZ / 2); |
1173 | if (err) { |
1174 | pr_err("%s: failed to submit first context\n" , |
1175 | engine->name); |
1176 | goto err; |
1177 | } |
1178 | |
1179 | ce = intel_context_create(engine); |
1180 | if (IS_ERR(ptr: ce)) { |
1181 | err = PTR_ERR(ptr: ce); |
1182 | goto err; |
1183 | } |
1184 | |
1185 | rq[B1] = create_rewinder(ce, wait: rq[A1], slot, idx: Z); |
1186 | intel_context_put(ce); |
1187 | if (IS_ERR(ptr: rq[2])) |
1188 | goto err; |
1189 | |
1190 | err = wait_for_submit(engine, rq: rq[B1], HZ / 2); |
1191 | if (err) { |
1192 | pr_err("%s: failed to submit second context\n" , |
1193 | engine->name); |
1194 | goto err; |
1195 | } |
1196 | |
1197 | /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */ |
1198 | ENGINE_TRACE(engine, "forcing tasklet for rewind\n" ); |
1199 | while (i915_request_is_active(rq: rq[A2])) { /* semaphore yield! */ |
1200 | /* Wait for the timeslice to kick in */ |
1201 | del_timer(timer: &engine->execlists.timer); |
1202 | tasklet_hi_schedule(t: &engine->sched_engine->tasklet); |
1203 | intel_engine_flush_submission(engine); |
1204 | } |
1205 | /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ |
1206 | GEM_BUG_ON(!i915_request_is_active(rq[A1])); |
1207 | GEM_BUG_ON(!i915_request_is_active(rq[B1])); |
1208 | GEM_BUG_ON(i915_request_is_active(rq[A2])); |
1209 | |
1210 | /* Release the hounds! */ |
1211 | slot[0] = 1; |
1212 | wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */ |
1213 | |
1214 | for (i = 1; i <= 3; i++) { |
1215 | unsigned long timeout = jiffies + HZ / 2; |
1216 | |
1217 | while (!READ_ONCE(slot[i]) && |
1218 | time_before(jiffies, timeout)) |
1219 | ; |
1220 | |
1221 | if (!time_before(jiffies, timeout)) { |
1222 | pr_err("%s: rq[%d] timed out\n" , |
1223 | engine->name, i - 1); |
1224 | err = -ETIME; |
1225 | goto err; |
1226 | } |
1227 | |
1228 | pr_debug("%s: slot[%d]:%x\n" , engine->name, i, slot[i]); |
1229 | } |
1230 | |
1231 | /* XZY: XZ < XY */ |
1232 | if (slot[Z] - slot[X] >= slot[Y] - slot[X]) { |
1233 | pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n" , |
1234 | engine->name, |
1235 | slot[Z] - slot[X], |
1236 | slot[Y] - slot[X]); |
1237 | err = -EINVAL; |
1238 | } |
1239 | |
1240 | err: |
1241 | memset32(s: &slot[0], v: -1, n: 4); |
1242 | wmb(); |
1243 | |
1244 | engine->props.timeslice_duration_ms = timeslice; |
1245 | st_engine_heartbeat_enable(engine); |
1246 | for (i = 0; i < 3; i++) |
1247 | i915_request_put(rq: rq[i]); |
1248 | if (igt_flush_test(i915: gt->i915)) |
1249 | err = -EIO; |
1250 | if (err) |
1251 | return err; |
1252 | } |
1253 | |
1254 | return 0; |
1255 | } |
1256 | |
1257 | static struct i915_request *nop_request(struct intel_engine_cs *engine) |
1258 | { |
1259 | struct i915_request *rq; |
1260 | |
1261 | rq = intel_engine_create_kernel_request(engine); |
1262 | if (IS_ERR(ptr: rq)) |
1263 | return rq; |
1264 | |
1265 | i915_request_get(rq); |
1266 | i915_request_add(rq); |
1267 | |
1268 | return rq; |
1269 | } |
1270 | |
1271 | static long slice_timeout(struct intel_engine_cs *engine) |
1272 | { |
1273 | long timeout; |
1274 | |
1275 | /* Enough time for a timeslice to kick in, and kick out */ |
1276 | timeout = 2 * msecs_to_jiffies_timeout(m: timeslice(engine)); |
1277 | |
1278 | /* Enough time for the nop request to complete */ |
1279 | timeout += HZ / 5; |
1280 | |
1281 | return timeout + 1; |
1282 | } |
1283 | |
1284 | static int live_timeslice_queue(void *arg) |
1285 | { |
1286 | struct intel_gt *gt = arg; |
1287 | struct drm_i915_gem_object *obj; |
1288 | struct intel_engine_cs *engine; |
1289 | enum intel_engine_id id; |
1290 | struct i915_vma *vma; |
1291 | void *vaddr; |
1292 | int err = 0; |
1293 | |
1294 | /* |
1295 | * Make sure that even if ELSP[0] and ELSP[1] are filled with |
1296 | * timeslicing between them disabled, we *do* enable timeslicing |
1297 | * if the queue demands it. (Normally, we do not submit if |
1298 | * ELSP[1] is already occupied, so must rely on timeslicing to |
1299 | * eject ELSP[0] in favour of the queue.) |
1300 | */ |
1301 | if (!CONFIG_DRM_I915_TIMESLICE_DURATION) |
1302 | return 0; |
1303 | |
1304 | obj = i915_gem_object_create_internal(i915: gt->i915, PAGE_SIZE); |
1305 | if (IS_ERR(ptr: obj)) |
1306 | return PTR_ERR(ptr: obj); |
1307 | |
1308 | vma = i915_vma_instance(obj, vm: >->ggtt->vm, NULL); |
1309 | if (IS_ERR(ptr: vma)) { |
1310 | err = PTR_ERR(ptr: vma); |
1311 | goto err_obj; |
1312 | } |
1313 | |
1314 | vaddr = i915_gem_object_pin_map_unlocked(obj, type: I915_MAP_WC); |
1315 | if (IS_ERR(ptr: vaddr)) { |
1316 | err = PTR_ERR(ptr: vaddr); |
1317 | goto err_obj; |
1318 | } |
1319 | |
1320 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_GLOBAL); |
1321 | if (err) |
1322 | goto err_map; |
1323 | |
1324 | err = i915_vma_sync(vma); |
1325 | if (err) |
1326 | goto err_pin; |
1327 | |
1328 | for_each_engine(engine, gt, id) { |
1329 | struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; |
1330 | struct i915_request *rq, *nop; |
1331 | |
1332 | if (!intel_engine_has_preemption(engine)) |
1333 | continue; |
1334 | |
1335 | st_engine_heartbeat_disable(engine); |
1336 | memset(vaddr, 0, PAGE_SIZE); |
1337 | |
1338 | /* ELSP[0]: semaphore wait */ |
1339 | rq = semaphore_queue(engine, vma, idx: 0); |
1340 | if (IS_ERR(ptr: rq)) { |
1341 | err = PTR_ERR(ptr: rq); |
1342 | goto err_heartbeat; |
1343 | } |
1344 | engine->sched_engine->schedule(rq, &attr); |
1345 | err = wait_for_submit(engine, rq, HZ / 2); |
1346 | if (err) { |
1347 | pr_err("%s: Timed out trying to submit semaphores\n" , |
1348 | engine->name); |
1349 | goto err_rq; |
1350 | } |
1351 | |
1352 | /* ELSP[1]: nop request */ |
1353 | nop = nop_request(engine); |
1354 | if (IS_ERR(ptr: nop)) { |
1355 | err = PTR_ERR(ptr: nop); |
1356 | goto err_rq; |
1357 | } |
1358 | err = wait_for_submit(engine, rq: nop, HZ / 2); |
1359 | i915_request_put(rq: nop); |
1360 | if (err) { |
1361 | pr_err("%s: Timed out trying to submit nop\n" , |
1362 | engine->name); |
1363 | goto err_rq; |
1364 | } |
1365 | |
1366 | GEM_BUG_ON(i915_request_completed(rq)); |
1367 | GEM_BUG_ON(execlists_active(&engine->execlists) != rq); |
1368 | |
1369 | /* Queue: semaphore signal, matching priority as semaphore */ |
1370 | err = release_queue(engine, vma, idx: 1, prio: effective_prio(rq)); |
1371 | if (err) |
1372 | goto err_rq; |
1373 | |
1374 | /* Wait until we ack the release_queue and start timeslicing */ |
1375 | do { |
1376 | cond_resched(); |
1377 | intel_engine_flush_submission(engine); |
1378 | } while (READ_ONCE(engine->execlists.pending[0])); |
1379 | |
1380 | /* Timeslice every jiffy, so within 2 we should signal */ |
1381 | if (i915_request_wait(rq, flags: 0, timeout: slice_timeout(engine)) < 0) { |
1382 | struct drm_printer p = |
1383 | drm_info_printer(dev: gt->i915->drm.dev); |
1384 | |
1385 | pr_err("%s: Failed to timeslice into queue\n" , |
1386 | engine->name); |
1387 | intel_engine_dump(engine, m: &p, |
1388 | header: "%s\n" , engine->name); |
1389 | |
1390 | memset(vaddr, 0xff, PAGE_SIZE); |
1391 | err = -EIO; |
1392 | } |
1393 | err_rq: |
1394 | i915_request_put(rq); |
1395 | err_heartbeat: |
1396 | st_engine_heartbeat_enable(engine); |
1397 | if (err) |
1398 | break; |
1399 | } |
1400 | |
1401 | err_pin: |
1402 | i915_vma_unpin(vma); |
1403 | err_map: |
1404 | i915_gem_object_unpin_map(obj); |
1405 | err_obj: |
1406 | i915_gem_object_put(obj); |
1407 | return err; |
1408 | } |
1409 | |
1410 | static int live_timeslice_nopreempt(void *arg) |
1411 | { |
1412 | struct intel_gt *gt = arg; |
1413 | struct intel_engine_cs *engine; |
1414 | enum intel_engine_id id; |
1415 | struct igt_spinner spin; |
1416 | int err = 0; |
1417 | |
1418 | /* |
1419 | * We should not timeslice into a request that is marked with |
1420 | * I915_REQUEST_NOPREEMPT. |
1421 | */ |
1422 | if (!CONFIG_DRM_I915_TIMESLICE_DURATION) |
1423 | return 0; |
1424 | |
1425 | if (igt_spinner_init(spin: &spin, gt)) |
1426 | return -ENOMEM; |
1427 | |
1428 | for_each_engine(engine, gt, id) { |
1429 | struct intel_context *ce; |
1430 | struct i915_request *rq; |
1431 | unsigned long timeslice; |
1432 | |
1433 | if (!intel_engine_has_preemption(engine)) |
1434 | continue; |
1435 | |
1436 | ce = intel_context_create(engine); |
1437 | if (IS_ERR(ptr: ce)) { |
1438 | err = PTR_ERR(ptr: ce); |
1439 | break; |
1440 | } |
1441 | |
1442 | st_engine_heartbeat_disable(engine); |
1443 | timeslice = xchg(&engine->props.timeslice_duration_ms, 1); |
1444 | |
1445 | /* Create an unpreemptible spinner */ |
1446 | |
1447 | rq = igt_spinner_create_request(spin: &spin, ce, MI_ARB_CHECK); |
1448 | intel_context_put(ce); |
1449 | if (IS_ERR(ptr: rq)) { |
1450 | err = PTR_ERR(ptr: rq); |
1451 | goto out_heartbeat; |
1452 | } |
1453 | |
1454 | i915_request_get(rq); |
1455 | i915_request_add(rq); |
1456 | |
1457 | if (!igt_wait_for_spinner(spin: &spin, rq)) { |
1458 | i915_request_put(rq); |
1459 | err = -ETIME; |
1460 | goto out_spin; |
1461 | } |
1462 | |
1463 | set_bit(nr: I915_FENCE_FLAG_NOPREEMPT, addr: &rq->fence.flags); |
1464 | i915_request_put(rq); |
1465 | |
1466 | /* Followed by a maximum priority barrier (heartbeat) */ |
1467 | |
1468 | ce = intel_context_create(engine); |
1469 | if (IS_ERR(ptr: ce)) { |
1470 | err = PTR_ERR(ptr: ce); |
1471 | goto out_spin; |
1472 | } |
1473 | |
1474 | rq = intel_context_create_request(ce); |
1475 | intel_context_put(ce); |
1476 | if (IS_ERR(ptr: rq)) { |
1477 | err = PTR_ERR(ptr: rq); |
1478 | goto out_spin; |
1479 | } |
1480 | |
1481 | rq->sched.attr.priority = I915_PRIORITY_BARRIER; |
1482 | i915_request_get(rq); |
1483 | i915_request_add(rq); |
1484 | |
1485 | /* |
1486 | * Wait until the barrier is in ELSP, and we know timeslicing |
1487 | * will have been activated. |
1488 | */ |
1489 | if (wait_for_submit(engine, rq, HZ / 2)) { |
1490 | i915_request_put(rq); |
1491 | err = -ETIME; |
1492 | goto out_spin; |
1493 | } |
1494 | |
1495 | /* |
1496 | * Since the ELSP[0] request is unpreemptible, it should not |
1497 | * allow the maximum priority barrier through. Wait long |
1498 | * enough to see if it is timesliced in by mistake. |
1499 | */ |
1500 | if (i915_request_wait(rq, flags: 0, timeout: slice_timeout(engine)) >= 0) { |
1501 | pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n" , |
1502 | engine->name); |
1503 | err = -EINVAL; |
1504 | } |
1505 | i915_request_put(rq); |
1506 | |
1507 | out_spin: |
1508 | igt_spinner_end(spin: &spin); |
1509 | out_heartbeat: |
1510 | xchg(&engine->props.timeslice_duration_ms, timeslice); |
1511 | st_engine_heartbeat_enable(engine); |
1512 | if (err) |
1513 | break; |
1514 | |
1515 | if (igt_flush_test(i915: gt->i915)) { |
1516 | err = -EIO; |
1517 | break; |
1518 | } |
1519 | } |
1520 | |
1521 | igt_spinner_fini(spin: &spin); |
1522 | return err; |
1523 | } |
1524 | |
1525 | static int live_busywait_preempt(void *arg) |
1526 | { |
1527 | struct intel_gt *gt = arg; |
1528 | struct i915_gem_context *ctx_hi, *ctx_lo; |
1529 | struct intel_engine_cs *engine; |
1530 | struct drm_i915_gem_object *obj; |
1531 | struct i915_vma *vma; |
1532 | enum intel_engine_id id; |
1533 | u32 *map; |
1534 | int err; |
1535 | |
1536 | /* |
1537 | * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can |
1538 | * preempt the busywaits used to synchronise between rings. |
1539 | */ |
1540 | |
1541 | ctx_hi = kernel_context(i915: gt->i915, NULL); |
1542 | if (IS_ERR(ptr: ctx_hi)) |
1543 | return PTR_ERR(ptr: ctx_hi); |
1544 | |
1545 | ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; |
1546 | |
1547 | ctx_lo = kernel_context(i915: gt->i915, NULL); |
1548 | if (IS_ERR(ptr: ctx_lo)) { |
1549 | err = PTR_ERR(ptr: ctx_lo); |
1550 | goto err_ctx_hi; |
1551 | } |
1552 | |
1553 | ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; |
1554 | |
1555 | obj = i915_gem_object_create_internal(i915: gt->i915, PAGE_SIZE); |
1556 | if (IS_ERR(ptr: obj)) { |
1557 | err = PTR_ERR(ptr: obj); |
1558 | goto err_ctx_lo; |
1559 | } |
1560 | |
1561 | map = i915_gem_object_pin_map_unlocked(obj, type: I915_MAP_WC); |
1562 | if (IS_ERR(ptr: map)) { |
1563 | err = PTR_ERR(ptr: map); |
1564 | goto err_obj; |
1565 | } |
1566 | |
1567 | vma = i915_vma_instance(obj, vm: >->ggtt->vm, NULL); |
1568 | if (IS_ERR(ptr: vma)) { |
1569 | err = PTR_ERR(ptr: vma); |
1570 | goto err_map; |
1571 | } |
1572 | |
1573 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_GLOBAL); |
1574 | if (err) |
1575 | goto err_map; |
1576 | |
1577 | err = i915_vma_sync(vma); |
1578 | if (err) |
1579 | goto err_vma; |
1580 | |
1581 | for_each_engine(engine, gt, id) { |
1582 | struct i915_request *lo, *hi; |
1583 | struct igt_live_test t; |
1584 | u32 *cs; |
1585 | |
1586 | if (!intel_engine_has_preemption(engine)) |
1587 | continue; |
1588 | |
1589 | if (!intel_engine_can_store_dword(engine)) |
1590 | continue; |
1591 | |
1592 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
1593 | err = -EIO; |
1594 | goto err_vma; |
1595 | } |
1596 | |
1597 | /* |
1598 | * We create two requests. The low priority request |
1599 | * busywaits on a semaphore (inside the ringbuffer where |
1600 | * is should be preemptible) and the high priority requests |
1601 | * uses a MI_STORE_DWORD_IMM to update the semaphore value |
1602 | * allowing the first request to complete. If preemption |
1603 | * fails, we hang instead. |
1604 | */ |
1605 | |
1606 | lo = igt_request_alloc(ctx: ctx_lo, engine); |
1607 | if (IS_ERR(ptr: lo)) { |
1608 | err = PTR_ERR(ptr: lo); |
1609 | goto err_vma; |
1610 | } |
1611 | |
1612 | cs = intel_ring_begin(rq: lo, num_dwords: 8); |
1613 | if (IS_ERR(ptr: cs)) { |
1614 | err = PTR_ERR(ptr: cs); |
1615 | i915_request_add(rq: lo); |
1616 | goto err_vma; |
1617 | } |
1618 | |
1619 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
1620 | *cs++ = i915_ggtt_offset(vma); |
1621 | *cs++ = 0; |
1622 | *cs++ = 1; |
1623 | |
1624 | /* XXX Do we need a flush + invalidate here? */ |
1625 | |
1626 | *cs++ = MI_SEMAPHORE_WAIT | |
1627 | MI_SEMAPHORE_GLOBAL_GTT | |
1628 | MI_SEMAPHORE_POLL | |
1629 | MI_SEMAPHORE_SAD_EQ_SDD; |
1630 | *cs++ = 0; |
1631 | *cs++ = i915_ggtt_offset(vma); |
1632 | *cs++ = 0; |
1633 | |
1634 | intel_ring_advance(rq: lo, cs); |
1635 | |
1636 | i915_request_get(rq: lo); |
1637 | i915_request_add(rq: lo); |
1638 | |
1639 | if (wait_for(READ_ONCE(*map), 10)) { |
1640 | i915_request_put(rq: lo); |
1641 | err = -ETIMEDOUT; |
1642 | goto err_vma; |
1643 | } |
1644 | |
1645 | /* Low priority request should be busywaiting now */ |
1646 | if (i915_request_wait(rq: lo, flags: 0, timeout: 1) != -ETIME) { |
1647 | i915_request_put(rq: lo); |
1648 | pr_err("%s: Busywaiting request did not!\n" , |
1649 | engine->name); |
1650 | err = -EIO; |
1651 | goto err_vma; |
1652 | } |
1653 | |
1654 | hi = igt_request_alloc(ctx: ctx_hi, engine); |
1655 | if (IS_ERR(ptr: hi)) { |
1656 | err = PTR_ERR(ptr: hi); |
1657 | i915_request_put(rq: lo); |
1658 | goto err_vma; |
1659 | } |
1660 | |
1661 | cs = intel_ring_begin(rq: hi, num_dwords: 4); |
1662 | if (IS_ERR(ptr: cs)) { |
1663 | err = PTR_ERR(ptr: cs); |
1664 | i915_request_add(rq: hi); |
1665 | i915_request_put(rq: lo); |
1666 | goto err_vma; |
1667 | } |
1668 | |
1669 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
1670 | *cs++ = i915_ggtt_offset(vma); |
1671 | *cs++ = 0; |
1672 | *cs++ = 0; |
1673 | |
1674 | intel_ring_advance(rq: hi, cs); |
1675 | i915_request_add(rq: hi); |
1676 | |
1677 | if (i915_request_wait(rq: lo, flags: 0, HZ / 5) < 0) { |
1678 | struct drm_printer p = drm_info_printer(dev: gt->i915->drm.dev); |
1679 | |
1680 | pr_err("%s: Failed to preempt semaphore busywait!\n" , |
1681 | engine->name); |
1682 | |
1683 | intel_engine_dump(engine, m: &p, header: "%s\n" , engine->name); |
1684 | GEM_TRACE_DUMP(); |
1685 | |
1686 | i915_request_put(rq: lo); |
1687 | intel_gt_set_wedged(gt); |
1688 | err = -EIO; |
1689 | goto err_vma; |
1690 | } |
1691 | GEM_BUG_ON(READ_ONCE(*map)); |
1692 | i915_request_put(rq: lo); |
1693 | |
1694 | if (igt_live_test_end(t: &t)) { |
1695 | err = -EIO; |
1696 | goto err_vma; |
1697 | } |
1698 | } |
1699 | |
1700 | err = 0; |
1701 | err_vma: |
1702 | i915_vma_unpin(vma); |
1703 | err_map: |
1704 | i915_gem_object_unpin_map(obj); |
1705 | err_obj: |
1706 | i915_gem_object_put(obj); |
1707 | err_ctx_lo: |
1708 | kernel_context_close(ctx: ctx_lo); |
1709 | err_ctx_hi: |
1710 | kernel_context_close(ctx: ctx_hi); |
1711 | return err; |
1712 | } |
1713 | |
1714 | static struct i915_request * |
1715 | spinner_create_request(struct igt_spinner *spin, |
1716 | struct i915_gem_context *ctx, |
1717 | struct intel_engine_cs *engine, |
1718 | u32 arb) |
1719 | { |
1720 | struct intel_context *ce; |
1721 | struct i915_request *rq; |
1722 | |
1723 | ce = i915_gem_context_get_engine(ctx, idx: engine->legacy_idx); |
1724 | if (IS_ERR(ptr: ce)) |
1725 | return ERR_CAST(ptr: ce); |
1726 | |
1727 | rq = igt_spinner_create_request(spin, ce, arbitration_command: arb); |
1728 | intel_context_put(ce); |
1729 | return rq; |
1730 | } |
1731 | |
1732 | static int live_preempt(void *arg) |
1733 | { |
1734 | struct intel_gt *gt = arg; |
1735 | struct i915_gem_context *ctx_hi, *ctx_lo; |
1736 | struct igt_spinner spin_hi, spin_lo; |
1737 | struct intel_engine_cs *engine; |
1738 | enum intel_engine_id id; |
1739 | int err = -ENOMEM; |
1740 | |
1741 | ctx_hi = kernel_context(i915: gt->i915, NULL); |
1742 | if (!ctx_hi) |
1743 | return -ENOMEM; |
1744 | ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; |
1745 | |
1746 | ctx_lo = kernel_context(i915: gt->i915, NULL); |
1747 | if (!ctx_lo) |
1748 | goto err_ctx_hi; |
1749 | ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; |
1750 | |
1751 | if (igt_spinner_init(spin: &spin_hi, gt)) |
1752 | goto err_ctx_lo; |
1753 | |
1754 | if (igt_spinner_init(spin: &spin_lo, gt)) |
1755 | goto err_spin_hi; |
1756 | |
1757 | for_each_engine(engine, gt, id) { |
1758 | struct igt_live_test t; |
1759 | struct i915_request *rq; |
1760 | |
1761 | if (!intel_engine_has_preemption(engine)) |
1762 | continue; |
1763 | |
1764 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
1765 | err = -EIO; |
1766 | goto err_spin_lo; |
1767 | } |
1768 | |
1769 | rq = spinner_create_request(spin: &spin_lo, ctx: ctx_lo, engine, |
1770 | MI_ARB_CHECK); |
1771 | if (IS_ERR(ptr: rq)) { |
1772 | err = PTR_ERR(ptr: rq); |
1773 | goto err_spin_lo; |
1774 | } |
1775 | |
1776 | i915_request_add(rq); |
1777 | if (!igt_wait_for_spinner(spin: &spin_lo, rq)) { |
1778 | GEM_TRACE("lo spinner failed to start\n" ); |
1779 | GEM_TRACE_DUMP(); |
1780 | intel_gt_set_wedged(gt); |
1781 | err = -EIO; |
1782 | goto err_spin_lo; |
1783 | } |
1784 | |
1785 | rq = spinner_create_request(spin: &spin_hi, ctx: ctx_hi, engine, |
1786 | MI_ARB_CHECK); |
1787 | if (IS_ERR(ptr: rq)) { |
1788 | igt_spinner_end(spin: &spin_lo); |
1789 | err = PTR_ERR(ptr: rq); |
1790 | goto err_spin_lo; |
1791 | } |
1792 | |
1793 | i915_request_add(rq); |
1794 | if (!igt_wait_for_spinner(spin: &spin_hi, rq)) { |
1795 | GEM_TRACE("hi spinner failed to start\n" ); |
1796 | GEM_TRACE_DUMP(); |
1797 | intel_gt_set_wedged(gt); |
1798 | err = -EIO; |
1799 | goto err_spin_lo; |
1800 | } |
1801 | |
1802 | igt_spinner_end(spin: &spin_hi); |
1803 | igt_spinner_end(spin: &spin_lo); |
1804 | |
1805 | if (igt_live_test_end(t: &t)) { |
1806 | err = -EIO; |
1807 | goto err_spin_lo; |
1808 | } |
1809 | } |
1810 | |
1811 | err = 0; |
1812 | err_spin_lo: |
1813 | igt_spinner_fini(spin: &spin_lo); |
1814 | err_spin_hi: |
1815 | igt_spinner_fini(spin: &spin_hi); |
1816 | err_ctx_lo: |
1817 | kernel_context_close(ctx: ctx_lo); |
1818 | err_ctx_hi: |
1819 | kernel_context_close(ctx: ctx_hi); |
1820 | return err; |
1821 | } |
1822 | |
1823 | static int live_late_preempt(void *arg) |
1824 | { |
1825 | struct intel_gt *gt = arg; |
1826 | struct i915_gem_context *ctx_hi, *ctx_lo; |
1827 | struct igt_spinner spin_hi, spin_lo; |
1828 | struct intel_engine_cs *engine; |
1829 | struct i915_sched_attr attr = {}; |
1830 | enum intel_engine_id id; |
1831 | int err = -ENOMEM; |
1832 | |
1833 | ctx_hi = kernel_context(i915: gt->i915, NULL); |
1834 | if (!ctx_hi) |
1835 | return -ENOMEM; |
1836 | |
1837 | ctx_lo = kernel_context(i915: gt->i915, NULL); |
1838 | if (!ctx_lo) |
1839 | goto err_ctx_hi; |
1840 | |
1841 | if (igt_spinner_init(spin: &spin_hi, gt)) |
1842 | goto err_ctx_lo; |
1843 | |
1844 | if (igt_spinner_init(spin: &spin_lo, gt)) |
1845 | goto err_spin_hi; |
1846 | |
1847 | /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */ |
1848 | ctx_lo->sched.priority = 1; |
1849 | |
1850 | for_each_engine(engine, gt, id) { |
1851 | struct igt_live_test t; |
1852 | struct i915_request *rq; |
1853 | |
1854 | if (!intel_engine_has_preemption(engine)) |
1855 | continue; |
1856 | |
1857 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
1858 | err = -EIO; |
1859 | goto err_spin_lo; |
1860 | } |
1861 | |
1862 | rq = spinner_create_request(spin: &spin_lo, ctx: ctx_lo, engine, |
1863 | MI_ARB_CHECK); |
1864 | if (IS_ERR(ptr: rq)) { |
1865 | err = PTR_ERR(ptr: rq); |
1866 | goto err_spin_lo; |
1867 | } |
1868 | |
1869 | i915_request_add(rq); |
1870 | if (!igt_wait_for_spinner(spin: &spin_lo, rq)) { |
1871 | pr_err("First context failed to start\n" ); |
1872 | goto err_wedged; |
1873 | } |
1874 | |
1875 | rq = spinner_create_request(spin: &spin_hi, ctx: ctx_hi, engine, |
1876 | MI_NOOP); |
1877 | if (IS_ERR(ptr: rq)) { |
1878 | igt_spinner_end(spin: &spin_lo); |
1879 | err = PTR_ERR(ptr: rq); |
1880 | goto err_spin_lo; |
1881 | } |
1882 | |
1883 | i915_request_add(rq); |
1884 | if (igt_wait_for_spinner(spin: &spin_hi, rq)) { |
1885 | pr_err("Second context overtook first?\n" ); |
1886 | goto err_wedged; |
1887 | } |
1888 | |
1889 | attr.priority = I915_PRIORITY_MAX; |
1890 | engine->sched_engine->schedule(rq, &attr); |
1891 | |
1892 | if (!igt_wait_for_spinner(spin: &spin_hi, rq)) { |
1893 | pr_err("High priority context failed to preempt the low priority context\n" ); |
1894 | GEM_TRACE_DUMP(); |
1895 | goto err_wedged; |
1896 | } |
1897 | |
1898 | igt_spinner_end(spin: &spin_hi); |
1899 | igt_spinner_end(spin: &spin_lo); |
1900 | |
1901 | if (igt_live_test_end(t: &t)) { |
1902 | err = -EIO; |
1903 | goto err_spin_lo; |
1904 | } |
1905 | } |
1906 | |
1907 | err = 0; |
1908 | err_spin_lo: |
1909 | igt_spinner_fini(spin: &spin_lo); |
1910 | err_spin_hi: |
1911 | igt_spinner_fini(spin: &spin_hi); |
1912 | err_ctx_lo: |
1913 | kernel_context_close(ctx: ctx_lo); |
1914 | err_ctx_hi: |
1915 | kernel_context_close(ctx: ctx_hi); |
1916 | return err; |
1917 | |
1918 | err_wedged: |
1919 | igt_spinner_end(spin: &spin_hi); |
1920 | igt_spinner_end(spin: &spin_lo); |
1921 | intel_gt_set_wedged(gt); |
1922 | err = -EIO; |
1923 | goto err_spin_lo; |
1924 | } |
1925 | |
1926 | struct preempt_client { |
1927 | struct igt_spinner spin; |
1928 | struct i915_gem_context *ctx; |
1929 | }; |
1930 | |
1931 | static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c) |
1932 | { |
1933 | c->ctx = kernel_context(i915: gt->i915, NULL); |
1934 | if (!c->ctx) |
1935 | return -ENOMEM; |
1936 | |
1937 | if (igt_spinner_init(spin: &c->spin, gt)) |
1938 | goto err_ctx; |
1939 | |
1940 | return 0; |
1941 | |
1942 | err_ctx: |
1943 | kernel_context_close(ctx: c->ctx); |
1944 | return -ENOMEM; |
1945 | } |
1946 | |
1947 | static void preempt_client_fini(struct preempt_client *c) |
1948 | { |
1949 | igt_spinner_fini(spin: &c->spin); |
1950 | kernel_context_close(ctx: c->ctx); |
1951 | } |
1952 | |
1953 | static int live_nopreempt(void *arg) |
1954 | { |
1955 | struct intel_gt *gt = arg; |
1956 | struct intel_engine_cs *engine; |
1957 | struct preempt_client a, b; |
1958 | enum intel_engine_id id; |
1959 | int err = -ENOMEM; |
1960 | |
1961 | /* |
1962 | * Verify that we can disable preemption for an individual request |
1963 | * that may be being observed and not want to be interrupted. |
1964 | */ |
1965 | |
1966 | if (preempt_client_init(gt, c: &a)) |
1967 | return -ENOMEM; |
1968 | if (preempt_client_init(gt, c: &b)) |
1969 | goto err_client_a; |
1970 | b.ctx->sched.priority = I915_PRIORITY_MAX; |
1971 | |
1972 | for_each_engine(engine, gt, id) { |
1973 | struct i915_request *rq_a, *rq_b; |
1974 | |
1975 | if (!intel_engine_has_preemption(engine)) |
1976 | continue; |
1977 | |
1978 | engine->execlists.preempt_hang.count = 0; |
1979 | |
1980 | rq_a = spinner_create_request(spin: &a.spin, |
1981 | ctx: a.ctx, engine, |
1982 | MI_ARB_CHECK); |
1983 | if (IS_ERR(ptr: rq_a)) { |
1984 | err = PTR_ERR(ptr: rq_a); |
1985 | goto err_client_b; |
1986 | } |
1987 | |
1988 | /* Low priority client, but unpreemptable! */ |
1989 | __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); |
1990 | |
1991 | i915_request_add(rq: rq_a); |
1992 | if (!igt_wait_for_spinner(spin: &a.spin, rq: rq_a)) { |
1993 | pr_err("First client failed to start\n" ); |
1994 | goto err_wedged; |
1995 | } |
1996 | |
1997 | rq_b = spinner_create_request(spin: &b.spin, |
1998 | ctx: b.ctx, engine, |
1999 | MI_ARB_CHECK); |
2000 | if (IS_ERR(ptr: rq_b)) { |
2001 | err = PTR_ERR(ptr: rq_b); |
2002 | goto err_client_b; |
2003 | } |
2004 | |
2005 | i915_request_add(rq: rq_b); |
2006 | |
2007 | /* B is much more important than A! (But A is unpreemptable.) */ |
2008 | GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); |
2009 | |
2010 | /* Wait long enough for preemption and timeslicing */ |
2011 | if (igt_wait_for_spinner(spin: &b.spin, rq: rq_b)) { |
2012 | pr_err("Second client started too early!\n" ); |
2013 | goto err_wedged; |
2014 | } |
2015 | |
2016 | igt_spinner_end(spin: &a.spin); |
2017 | |
2018 | if (!igt_wait_for_spinner(spin: &b.spin, rq: rq_b)) { |
2019 | pr_err("Second client failed to start\n" ); |
2020 | goto err_wedged; |
2021 | } |
2022 | |
2023 | igt_spinner_end(spin: &b.spin); |
2024 | |
2025 | if (engine->execlists.preempt_hang.count) { |
2026 | pr_err("Preemption recorded x%d; should have been suppressed!\n" , |
2027 | engine->execlists.preempt_hang.count); |
2028 | err = -EINVAL; |
2029 | goto err_wedged; |
2030 | } |
2031 | |
2032 | if (igt_flush_test(i915: gt->i915)) |
2033 | goto err_wedged; |
2034 | } |
2035 | |
2036 | err = 0; |
2037 | err_client_b: |
2038 | preempt_client_fini(c: &b); |
2039 | err_client_a: |
2040 | preempt_client_fini(c: &a); |
2041 | return err; |
2042 | |
2043 | err_wedged: |
2044 | igt_spinner_end(spin: &b.spin); |
2045 | igt_spinner_end(spin: &a.spin); |
2046 | intel_gt_set_wedged(gt); |
2047 | err = -EIO; |
2048 | goto err_client_b; |
2049 | } |
2050 | |
2051 | struct live_preempt_cancel { |
2052 | struct intel_engine_cs *engine; |
2053 | struct preempt_client a, b; |
2054 | }; |
2055 | |
2056 | static int __cancel_active0(struct live_preempt_cancel *arg) |
2057 | { |
2058 | struct i915_request *rq; |
2059 | struct igt_live_test t; |
2060 | int err; |
2061 | |
2062 | /* Preempt cancel of ELSP0 */ |
2063 | GEM_TRACE("%s(%s)\n" , __func__, arg->engine->name); |
2064 | if (igt_live_test_begin(t: &t, i915: arg->engine->i915, |
2065 | func: __func__, name: arg->engine->name)) |
2066 | return -EIO; |
2067 | |
2068 | rq = spinner_create_request(spin: &arg->a.spin, |
2069 | ctx: arg->a.ctx, engine: arg->engine, |
2070 | MI_ARB_CHECK); |
2071 | if (IS_ERR(ptr: rq)) |
2072 | return PTR_ERR(ptr: rq); |
2073 | |
2074 | clear_bit(CONTEXT_BANNED, addr: &rq->context->flags); |
2075 | i915_request_get(rq); |
2076 | i915_request_add(rq); |
2077 | if (!igt_wait_for_spinner(spin: &arg->a.spin, rq)) { |
2078 | err = -EIO; |
2079 | goto out; |
2080 | } |
2081 | |
2082 | intel_context_ban(ce: rq->context, rq); |
2083 | err = intel_engine_pulse(engine: arg->engine); |
2084 | if (err) |
2085 | goto out; |
2086 | |
2087 | err = wait_for_reset(engine: arg->engine, rq, HZ / 2); |
2088 | if (err) { |
2089 | pr_err("Cancelled inflight0 request did not reset\n" ); |
2090 | goto out; |
2091 | } |
2092 | |
2093 | out: |
2094 | i915_request_put(rq); |
2095 | if (igt_live_test_end(t: &t)) |
2096 | err = -EIO; |
2097 | return err; |
2098 | } |
2099 | |
2100 | static int __cancel_active1(struct live_preempt_cancel *arg) |
2101 | { |
2102 | struct i915_request *rq[2] = {}; |
2103 | struct igt_live_test t; |
2104 | int err; |
2105 | |
2106 | /* Preempt cancel of ELSP1 */ |
2107 | GEM_TRACE("%s(%s)\n" , __func__, arg->engine->name); |
2108 | if (igt_live_test_begin(t: &t, i915: arg->engine->i915, |
2109 | func: __func__, name: arg->engine->name)) |
2110 | return -EIO; |
2111 | |
2112 | rq[0] = spinner_create_request(spin: &arg->a.spin, |
2113 | ctx: arg->a.ctx, engine: arg->engine, |
2114 | MI_NOOP); /* no preemption */ |
2115 | if (IS_ERR(ptr: rq[0])) |
2116 | return PTR_ERR(ptr: rq[0]); |
2117 | |
2118 | clear_bit(CONTEXT_BANNED, addr: &rq[0]->context->flags); |
2119 | i915_request_get(rq: rq[0]); |
2120 | i915_request_add(rq: rq[0]); |
2121 | if (!igt_wait_for_spinner(spin: &arg->a.spin, rq: rq[0])) { |
2122 | err = -EIO; |
2123 | goto out; |
2124 | } |
2125 | |
2126 | rq[1] = spinner_create_request(spin: &arg->b.spin, |
2127 | ctx: arg->b.ctx, engine: arg->engine, |
2128 | MI_ARB_CHECK); |
2129 | if (IS_ERR(ptr: rq[1])) { |
2130 | err = PTR_ERR(ptr: rq[1]); |
2131 | goto out; |
2132 | } |
2133 | |
2134 | clear_bit(CONTEXT_BANNED, addr: &rq[1]->context->flags); |
2135 | i915_request_get(rq: rq[1]); |
2136 | err = i915_request_await_dma_fence(rq: rq[1], fence: &rq[0]->fence); |
2137 | i915_request_add(rq: rq[1]); |
2138 | if (err) |
2139 | goto out; |
2140 | |
2141 | intel_context_ban(ce: rq[1]->context, rq: rq[1]); |
2142 | err = intel_engine_pulse(engine: arg->engine); |
2143 | if (err) |
2144 | goto out; |
2145 | |
2146 | igt_spinner_end(spin: &arg->a.spin); |
2147 | err = wait_for_reset(engine: arg->engine, rq: rq[1], HZ / 2); |
2148 | if (err) |
2149 | goto out; |
2150 | |
2151 | if (rq[0]->fence.error != 0) { |
2152 | pr_err("Normal inflight0 request did not complete\n" ); |
2153 | err = -EINVAL; |
2154 | goto out; |
2155 | } |
2156 | |
2157 | if (rq[1]->fence.error != -EIO) { |
2158 | pr_err("Cancelled inflight1 request did not report -EIO\n" ); |
2159 | err = -EINVAL; |
2160 | goto out; |
2161 | } |
2162 | |
2163 | out: |
2164 | i915_request_put(rq: rq[1]); |
2165 | i915_request_put(rq: rq[0]); |
2166 | if (igt_live_test_end(t: &t)) |
2167 | err = -EIO; |
2168 | return err; |
2169 | } |
2170 | |
2171 | static int __cancel_queued(struct live_preempt_cancel *arg) |
2172 | { |
2173 | struct i915_request *rq[3] = {}; |
2174 | struct igt_live_test t; |
2175 | int err; |
2176 | |
2177 | /* Full ELSP and one in the wings */ |
2178 | GEM_TRACE("%s(%s)\n" , __func__, arg->engine->name); |
2179 | if (igt_live_test_begin(t: &t, i915: arg->engine->i915, |
2180 | func: __func__, name: arg->engine->name)) |
2181 | return -EIO; |
2182 | |
2183 | rq[0] = spinner_create_request(spin: &arg->a.spin, |
2184 | ctx: arg->a.ctx, engine: arg->engine, |
2185 | MI_ARB_CHECK); |
2186 | if (IS_ERR(ptr: rq[0])) |
2187 | return PTR_ERR(ptr: rq[0]); |
2188 | |
2189 | clear_bit(CONTEXT_BANNED, addr: &rq[0]->context->flags); |
2190 | i915_request_get(rq: rq[0]); |
2191 | i915_request_add(rq: rq[0]); |
2192 | if (!igt_wait_for_spinner(spin: &arg->a.spin, rq: rq[0])) { |
2193 | err = -EIO; |
2194 | goto out; |
2195 | } |
2196 | |
2197 | rq[1] = igt_request_alloc(ctx: arg->b.ctx, engine: arg->engine); |
2198 | if (IS_ERR(ptr: rq[1])) { |
2199 | err = PTR_ERR(ptr: rq[1]); |
2200 | goto out; |
2201 | } |
2202 | |
2203 | clear_bit(CONTEXT_BANNED, addr: &rq[1]->context->flags); |
2204 | i915_request_get(rq: rq[1]); |
2205 | err = i915_request_await_dma_fence(rq: rq[1], fence: &rq[0]->fence); |
2206 | i915_request_add(rq: rq[1]); |
2207 | if (err) |
2208 | goto out; |
2209 | |
2210 | rq[2] = spinner_create_request(spin: &arg->b.spin, |
2211 | ctx: arg->a.ctx, engine: arg->engine, |
2212 | MI_ARB_CHECK); |
2213 | if (IS_ERR(ptr: rq[2])) { |
2214 | err = PTR_ERR(ptr: rq[2]); |
2215 | goto out; |
2216 | } |
2217 | |
2218 | i915_request_get(rq: rq[2]); |
2219 | err = i915_request_await_dma_fence(rq: rq[2], fence: &rq[1]->fence); |
2220 | i915_request_add(rq: rq[2]); |
2221 | if (err) |
2222 | goto out; |
2223 | |
2224 | intel_context_ban(ce: rq[2]->context, rq: rq[2]); |
2225 | err = intel_engine_pulse(engine: arg->engine); |
2226 | if (err) |
2227 | goto out; |
2228 | |
2229 | err = wait_for_reset(engine: arg->engine, rq: rq[2], HZ / 2); |
2230 | if (err) |
2231 | goto out; |
2232 | |
2233 | if (rq[0]->fence.error != -EIO) { |
2234 | pr_err("Cancelled inflight0 request did not report -EIO\n" ); |
2235 | err = -EINVAL; |
2236 | goto out; |
2237 | } |
2238 | |
2239 | /* |
2240 | * The behavior between having semaphores and not is different. With |
2241 | * semaphores the subsequent request is on the hardware and not cancelled |
2242 | * while without the request is held in the driver and cancelled. |
2243 | */ |
2244 | if (intel_engine_has_semaphores(engine: rq[1]->engine) && |
2245 | rq[1]->fence.error != 0) { |
2246 | pr_err("Normal inflight1 request did not complete\n" ); |
2247 | err = -EINVAL; |
2248 | goto out; |
2249 | } |
2250 | |
2251 | if (rq[2]->fence.error != -EIO) { |
2252 | pr_err("Cancelled queued request did not report -EIO\n" ); |
2253 | err = -EINVAL; |
2254 | goto out; |
2255 | } |
2256 | |
2257 | out: |
2258 | i915_request_put(rq: rq[2]); |
2259 | i915_request_put(rq: rq[1]); |
2260 | i915_request_put(rq: rq[0]); |
2261 | if (igt_live_test_end(t: &t)) |
2262 | err = -EIO; |
2263 | return err; |
2264 | } |
2265 | |
2266 | static int __cancel_hostile(struct live_preempt_cancel *arg) |
2267 | { |
2268 | struct i915_request *rq; |
2269 | int err; |
2270 | |
2271 | /* Preempt cancel non-preemptible spinner in ELSP0 */ |
2272 | if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) |
2273 | return 0; |
2274 | |
2275 | if (!intel_has_reset_engine(gt: arg->engine->gt)) |
2276 | return 0; |
2277 | |
2278 | GEM_TRACE("%s(%s)\n" , __func__, arg->engine->name); |
2279 | rq = spinner_create_request(spin: &arg->a.spin, |
2280 | ctx: arg->a.ctx, engine: arg->engine, |
2281 | MI_NOOP); /* preemption disabled */ |
2282 | if (IS_ERR(ptr: rq)) |
2283 | return PTR_ERR(ptr: rq); |
2284 | |
2285 | clear_bit(CONTEXT_BANNED, addr: &rq->context->flags); |
2286 | i915_request_get(rq); |
2287 | i915_request_add(rq); |
2288 | if (!igt_wait_for_spinner(spin: &arg->a.spin, rq)) { |
2289 | err = -EIO; |
2290 | goto out; |
2291 | } |
2292 | |
2293 | intel_context_ban(ce: rq->context, rq); |
2294 | err = intel_engine_pulse(engine: arg->engine); /* force reset */ |
2295 | if (err) |
2296 | goto out; |
2297 | |
2298 | err = wait_for_reset(engine: arg->engine, rq, HZ / 2); |
2299 | if (err) { |
2300 | pr_err("Cancelled inflight0 request did not reset\n" ); |
2301 | goto out; |
2302 | } |
2303 | |
2304 | out: |
2305 | i915_request_put(rq); |
2306 | if (igt_flush_test(i915: arg->engine->i915)) |
2307 | err = -EIO; |
2308 | return err; |
2309 | } |
2310 | |
2311 | static void force_reset_timeout(struct intel_engine_cs *engine) |
2312 | { |
2313 | engine->reset_timeout.probability = 999; |
2314 | atomic_set(v: &engine->reset_timeout.times, i: -1); |
2315 | } |
2316 | |
2317 | static void cancel_reset_timeout(struct intel_engine_cs *engine) |
2318 | { |
2319 | memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout)); |
2320 | } |
2321 | |
2322 | static int __cancel_fail(struct live_preempt_cancel *arg) |
2323 | { |
2324 | struct intel_engine_cs *engine = arg->engine; |
2325 | struct i915_request *rq; |
2326 | int err; |
2327 | |
2328 | if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) |
2329 | return 0; |
2330 | |
2331 | if (!intel_has_reset_engine(gt: engine->gt)) |
2332 | return 0; |
2333 | |
2334 | GEM_TRACE("%s(%s)\n" , __func__, engine->name); |
2335 | rq = spinner_create_request(spin: &arg->a.spin, |
2336 | ctx: arg->a.ctx, engine, |
2337 | MI_NOOP); /* preemption disabled */ |
2338 | if (IS_ERR(ptr: rq)) |
2339 | return PTR_ERR(ptr: rq); |
2340 | |
2341 | clear_bit(CONTEXT_BANNED, addr: &rq->context->flags); |
2342 | i915_request_get(rq); |
2343 | i915_request_add(rq); |
2344 | if (!igt_wait_for_spinner(spin: &arg->a.spin, rq)) { |
2345 | err = -EIO; |
2346 | goto out; |
2347 | } |
2348 | |
2349 | intel_context_set_banned(ce: rq->context); |
2350 | |
2351 | err = intel_engine_pulse(engine); |
2352 | if (err) |
2353 | goto out; |
2354 | |
2355 | force_reset_timeout(engine); |
2356 | |
2357 | /* force preempt reset [failure] */ |
2358 | while (!engine->execlists.pending[0]) |
2359 | intel_engine_flush_submission(engine); |
2360 | del_timer_sync(timer: &engine->execlists.preempt); |
2361 | intel_engine_flush_submission(engine); |
2362 | |
2363 | cancel_reset_timeout(engine); |
2364 | |
2365 | /* after failure, require heartbeats to reset device */ |
2366 | intel_engine_set_heartbeat(engine, delay: 1); |
2367 | err = wait_for_reset(engine, rq, HZ / 2); |
2368 | intel_engine_set_heartbeat(engine, |
2369 | delay: engine->defaults.heartbeat_interval_ms); |
2370 | if (err) { |
2371 | pr_err("Cancelled inflight0 request did not reset\n" ); |
2372 | goto out; |
2373 | } |
2374 | |
2375 | out: |
2376 | i915_request_put(rq); |
2377 | if (igt_flush_test(i915: engine->i915)) |
2378 | err = -EIO; |
2379 | return err; |
2380 | } |
2381 | |
2382 | static int live_preempt_cancel(void *arg) |
2383 | { |
2384 | struct intel_gt *gt = arg; |
2385 | struct live_preempt_cancel data; |
2386 | enum intel_engine_id id; |
2387 | int err = -ENOMEM; |
2388 | |
2389 | /* |
2390 | * To cancel an inflight context, we need to first remove it from the |
2391 | * GPU. That sounds like preemption! Plus a little bit of bookkeeping. |
2392 | */ |
2393 | |
2394 | if (preempt_client_init(gt, c: &data.a)) |
2395 | return -ENOMEM; |
2396 | if (preempt_client_init(gt, c: &data.b)) |
2397 | goto err_client_a; |
2398 | |
2399 | for_each_engine(data.engine, gt, id) { |
2400 | if (!intel_engine_has_preemption(engine: data.engine)) |
2401 | continue; |
2402 | |
2403 | err = __cancel_active0(arg: &data); |
2404 | if (err) |
2405 | goto err_wedged; |
2406 | |
2407 | err = __cancel_active1(arg: &data); |
2408 | if (err) |
2409 | goto err_wedged; |
2410 | |
2411 | err = __cancel_queued(arg: &data); |
2412 | if (err) |
2413 | goto err_wedged; |
2414 | |
2415 | err = __cancel_hostile(arg: &data); |
2416 | if (err) |
2417 | goto err_wedged; |
2418 | |
2419 | err = __cancel_fail(arg: &data); |
2420 | if (err) |
2421 | goto err_wedged; |
2422 | } |
2423 | |
2424 | err = 0; |
2425 | err_client_b: |
2426 | preempt_client_fini(c: &data.b); |
2427 | err_client_a: |
2428 | preempt_client_fini(c: &data.a); |
2429 | return err; |
2430 | |
2431 | err_wedged: |
2432 | GEM_TRACE_DUMP(); |
2433 | igt_spinner_end(spin: &data.b.spin); |
2434 | igt_spinner_end(spin: &data.a.spin); |
2435 | intel_gt_set_wedged(gt); |
2436 | goto err_client_b; |
2437 | } |
2438 | |
2439 | static int live_suppress_self_preempt(void *arg) |
2440 | { |
2441 | struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; |
2442 | struct intel_gt *gt = arg; |
2443 | struct intel_engine_cs *engine; |
2444 | struct preempt_client a, b; |
2445 | enum intel_engine_id id; |
2446 | int err = -ENOMEM; |
2447 | |
2448 | /* |
2449 | * Verify that if a preemption request does not cause a change in |
2450 | * the current execution order, the preempt-to-idle injection is |
2451 | * skipped and that we do not accidentally apply it after the CS |
2452 | * completion event. |
2453 | */ |
2454 | |
2455 | if (intel_uc_uses_guc_submission(uc: >->uc)) |
2456 | return 0; /* presume black blox */ |
2457 | |
2458 | if (intel_vgpu_active(i915: gt->i915)) |
2459 | return 0; /* GVT forces single port & request submission */ |
2460 | |
2461 | if (preempt_client_init(gt, c: &a)) |
2462 | return -ENOMEM; |
2463 | if (preempt_client_init(gt, c: &b)) |
2464 | goto err_client_a; |
2465 | |
2466 | for_each_engine(engine, gt, id) { |
2467 | struct i915_request *rq_a, *rq_b; |
2468 | int depth; |
2469 | |
2470 | if (!intel_engine_has_preemption(engine)) |
2471 | continue; |
2472 | |
2473 | if (igt_flush_test(i915: gt->i915)) |
2474 | goto err_wedged; |
2475 | |
2476 | st_engine_heartbeat_disable(engine); |
2477 | engine->execlists.preempt_hang.count = 0; |
2478 | |
2479 | rq_a = spinner_create_request(spin: &a.spin, |
2480 | ctx: a.ctx, engine, |
2481 | MI_NOOP); |
2482 | if (IS_ERR(ptr: rq_a)) { |
2483 | err = PTR_ERR(ptr: rq_a); |
2484 | st_engine_heartbeat_enable(engine); |
2485 | goto err_client_b; |
2486 | } |
2487 | |
2488 | i915_request_add(rq: rq_a); |
2489 | if (!igt_wait_for_spinner(spin: &a.spin, rq: rq_a)) { |
2490 | pr_err("First client failed to start\n" ); |
2491 | st_engine_heartbeat_enable(engine); |
2492 | goto err_wedged; |
2493 | } |
2494 | |
2495 | /* Keep postponing the timer to avoid premature slicing */ |
2496 | mod_timer(timer: &engine->execlists.timer, expires: jiffies + HZ); |
2497 | for (depth = 0; depth < 8; depth++) { |
2498 | rq_b = spinner_create_request(spin: &b.spin, |
2499 | ctx: b.ctx, engine, |
2500 | MI_NOOP); |
2501 | if (IS_ERR(ptr: rq_b)) { |
2502 | err = PTR_ERR(ptr: rq_b); |
2503 | st_engine_heartbeat_enable(engine); |
2504 | goto err_client_b; |
2505 | } |
2506 | i915_request_add(rq: rq_b); |
2507 | |
2508 | GEM_BUG_ON(i915_request_completed(rq_a)); |
2509 | engine->sched_engine->schedule(rq_a, &attr); |
2510 | igt_spinner_end(spin: &a.spin); |
2511 | |
2512 | if (!igt_wait_for_spinner(spin: &b.spin, rq: rq_b)) { |
2513 | pr_err("Second client failed to start\n" ); |
2514 | st_engine_heartbeat_enable(engine); |
2515 | goto err_wedged; |
2516 | } |
2517 | |
2518 | swap(a, b); |
2519 | rq_a = rq_b; |
2520 | } |
2521 | igt_spinner_end(spin: &a.spin); |
2522 | |
2523 | if (engine->execlists.preempt_hang.count) { |
2524 | pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n" , |
2525 | engine->name, |
2526 | engine->execlists.preempt_hang.count, |
2527 | depth); |
2528 | st_engine_heartbeat_enable(engine); |
2529 | err = -EINVAL; |
2530 | goto err_client_b; |
2531 | } |
2532 | |
2533 | st_engine_heartbeat_enable(engine); |
2534 | if (igt_flush_test(i915: gt->i915)) |
2535 | goto err_wedged; |
2536 | } |
2537 | |
2538 | err = 0; |
2539 | err_client_b: |
2540 | preempt_client_fini(c: &b); |
2541 | err_client_a: |
2542 | preempt_client_fini(c: &a); |
2543 | return err; |
2544 | |
2545 | err_wedged: |
2546 | igt_spinner_end(spin: &b.spin); |
2547 | igt_spinner_end(spin: &a.spin); |
2548 | intel_gt_set_wedged(gt); |
2549 | err = -EIO; |
2550 | goto err_client_b; |
2551 | } |
2552 | |
2553 | static int live_chain_preempt(void *arg) |
2554 | { |
2555 | struct intel_gt *gt = arg; |
2556 | struct intel_engine_cs *engine; |
2557 | struct preempt_client hi, lo; |
2558 | enum intel_engine_id id; |
2559 | int err = -ENOMEM; |
2560 | |
2561 | /* |
2562 | * Build a chain AB...BA between two contexts (A, B) and request |
2563 | * preemption of the last request. It should then complete before |
2564 | * the previously submitted spinner in B. |
2565 | */ |
2566 | |
2567 | if (preempt_client_init(gt, c: &hi)) |
2568 | return -ENOMEM; |
2569 | |
2570 | if (preempt_client_init(gt, c: &lo)) |
2571 | goto err_client_hi; |
2572 | |
2573 | for_each_engine(engine, gt, id) { |
2574 | struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; |
2575 | struct igt_live_test t; |
2576 | struct i915_request *rq; |
2577 | int ring_size, count, i; |
2578 | |
2579 | if (!intel_engine_has_preemption(engine)) |
2580 | continue; |
2581 | |
2582 | rq = spinner_create_request(spin: &lo.spin, |
2583 | ctx: lo.ctx, engine, |
2584 | MI_ARB_CHECK); |
2585 | if (IS_ERR(ptr: rq)) |
2586 | goto err_wedged; |
2587 | |
2588 | i915_request_get(rq); |
2589 | i915_request_add(rq); |
2590 | |
2591 | ring_size = rq->wa_tail - rq->head; |
2592 | if (ring_size < 0) |
2593 | ring_size += rq->ring->size; |
2594 | ring_size = rq->ring->size / ring_size; |
2595 | pr_debug("%s(%s): Using maximum of %d requests\n" , |
2596 | __func__, engine->name, ring_size); |
2597 | |
2598 | igt_spinner_end(spin: &lo.spin); |
2599 | if (i915_request_wait(rq, flags: 0, HZ / 2) < 0) { |
2600 | pr_err("Timed out waiting to flush %s\n" , engine->name); |
2601 | i915_request_put(rq); |
2602 | goto err_wedged; |
2603 | } |
2604 | i915_request_put(rq); |
2605 | |
2606 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
2607 | err = -EIO; |
2608 | goto err_wedged; |
2609 | } |
2610 | |
2611 | for_each_prime_number_from(count, 1, ring_size) { |
2612 | rq = spinner_create_request(spin: &hi.spin, |
2613 | ctx: hi.ctx, engine, |
2614 | MI_ARB_CHECK); |
2615 | if (IS_ERR(ptr: rq)) |
2616 | goto err_wedged; |
2617 | i915_request_add(rq); |
2618 | if (!igt_wait_for_spinner(spin: &hi.spin, rq)) |
2619 | goto err_wedged; |
2620 | |
2621 | rq = spinner_create_request(spin: &lo.spin, |
2622 | ctx: lo.ctx, engine, |
2623 | MI_ARB_CHECK); |
2624 | if (IS_ERR(ptr: rq)) |
2625 | goto err_wedged; |
2626 | i915_request_add(rq); |
2627 | |
2628 | for (i = 0; i < count; i++) { |
2629 | rq = igt_request_alloc(ctx: lo.ctx, engine); |
2630 | if (IS_ERR(ptr: rq)) |
2631 | goto err_wedged; |
2632 | i915_request_add(rq); |
2633 | } |
2634 | |
2635 | rq = igt_request_alloc(ctx: hi.ctx, engine); |
2636 | if (IS_ERR(ptr: rq)) |
2637 | goto err_wedged; |
2638 | |
2639 | i915_request_get(rq); |
2640 | i915_request_add(rq); |
2641 | engine->sched_engine->schedule(rq, &attr); |
2642 | |
2643 | igt_spinner_end(spin: &hi.spin); |
2644 | if (i915_request_wait(rq, flags: 0, HZ / 5) < 0) { |
2645 | struct drm_printer p = |
2646 | drm_info_printer(dev: gt->i915->drm.dev); |
2647 | |
2648 | pr_err("Failed to preempt over chain of %d\n" , |
2649 | count); |
2650 | intel_engine_dump(engine, m: &p, |
2651 | header: "%s\n" , engine->name); |
2652 | i915_request_put(rq); |
2653 | goto err_wedged; |
2654 | } |
2655 | igt_spinner_end(spin: &lo.spin); |
2656 | i915_request_put(rq); |
2657 | |
2658 | rq = igt_request_alloc(ctx: lo.ctx, engine); |
2659 | if (IS_ERR(ptr: rq)) |
2660 | goto err_wedged; |
2661 | |
2662 | i915_request_get(rq); |
2663 | i915_request_add(rq); |
2664 | |
2665 | if (i915_request_wait(rq, flags: 0, HZ / 5) < 0) { |
2666 | struct drm_printer p = |
2667 | drm_info_printer(dev: gt->i915->drm.dev); |
2668 | |
2669 | pr_err("Failed to flush low priority chain of %d requests\n" , |
2670 | count); |
2671 | intel_engine_dump(engine, m: &p, |
2672 | header: "%s\n" , engine->name); |
2673 | |
2674 | i915_request_put(rq); |
2675 | goto err_wedged; |
2676 | } |
2677 | i915_request_put(rq); |
2678 | } |
2679 | |
2680 | if (igt_live_test_end(t: &t)) { |
2681 | err = -EIO; |
2682 | goto err_wedged; |
2683 | } |
2684 | } |
2685 | |
2686 | err = 0; |
2687 | err_client_lo: |
2688 | preempt_client_fini(c: &lo); |
2689 | err_client_hi: |
2690 | preempt_client_fini(c: &hi); |
2691 | return err; |
2692 | |
2693 | err_wedged: |
2694 | igt_spinner_end(spin: &hi.spin); |
2695 | igt_spinner_end(spin: &lo.spin); |
2696 | intel_gt_set_wedged(gt); |
2697 | err = -EIO; |
2698 | goto err_client_lo; |
2699 | } |
2700 | |
2701 | static int create_gang(struct intel_engine_cs *engine, |
2702 | struct i915_request **prev) |
2703 | { |
2704 | struct drm_i915_gem_object *obj; |
2705 | struct intel_context *ce; |
2706 | struct i915_request *rq; |
2707 | struct i915_vma *vma; |
2708 | u32 *cs; |
2709 | int err; |
2710 | |
2711 | ce = intel_context_create(engine); |
2712 | if (IS_ERR(ptr: ce)) |
2713 | return PTR_ERR(ptr: ce); |
2714 | |
2715 | obj = i915_gem_object_create_internal(i915: engine->i915, size: 4096); |
2716 | if (IS_ERR(ptr: obj)) { |
2717 | err = PTR_ERR(ptr: obj); |
2718 | goto err_ce; |
2719 | } |
2720 | |
2721 | vma = i915_vma_instance(obj, vm: ce->vm, NULL); |
2722 | if (IS_ERR(ptr: vma)) { |
2723 | err = PTR_ERR(ptr: vma); |
2724 | goto err_obj; |
2725 | } |
2726 | |
2727 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_USER); |
2728 | if (err) |
2729 | goto err_obj; |
2730 | |
2731 | cs = i915_gem_object_pin_map_unlocked(obj, type: I915_MAP_WC); |
2732 | if (IS_ERR(ptr: cs)) { |
2733 | err = PTR_ERR(ptr: cs); |
2734 | goto err_obj; |
2735 | } |
2736 | |
2737 | /* Semaphore target: spin until zero */ |
2738 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
2739 | |
2740 | *cs++ = MI_SEMAPHORE_WAIT | |
2741 | MI_SEMAPHORE_POLL | |
2742 | MI_SEMAPHORE_SAD_EQ_SDD; |
2743 | *cs++ = 0; |
2744 | *cs++ = lower_32_bits(i915_vma_offset(vma)); |
2745 | *cs++ = upper_32_bits(i915_vma_offset(vma)); |
2746 | |
2747 | if (*prev) { |
2748 | u64 offset = i915_vma_offset(vma: (*prev)->batch); |
2749 | |
2750 | /* Terminate the spinner in the next lower priority batch. */ |
2751 | *cs++ = MI_STORE_DWORD_IMM_GEN4; |
2752 | *cs++ = lower_32_bits(offset); |
2753 | *cs++ = upper_32_bits(offset); |
2754 | *cs++ = 0; |
2755 | } |
2756 | |
2757 | *cs++ = MI_BATCH_BUFFER_END; |
2758 | i915_gem_object_flush_map(obj); |
2759 | i915_gem_object_unpin_map(obj); |
2760 | |
2761 | rq = intel_context_create_request(ce); |
2762 | if (IS_ERR(ptr: rq)) { |
2763 | err = PTR_ERR(ptr: rq); |
2764 | goto err_obj; |
2765 | } |
2766 | |
2767 | rq->batch = i915_vma_get(vma); |
2768 | i915_request_get(rq); |
2769 | |
2770 | err = igt_vma_move_to_active_unlocked(vma, rq, flags: 0); |
2771 | if (!err) |
2772 | err = rq->engine->emit_bb_start(rq, |
2773 | i915_vma_offset(vma), |
2774 | PAGE_SIZE, 0); |
2775 | i915_request_add(rq); |
2776 | if (err) |
2777 | goto err_rq; |
2778 | |
2779 | i915_gem_object_put(obj); |
2780 | intel_context_put(ce); |
2781 | |
2782 | rq->mock.link.next = &(*prev)->mock.link; |
2783 | *prev = rq; |
2784 | return 0; |
2785 | |
2786 | err_rq: |
2787 | i915_vma_put(vma: rq->batch); |
2788 | i915_request_put(rq); |
2789 | err_obj: |
2790 | i915_gem_object_put(obj); |
2791 | err_ce: |
2792 | intel_context_put(ce); |
2793 | return err; |
2794 | } |
2795 | |
2796 | static int __live_preempt_ring(struct intel_engine_cs *engine, |
2797 | struct igt_spinner *spin, |
2798 | int queue_sz, int ring_sz) |
2799 | { |
2800 | struct intel_context *ce[2] = {}; |
2801 | struct i915_request *rq; |
2802 | struct igt_live_test t; |
2803 | int err = 0; |
2804 | int n; |
2805 | |
2806 | if (igt_live_test_begin(t: &t, i915: engine->i915, func: __func__, name: engine->name)) |
2807 | return -EIO; |
2808 | |
2809 | for (n = 0; n < ARRAY_SIZE(ce); n++) { |
2810 | struct intel_context *tmp; |
2811 | |
2812 | tmp = intel_context_create(engine); |
2813 | if (IS_ERR(ptr: tmp)) { |
2814 | err = PTR_ERR(ptr: tmp); |
2815 | goto err_ce; |
2816 | } |
2817 | |
2818 | tmp->ring_size = ring_sz; |
2819 | |
2820 | err = intel_context_pin(ce: tmp); |
2821 | if (err) { |
2822 | intel_context_put(ce: tmp); |
2823 | goto err_ce; |
2824 | } |
2825 | |
2826 | memset32(s: tmp->ring->vaddr, |
2827 | v: 0xdeadbeef, /* trigger a hang if executed */ |
2828 | n: tmp->ring->vma->size / sizeof(u32)); |
2829 | |
2830 | ce[n] = tmp; |
2831 | } |
2832 | |
2833 | rq = igt_spinner_create_request(spin, ce: ce[0], MI_ARB_CHECK); |
2834 | if (IS_ERR(ptr: rq)) { |
2835 | err = PTR_ERR(ptr: rq); |
2836 | goto err_ce; |
2837 | } |
2838 | |
2839 | i915_request_get(rq); |
2840 | rq->sched.attr.priority = I915_PRIORITY_BARRIER; |
2841 | i915_request_add(rq); |
2842 | |
2843 | if (!igt_wait_for_spinner(spin, rq)) { |
2844 | intel_gt_set_wedged(gt: engine->gt); |
2845 | i915_request_put(rq); |
2846 | err = -ETIME; |
2847 | goto err_ce; |
2848 | } |
2849 | |
2850 | /* Fill the ring, until we will cause a wrap */ |
2851 | n = 0; |
2852 | while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { |
2853 | struct i915_request *tmp; |
2854 | |
2855 | tmp = intel_context_create_request(ce: ce[0]); |
2856 | if (IS_ERR(ptr: tmp)) { |
2857 | err = PTR_ERR(ptr: tmp); |
2858 | i915_request_put(rq); |
2859 | goto err_ce; |
2860 | } |
2861 | |
2862 | i915_request_add(rq: tmp); |
2863 | intel_engine_flush_submission(engine); |
2864 | n++; |
2865 | } |
2866 | intel_engine_flush_submission(engine); |
2867 | pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n" , |
2868 | engine->name, queue_sz, n, |
2869 | ce[0]->ring->size, |
2870 | ce[0]->ring->tail, |
2871 | ce[0]->ring->emit, |
2872 | rq->tail); |
2873 | i915_request_put(rq); |
2874 | |
2875 | /* Create a second request to preempt the first ring */ |
2876 | rq = intel_context_create_request(ce: ce[1]); |
2877 | if (IS_ERR(ptr: rq)) { |
2878 | err = PTR_ERR(ptr: rq); |
2879 | goto err_ce; |
2880 | } |
2881 | |
2882 | rq->sched.attr.priority = I915_PRIORITY_BARRIER; |
2883 | i915_request_get(rq); |
2884 | i915_request_add(rq); |
2885 | |
2886 | err = wait_for_submit(engine, rq, HZ / 2); |
2887 | i915_request_put(rq); |
2888 | if (err) { |
2889 | pr_err("%s: preemption request was not submitted\n" , |
2890 | engine->name); |
2891 | err = -ETIME; |
2892 | } |
2893 | |
2894 | pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n" , |
2895 | engine->name, |
2896 | ce[0]->ring->tail, ce[0]->ring->emit, |
2897 | ce[1]->ring->tail, ce[1]->ring->emit); |
2898 | |
2899 | err_ce: |
2900 | intel_engine_flush_submission(engine); |
2901 | igt_spinner_end(spin); |
2902 | for (n = 0; n < ARRAY_SIZE(ce); n++) { |
2903 | if (IS_ERR_OR_NULL(ptr: ce[n])) |
2904 | break; |
2905 | |
2906 | intel_context_unpin(ce: ce[n]); |
2907 | intel_context_put(ce: ce[n]); |
2908 | } |
2909 | if (igt_live_test_end(t: &t)) |
2910 | err = -EIO; |
2911 | return err; |
2912 | } |
2913 | |
2914 | static int live_preempt_ring(void *arg) |
2915 | { |
2916 | struct intel_gt *gt = arg; |
2917 | struct intel_engine_cs *engine; |
2918 | struct igt_spinner spin; |
2919 | enum intel_engine_id id; |
2920 | int err = 0; |
2921 | |
2922 | /* |
2923 | * Check that we rollback large chunks of a ring in order to do a |
2924 | * preemption event. Similar to live_unlite_ring, but looking at |
2925 | * ring size rather than the impact of intel_ring_direction(). |
2926 | */ |
2927 | |
2928 | if (igt_spinner_init(spin: &spin, gt)) |
2929 | return -ENOMEM; |
2930 | |
2931 | for_each_engine(engine, gt, id) { |
2932 | int n; |
2933 | |
2934 | if (!intel_engine_has_preemption(engine)) |
2935 | continue; |
2936 | |
2937 | if (!intel_engine_can_store_dword(engine)) |
2938 | continue; |
2939 | |
2940 | st_engine_heartbeat_disable(engine); |
2941 | |
2942 | for (n = 0; n <= 3; n++) { |
2943 | err = __live_preempt_ring(engine, spin: &spin, |
2944 | queue_sz: n * SZ_4K / 4, SZ_4K); |
2945 | if (err) |
2946 | break; |
2947 | } |
2948 | |
2949 | st_engine_heartbeat_enable(engine); |
2950 | if (err) |
2951 | break; |
2952 | } |
2953 | |
2954 | igt_spinner_fini(spin: &spin); |
2955 | return err; |
2956 | } |
2957 | |
2958 | static int live_preempt_gang(void *arg) |
2959 | { |
2960 | struct intel_gt *gt = arg; |
2961 | struct intel_engine_cs *engine; |
2962 | enum intel_engine_id id; |
2963 | |
2964 | /* |
2965 | * Build as long a chain of preempters as we can, with each |
2966 | * request higher priority than the last. Once we are ready, we release |
2967 | * the last batch which then precolates down the chain, each releasing |
2968 | * the next oldest in turn. The intent is to simply push as hard as we |
2969 | * can with the number of preemptions, trying to exceed narrow HW |
2970 | * limits. At a minimum, we insist that we can sort all the user |
2971 | * high priority levels into execution order. |
2972 | */ |
2973 | |
2974 | for_each_engine(engine, gt, id) { |
2975 | struct i915_request *rq = NULL; |
2976 | struct igt_live_test t; |
2977 | IGT_TIMEOUT(end_time); |
2978 | int prio = 0; |
2979 | int err = 0; |
2980 | u32 *cs; |
2981 | |
2982 | if (!intel_engine_has_preemption(engine)) |
2983 | continue; |
2984 | |
2985 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) |
2986 | return -EIO; |
2987 | |
2988 | do { |
2989 | struct i915_sched_attr attr = { .priority = prio++ }; |
2990 | |
2991 | err = create_gang(engine, prev: &rq); |
2992 | if (err) |
2993 | break; |
2994 | |
2995 | /* Submit each spinner at increasing priority */ |
2996 | engine->sched_engine->schedule(rq, &attr); |
2997 | } while (prio <= I915_PRIORITY_MAX && |
2998 | !__igt_timeout(timeout: end_time, NULL)); |
2999 | pr_debug("%s: Preempt chain of %d requests\n" , |
3000 | engine->name, prio); |
3001 | |
3002 | /* |
3003 | * Such that the last spinner is the highest priority and |
3004 | * should execute first. When that spinner completes, |
3005 | * it will terminate the next lowest spinner until there |
3006 | * are no more spinners and the gang is complete. |
3007 | */ |
3008 | cs = i915_gem_object_pin_map_unlocked(obj: rq->batch->obj, type: I915_MAP_WC); |
3009 | if (!IS_ERR(ptr: cs)) { |
3010 | *cs = 0; |
3011 | i915_gem_object_unpin_map(obj: rq->batch->obj); |
3012 | } else { |
3013 | err = PTR_ERR(ptr: cs); |
3014 | intel_gt_set_wedged(gt); |
3015 | } |
3016 | |
3017 | while (rq) { /* wait for each rq from highest to lowest prio */ |
3018 | struct i915_request *n = list_next_entry(rq, mock.link); |
3019 | |
3020 | if (err == 0 && i915_request_wait(rq, flags: 0, HZ / 5) < 0) { |
3021 | struct drm_printer p = |
3022 | drm_info_printer(dev: engine->i915->drm.dev); |
3023 | |
3024 | pr_err("Failed to flush chain of %d requests, at %d\n" , |
3025 | prio, rq_prio(rq)); |
3026 | intel_engine_dump(engine, m: &p, |
3027 | header: "%s\n" , engine->name); |
3028 | |
3029 | err = -ETIME; |
3030 | } |
3031 | |
3032 | i915_vma_put(vma: rq->batch); |
3033 | i915_request_put(rq); |
3034 | rq = n; |
3035 | } |
3036 | |
3037 | if (igt_live_test_end(t: &t)) |
3038 | err = -EIO; |
3039 | if (err) |
3040 | return err; |
3041 | } |
3042 | |
3043 | return 0; |
3044 | } |
3045 | |
3046 | static struct i915_vma * |
3047 | create_gpr_user(struct intel_engine_cs *engine, |
3048 | struct i915_vma *result, |
3049 | unsigned int offset) |
3050 | { |
3051 | struct drm_i915_gem_object *obj; |
3052 | struct i915_vma *vma; |
3053 | u32 *cs; |
3054 | int err; |
3055 | int i; |
3056 | |
3057 | obj = i915_gem_object_create_internal(i915: engine->i915, size: 4096); |
3058 | if (IS_ERR(ptr: obj)) |
3059 | return ERR_CAST(ptr: obj); |
3060 | |
3061 | vma = i915_vma_instance(obj, vm: result->vm, NULL); |
3062 | if (IS_ERR(ptr: vma)) { |
3063 | i915_gem_object_put(obj); |
3064 | return vma; |
3065 | } |
3066 | |
3067 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_USER); |
3068 | if (err) { |
3069 | i915_vma_put(vma); |
3070 | return ERR_PTR(error: err); |
3071 | } |
3072 | |
3073 | cs = i915_gem_object_pin_map_unlocked(obj, type: I915_MAP_WC); |
3074 | if (IS_ERR(ptr: cs)) { |
3075 | i915_vma_put(vma); |
3076 | return ERR_CAST(ptr: cs); |
3077 | } |
3078 | |
3079 | /* All GPR are clear for new contexts. We use GPR(0) as a constant */ |
3080 | *cs++ = MI_LOAD_REGISTER_IMM(1); |
3081 | *cs++ = CS_GPR(engine, 0); |
3082 | *cs++ = 1; |
3083 | |
3084 | for (i = 1; i < NUM_GPR; i++) { |
3085 | u64 addr; |
3086 | |
3087 | /* |
3088 | * Perform: GPR[i]++ |
3089 | * |
3090 | * As we read and write into the context saved GPR[i], if |
3091 | * we restart this batch buffer from an earlier point, we |
3092 | * will repeat the increment and store a value > 1. |
3093 | */ |
3094 | *cs++ = MI_MATH(4); |
3095 | *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i)); |
3096 | *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0)); |
3097 | *cs++ = MI_MATH_ADD; |
3098 | *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU); |
3099 | |
3100 | addr = i915_vma_offset(vma: result) + offset + i * sizeof(*cs); |
3101 | *cs++ = MI_STORE_REGISTER_MEM_GEN8; |
3102 | *cs++ = CS_GPR(engine, 2 * i); |
3103 | *cs++ = lower_32_bits(addr); |
3104 | *cs++ = upper_32_bits(addr); |
3105 | |
3106 | *cs++ = MI_SEMAPHORE_WAIT | |
3107 | MI_SEMAPHORE_POLL | |
3108 | MI_SEMAPHORE_SAD_GTE_SDD; |
3109 | *cs++ = i; |
3110 | *cs++ = lower_32_bits(i915_vma_offset(result)); |
3111 | *cs++ = upper_32_bits(i915_vma_offset(result)); |
3112 | } |
3113 | |
3114 | *cs++ = MI_BATCH_BUFFER_END; |
3115 | i915_gem_object_flush_map(obj); |
3116 | i915_gem_object_unpin_map(obj); |
3117 | |
3118 | return vma; |
3119 | } |
3120 | |
3121 | static struct i915_vma *create_global(struct intel_gt *gt, size_t sz) |
3122 | { |
3123 | struct drm_i915_gem_object *obj; |
3124 | struct i915_vma *vma; |
3125 | int err; |
3126 | |
3127 | obj = i915_gem_object_create_internal(i915: gt->i915, size: sz); |
3128 | if (IS_ERR(ptr: obj)) |
3129 | return ERR_CAST(ptr: obj); |
3130 | |
3131 | vma = i915_vma_instance(obj, vm: >->ggtt->vm, NULL); |
3132 | if (IS_ERR(ptr: vma)) { |
3133 | i915_gem_object_put(obj); |
3134 | return vma; |
3135 | } |
3136 | |
3137 | err = i915_ggtt_pin(vma, NULL, align: 0, flags: 0); |
3138 | if (err) { |
3139 | i915_vma_put(vma); |
3140 | return ERR_PTR(error: err); |
3141 | } |
3142 | |
3143 | return vma; |
3144 | } |
3145 | |
3146 | static struct i915_request * |
3147 | create_gpr_client(struct intel_engine_cs *engine, |
3148 | struct i915_vma *global, |
3149 | unsigned int offset) |
3150 | { |
3151 | struct i915_vma *batch, *vma; |
3152 | struct intel_context *ce; |
3153 | struct i915_request *rq; |
3154 | int err; |
3155 | |
3156 | ce = intel_context_create(engine); |
3157 | if (IS_ERR(ptr: ce)) |
3158 | return ERR_CAST(ptr: ce); |
3159 | |
3160 | vma = i915_vma_instance(obj: global->obj, vm: ce->vm, NULL); |
3161 | if (IS_ERR(ptr: vma)) { |
3162 | err = PTR_ERR(ptr: vma); |
3163 | goto out_ce; |
3164 | } |
3165 | |
3166 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_USER); |
3167 | if (err) |
3168 | goto out_ce; |
3169 | |
3170 | batch = create_gpr_user(engine, result: vma, offset); |
3171 | if (IS_ERR(ptr: batch)) { |
3172 | err = PTR_ERR(ptr: batch); |
3173 | goto out_vma; |
3174 | } |
3175 | |
3176 | rq = intel_context_create_request(ce); |
3177 | if (IS_ERR(ptr: rq)) { |
3178 | err = PTR_ERR(ptr: rq); |
3179 | goto out_batch; |
3180 | } |
3181 | |
3182 | err = igt_vma_move_to_active_unlocked(vma, rq, flags: 0); |
3183 | |
3184 | i915_vma_lock(vma: batch); |
3185 | if (!err) |
3186 | err = i915_vma_move_to_active(vma: batch, rq, flags: 0); |
3187 | if (!err) |
3188 | err = rq->engine->emit_bb_start(rq, |
3189 | i915_vma_offset(vma: batch), |
3190 | PAGE_SIZE, 0); |
3191 | i915_vma_unlock(vma: batch); |
3192 | i915_vma_unpin(vma: batch); |
3193 | |
3194 | if (!err) |
3195 | i915_request_get(rq); |
3196 | i915_request_add(rq); |
3197 | |
3198 | out_batch: |
3199 | i915_vma_put(vma: batch); |
3200 | out_vma: |
3201 | i915_vma_unpin(vma); |
3202 | out_ce: |
3203 | intel_context_put(ce); |
3204 | return err ? ERR_PTR(error: err) : rq; |
3205 | } |
3206 | |
3207 | static int preempt_user(struct intel_engine_cs *engine, |
3208 | struct i915_vma *global, |
3209 | int id) |
3210 | { |
3211 | struct i915_sched_attr attr = { |
3212 | .priority = I915_PRIORITY_MAX |
3213 | }; |
3214 | struct i915_request *rq; |
3215 | int err = 0; |
3216 | u32 *cs; |
3217 | |
3218 | rq = intel_engine_create_kernel_request(engine); |
3219 | if (IS_ERR(ptr: rq)) |
3220 | return PTR_ERR(ptr: rq); |
3221 | |
3222 | cs = intel_ring_begin(rq, num_dwords: 4); |
3223 | if (IS_ERR(ptr: cs)) { |
3224 | i915_request_add(rq); |
3225 | return PTR_ERR(ptr: cs); |
3226 | } |
3227 | |
3228 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
3229 | *cs++ = i915_ggtt_offset(vma: global); |
3230 | *cs++ = 0; |
3231 | *cs++ = id; |
3232 | |
3233 | intel_ring_advance(rq, cs); |
3234 | |
3235 | i915_request_get(rq); |
3236 | i915_request_add(rq); |
3237 | |
3238 | engine->sched_engine->schedule(rq, &attr); |
3239 | |
3240 | if (i915_request_wait(rq, flags: 0, HZ / 2) < 0) |
3241 | err = -ETIME; |
3242 | i915_request_put(rq); |
3243 | |
3244 | return err; |
3245 | } |
3246 | |
3247 | static int live_preempt_user(void *arg) |
3248 | { |
3249 | struct intel_gt *gt = arg; |
3250 | struct intel_engine_cs *engine; |
3251 | struct i915_vma *global; |
3252 | enum intel_engine_id id; |
3253 | u32 *result; |
3254 | int err = 0; |
3255 | |
3256 | /* |
3257 | * In our other tests, we look at preemption in carefully |
3258 | * controlled conditions in the ringbuffer. Since most of the |
3259 | * time is spent in user batches, most of our preemptions naturally |
3260 | * occur there. We want to verify that when we preempt inside a batch |
3261 | * we continue on from the current instruction and do not roll back |
3262 | * to the start, or another earlier arbitration point. |
3263 | * |
3264 | * To verify this, we create a batch which is a mixture of |
3265 | * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with |
3266 | * a few preempting contexts thrown into the mix, we look for any |
3267 | * repeated instructions (which show up as incorrect values). |
3268 | */ |
3269 | |
3270 | global = create_global(gt, sz: 4096); |
3271 | if (IS_ERR(ptr: global)) |
3272 | return PTR_ERR(ptr: global); |
3273 | |
3274 | result = i915_gem_object_pin_map_unlocked(obj: global->obj, type: I915_MAP_WC); |
3275 | if (IS_ERR(ptr: result)) { |
3276 | i915_vma_unpin_and_release(p_vma: &global, flags: 0); |
3277 | return PTR_ERR(ptr: result); |
3278 | } |
3279 | |
3280 | for_each_engine(engine, gt, id) { |
3281 | struct i915_request *client[3] = {}; |
3282 | struct igt_live_test t; |
3283 | int i; |
3284 | |
3285 | if (!intel_engine_has_preemption(engine)) |
3286 | continue; |
3287 | |
3288 | if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS) |
3289 | continue; /* we need per-context GPR */ |
3290 | |
3291 | if (igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: engine->name)) { |
3292 | err = -EIO; |
3293 | break; |
3294 | } |
3295 | |
3296 | memset(result, 0, 4096); |
3297 | |
3298 | for (i = 0; i < ARRAY_SIZE(client); i++) { |
3299 | struct i915_request *rq; |
3300 | |
3301 | rq = create_gpr_client(engine, global, |
3302 | NUM_GPR * i * sizeof(u32)); |
3303 | if (IS_ERR(ptr: rq)) { |
3304 | err = PTR_ERR(ptr: rq); |
3305 | goto end_test; |
3306 | } |
3307 | |
3308 | client[i] = rq; |
3309 | } |
3310 | |
3311 | /* Continuously preempt the set of 3 running contexts */ |
3312 | for (i = 1; i <= NUM_GPR; i++) { |
3313 | err = preempt_user(engine, global, id: i); |
3314 | if (err) |
3315 | goto end_test; |
3316 | } |
3317 | |
3318 | if (READ_ONCE(result[0]) != NUM_GPR) { |
3319 | pr_err("%s: Failed to release semaphore\n" , |
3320 | engine->name); |
3321 | err = -EIO; |
3322 | goto end_test; |
3323 | } |
3324 | |
3325 | for (i = 0; i < ARRAY_SIZE(client); i++) { |
3326 | int gpr; |
3327 | |
3328 | if (i915_request_wait(rq: client[i], flags: 0, HZ / 2) < 0) { |
3329 | err = -ETIME; |
3330 | goto end_test; |
3331 | } |
3332 | |
3333 | for (gpr = 1; gpr < NUM_GPR; gpr++) { |
3334 | if (result[NUM_GPR * i + gpr] != 1) { |
3335 | pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n" , |
3336 | engine->name, |
3337 | i, gpr, result[NUM_GPR * i + gpr]); |
3338 | err = -EINVAL; |
3339 | goto end_test; |
3340 | } |
3341 | } |
3342 | } |
3343 | |
3344 | end_test: |
3345 | for (i = 0; i < ARRAY_SIZE(client); i++) { |
3346 | if (!client[i]) |
3347 | break; |
3348 | |
3349 | i915_request_put(rq: client[i]); |
3350 | } |
3351 | |
3352 | /* Flush the semaphores on error */ |
3353 | smp_store_mb(result[0], -1); |
3354 | if (igt_live_test_end(t: &t)) |
3355 | err = -EIO; |
3356 | if (err) |
3357 | break; |
3358 | } |
3359 | |
3360 | i915_vma_unpin_and_release(p_vma: &global, I915_VMA_RELEASE_MAP); |
3361 | return err; |
3362 | } |
3363 | |
3364 | static int live_preempt_timeout(void *arg) |
3365 | { |
3366 | struct intel_gt *gt = arg; |
3367 | struct i915_gem_context *ctx_hi, *ctx_lo; |
3368 | struct igt_spinner spin_lo; |
3369 | struct intel_engine_cs *engine; |
3370 | enum intel_engine_id id; |
3371 | int err = -ENOMEM; |
3372 | |
3373 | /* |
3374 | * Check that we force preemption to occur by cancelling the previous |
3375 | * context if it refuses to yield the GPU. |
3376 | */ |
3377 | if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) |
3378 | return 0; |
3379 | |
3380 | if (!intel_has_reset_engine(gt)) |
3381 | return 0; |
3382 | |
3383 | ctx_hi = kernel_context(i915: gt->i915, NULL); |
3384 | if (!ctx_hi) |
3385 | return -ENOMEM; |
3386 | ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; |
3387 | |
3388 | ctx_lo = kernel_context(i915: gt->i915, NULL); |
3389 | if (!ctx_lo) |
3390 | goto err_ctx_hi; |
3391 | ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; |
3392 | |
3393 | if (igt_spinner_init(spin: &spin_lo, gt)) |
3394 | goto err_ctx_lo; |
3395 | |
3396 | for_each_engine(engine, gt, id) { |
3397 | unsigned long saved_timeout; |
3398 | struct i915_request *rq; |
3399 | |
3400 | if (!intel_engine_has_preemption(engine)) |
3401 | continue; |
3402 | |
3403 | rq = spinner_create_request(spin: &spin_lo, ctx: ctx_lo, engine, |
3404 | MI_NOOP); /* preemption disabled */ |
3405 | if (IS_ERR(ptr: rq)) { |
3406 | err = PTR_ERR(ptr: rq); |
3407 | goto err_spin_lo; |
3408 | } |
3409 | |
3410 | i915_request_add(rq); |
3411 | if (!igt_wait_for_spinner(spin: &spin_lo, rq)) { |
3412 | intel_gt_set_wedged(gt); |
3413 | err = -EIO; |
3414 | goto err_spin_lo; |
3415 | } |
3416 | |
3417 | rq = igt_request_alloc(ctx: ctx_hi, engine); |
3418 | if (IS_ERR(ptr: rq)) { |
3419 | igt_spinner_end(spin: &spin_lo); |
3420 | err = PTR_ERR(ptr: rq); |
3421 | goto err_spin_lo; |
3422 | } |
3423 | |
3424 | /* Flush the previous CS ack before changing timeouts */ |
3425 | while (READ_ONCE(engine->execlists.pending[0])) |
3426 | cpu_relax(); |
3427 | |
3428 | saved_timeout = engine->props.preempt_timeout_ms; |
3429 | engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ |
3430 | |
3431 | i915_request_get(rq); |
3432 | i915_request_add(rq); |
3433 | |
3434 | intel_engine_flush_submission(engine); |
3435 | engine->props.preempt_timeout_ms = saved_timeout; |
3436 | |
3437 | if (i915_request_wait(rq, flags: 0, HZ / 10) < 0) { |
3438 | intel_gt_set_wedged(gt); |
3439 | i915_request_put(rq); |
3440 | err = -ETIME; |
3441 | goto err_spin_lo; |
3442 | } |
3443 | |
3444 | igt_spinner_end(spin: &spin_lo); |
3445 | i915_request_put(rq); |
3446 | } |
3447 | |
3448 | err = 0; |
3449 | err_spin_lo: |
3450 | igt_spinner_fini(spin: &spin_lo); |
3451 | err_ctx_lo: |
3452 | kernel_context_close(ctx: ctx_lo); |
3453 | err_ctx_hi: |
3454 | kernel_context_close(ctx: ctx_hi); |
3455 | return err; |
3456 | } |
3457 | |
3458 | static int random_range(struct rnd_state *rnd, int min, int max) |
3459 | { |
3460 | return i915_prandom_u32_max_state(ep_ro: max - min, state: rnd) + min; |
3461 | } |
3462 | |
3463 | static int random_priority(struct rnd_state *rnd) |
3464 | { |
3465 | return random_range(rnd, min: I915_PRIORITY_MIN, max: I915_PRIORITY_MAX); |
3466 | } |
3467 | |
3468 | struct preempt_smoke { |
3469 | struct intel_gt *gt; |
3470 | struct kthread_work work; |
3471 | struct i915_gem_context **contexts; |
3472 | struct intel_engine_cs *engine; |
3473 | struct drm_i915_gem_object *batch; |
3474 | unsigned int ncontext; |
3475 | struct rnd_state prng; |
3476 | unsigned long count; |
3477 | int result; |
3478 | }; |
3479 | |
3480 | static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) |
3481 | { |
3482 | return smoke->contexts[i915_prandom_u32_max_state(ep_ro: smoke->ncontext, |
3483 | state: &smoke->prng)]; |
3484 | } |
3485 | |
3486 | static int smoke_submit(struct preempt_smoke *smoke, |
3487 | struct i915_gem_context *ctx, int prio, |
3488 | struct drm_i915_gem_object *batch) |
3489 | { |
3490 | struct i915_request *rq; |
3491 | struct i915_vma *vma = NULL; |
3492 | int err = 0; |
3493 | |
3494 | if (batch) { |
3495 | struct i915_address_space *vm; |
3496 | |
3497 | vm = i915_gem_context_get_eb_vm(ctx); |
3498 | vma = i915_vma_instance(obj: batch, vm, NULL); |
3499 | i915_vm_put(vm); |
3500 | if (IS_ERR(ptr: vma)) |
3501 | return PTR_ERR(ptr: vma); |
3502 | |
3503 | err = i915_vma_pin(vma, size: 0, alignment: 0, PIN_USER); |
3504 | if (err) |
3505 | return err; |
3506 | } |
3507 | |
3508 | ctx->sched.priority = prio; |
3509 | |
3510 | rq = igt_request_alloc(ctx, engine: smoke->engine); |
3511 | if (IS_ERR(ptr: rq)) { |
3512 | err = PTR_ERR(ptr: rq); |
3513 | goto unpin; |
3514 | } |
3515 | |
3516 | if (vma) { |
3517 | err = igt_vma_move_to_active_unlocked(vma, rq, flags: 0); |
3518 | if (!err) |
3519 | err = rq->engine->emit_bb_start(rq, |
3520 | i915_vma_offset(vma), |
3521 | PAGE_SIZE, 0); |
3522 | } |
3523 | |
3524 | i915_request_add(rq); |
3525 | |
3526 | unpin: |
3527 | if (vma) |
3528 | i915_vma_unpin(vma); |
3529 | |
3530 | return err; |
3531 | } |
3532 | |
3533 | static void smoke_crescendo_work(struct kthread_work *work) |
3534 | { |
3535 | struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work); |
3536 | IGT_TIMEOUT(end_time); |
3537 | unsigned long count; |
3538 | |
3539 | count = 0; |
3540 | do { |
3541 | struct i915_gem_context *ctx = smoke_context(smoke); |
3542 | |
3543 | smoke->result = smoke_submit(smoke, ctx, |
3544 | prio: count % I915_PRIORITY_MAX, |
3545 | batch: smoke->batch); |
3546 | |
3547 | count++; |
3548 | } while (!smoke->result && count < smoke->ncontext && |
3549 | !__igt_timeout(timeout: end_time, NULL)); |
3550 | |
3551 | smoke->count = count; |
3552 | } |
3553 | |
3554 | static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) |
3555 | #define BATCH BIT(0) |
3556 | { |
3557 | struct kthread_worker *worker[I915_NUM_ENGINES] = {}; |
3558 | struct preempt_smoke *arg; |
3559 | struct intel_engine_cs *engine; |
3560 | enum intel_engine_id id; |
3561 | unsigned long count; |
3562 | int err = 0; |
3563 | |
3564 | arg = kmalloc_array(n: I915_NUM_ENGINES, size: sizeof(*arg), GFP_KERNEL); |
3565 | if (!arg) |
3566 | return -ENOMEM; |
3567 | |
3568 | memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg)); |
3569 | |
3570 | for_each_engine(engine, smoke->gt, id) { |
3571 | arg[id] = *smoke; |
3572 | arg[id].engine = engine; |
3573 | if (!(flags & BATCH)) |
3574 | arg[id].batch = NULL; |
3575 | arg[id].count = 0; |
3576 | |
3577 | worker[id] = kthread_create_worker(flags: 0, namefmt: "igt/smoke:%d" , id); |
3578 | if (IS_ERR(ptr: worker[id])) { |
3579 | err = PTR_ERR(ptr: worker[id]); |
3580 | break; |
3581 | } |
3582 | |
3583 | kthread_init_work(&arg[id].work, smoke_crescendo_work); |
3584 | kthread_queue_work(worker: worker[id], work: &arg[id].work); |
3585 | } |
3586 | |
3587 | count = 0; |
3588 | for_each_engine(engine, smoke->gt, id) { |
3589 | if (IS_ERR_OR_NULL(ptr: worker[id])) |
3590 | continue; |
3591 | |
3592 | kthread_flush_work(work: &arg[id].work); |
3593 | if (arg[id].result && !err) |
3594 | err = arg[id].result; |
3595 | |
3596 | count += arg[id].count; |
3597 | |
3598 | kthread_destroy_worker(worker: worker[id]); |
3599 | } |
3600 | |
3601 | pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n" , |
3602 | count, flags, smoke->gt->info.num_engines, smoke->ncontext); |
3603 | |
3604 | kfree(objp: arg); |
3605 | return 0; |
3606 | } |
3607 | |
3608 | static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) |
3609 | { |
3610 | enum intel_engine_id id; |
3611 | IGT_TIMEOUT(end_time); |
3612 | unsigned long count; |
3613 | |
3614 | count = 0; |
3615 | do { |
3616 | for_each_engine(smoke->engine, smoke->gt, id) { |
3617 | struct i915_gem_context *ctx = smoke_context(smoke); |
3618 | int err; |
3619 | |
3620 | err = smoke_submit(smoke, |
3621 | ctx, prio: random_priority(rnd: &smoke->prng), |
3622 | batch: flags & BATCH ? smoke->batch : NULL); |
3623 | if (err) |
3624 | return err; |
3625 | |
3626 | count++; |
3627 | } |
3628 | } while (count < smoke->ncontext && !__igt_timeout(timeout: end_time, NULL)); |
3629 | |
3630 | pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n" , |
3631 | count, flags, smoke->gt->info.num_engines, smoke->ncontext); |
3632 | return 0; |
3633 | } |
3634 | |
3635 | static int live_preempt_smoke(void *arg) |
3636 | { |
3637 | struct preempt_smoke smoke = { |
3638 | .gt = arg, |
3639 | .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), |
3640 | .ncontext = 256, |
3641 | }; |
3642 | const unsigned int phase[] = { 0, BATCH }; |
3643 | struct igt_live_test t; |
3644 | int err = -ENOMEM; |
3645 | u32 *cs; |
3646 | int n; |
3647 | |
3648 | smoke.contexts = kmalloc_array(n: smoke.ncontext, |
3649 | size: sizeof(*smoke.contexts), |
3650 | GFP_KERNEL); |
3651 | if (!smoke.contexts) |
3652 | return -ENOMEM; |
3653 | |
3654 | smoke.batch = |
3655 | i915_gem_object_create_internal(i915: smoke.gt->i915, PAGE_SIZE); |
3656 | if (IS_ERR(ptr: smoke.batch)) { |
3657 | err = PTR_ERR(ptr: smoke.batch); |
3658 | goto err_free; |
3659 | } |
3660 | |
3661 | cs = i915_gem_object_pin_map_unlocked(obj: smoke.batch, type: I915_MAP_WB); |
3662 | if (IS_ERR(ptr: cs)) { |
3663 | err = PTR_ERR(ptr: cs); |
3664 | goto err_batch; |
3665 | } |
3666 | for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) |
3667 | cs[n] = MI_ARB_CHECK; |
3668 | cs[n] = MI_BATCH_BUFFER_END; |
3669 | i915_gem_object_flush_map(obj: smoke.batch); |
3670 | i915_gem_object_unpin_map(obj: smoke.batch); |
3671 | |
3672 | if (igt_live_test_begin(t: &t, i915: smoke.gt->i915, func: __func__, name: "all" )) { |
3673 | err = -EIO; |
3674 | goto err_batch; |
3675 | } |
3676 | |
3677 | for (n = 0; n < smoke.ncontext; n++) { |
3678 | smoke.contexts[n] = kernel_context(i915: smoke.gt->i915, NULL); |
3679 | if (!smoke.contexts[n]) |
3680 | goto err_ctx; |
3681 | } |
3682 | |
3683 | for (n = 0; n < ARRAY_SIZE(phase); n++) { |
3684 | err = smoke_crescendo(smoke: &smoke, flags: phase[n]); |
3685 | if (err) |
3686 | goto err_ctx; |
3687 | |
3688 | err = smoke_random(smoke: &smoke, flags: phase[n]); |
3689 | if (err) |
3690 | goto err_ctx; |
3691 | } |
3692 | |
3693 | err_ctx: |
3694 | if (igt_live_test_end(t: &t)) |
3695 | err = -EIO; |
3696 | |
3697 | for (n = 0; n < smoke.ncontext; n++) { |
3698 | if (!smoke.contexts[n]) |
3699 | break; |
3700 | kernel_context_close(ctx: smoke.contexts[n]); |
3701 | } |
3702 | |
3703 | err_batch: |
3704 | i915_gem_object_put(obj: smoke.batch); |
3705 | err_free: |
3706 | kfree(objp: smoke.contexts); |
3707 | |
3708 | return err; |
3709 | } |
3710 | |
3711 | static int nop_virtual_engine(struct intel_gt *gt, |
3712 | struct intel_engine_cs **siblings, |
3713 | unsigned int nsibling, |
3714 | unsigned int nctx, |
3715 | unsigned int flags) |
3716 | #define CHAIN BIT(0) |
3717 | { |
3718 | IGT_TIMEOUT(end_time); |
3719 | struct i915_request *request[16] = {}; |
3720 | struct intel_context *ve[16]; |
3721 | unsigned long n, prime, nc; |
3722 | struct igt_live_test t; |
3723 | ktime_t times[2] = {}; |
3724 | int err; |
3725 | |
3726 | GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); |
3727 | |
3728 | for (n = 0; n < nctx; n++) { |
3729 | ve[n] = intel_engine_create_virtual(siblings, count: nsibling, flags: 0); |
3730 | if (IS_ERR(ptr: ve[n])) { |
3731 | err = PTR_ERR(ptr: ve[n]); |
3732 | nctx = n; |
3733 | goto out; |
3734 | } |
3735 | |
3736 | err = intel_context_pin(ce: ve[n]); |
3737 | if (err) { |
3738 | intel_context_put(ce: ve[n]); |
3739 | nctx = n; |
3740 | goto out; |
3741 | } |
3742 | } |
3743 | |
3744 | err = igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: ve[0]->engine->name); |
3745 | if (err) |
3746 | goto out; |
3747 | |
3748 | for_each_prime_number_from(prime, 1, 8192) { |
3749 | times[1] = ktime_get_raw(); |
3750 | |
3751 | if (flags & CHAIN) { |
3752 | for (nc = 0; nc < nctx; nc++) { |
3753 | for (n = 0; n < prime; n++) { |
3754 | struct i915_request *rq; |
3755 | |
3756 | rq = i915_request_create(ce: ve[nc]); |
3757 | if (IS_ERR(ptr: rq)) { |
3758 | err = PTR_ERR(ptr: rq); |
3759 | goto out; |
3760 | } |
3761 | |
3762 | if (request[nc]) |
3763 | i915_request_put(rq: request[nc]); |
3764 | request[nc] = i915_request_get(rq); |
3765 | i915_request_add(rq); |
3766 | } |
3767 | } |
3768 | } else { |
3769 | for (n = 0; n < prime; n++) { |
3770 | for (nc = 0; nc < nctx; nc++) { |
3771 | struct i915_request *rq; |
3772 | |
3773 | rq = i915_request_create(ce: ve[nc]); |
3774 | if (IS_ERR(ptr: rq)) { |
3775 | err = PTR_ERR(ptr: rq); |
3776 | goto out; |
3777 | } |
3778 | |
3779 | if (request[nc]) |
3780 | i915_request_put(rq: request[nc]); |
3781 | request[nc] = i915_request_get(rq); |
3782 | i915_request_add(rq); |
3783 | } |
3784 | } |
3785 | } |
3786 | |
3787 | for (nc = 0; nc < nctx; nc++) { |
3788 | if (i915_request_wait(rq: request[nc], flags: 0, HZ / 10) < 0) { |
3789 | pr_err("%s(%s): wait for %llx:%lld timed out\n" , |
3790 | __func__, ve[0]->engine->name, |
3791 | request[nc]->fence.context, |
3792 | request[nc]->fence.seqno); |
3793 | |
3794 | GEM_TRACE("%s(%s) failed at request %llx:%lld\n" , |
3795 | __func__, ve[0]->engine->name, |
3796 | request[nc]->fence.context, |
3797 | request[nc]->fence.seqno); |
3798 | GEM_TRACE_DUMP(); |
3799 | intel_gt_set_wedged(gt); |
3800 | break; |
3801 | } |
3802 | } |
3803 | |
3804 | times[1] = ktime_sub(ktime_get_raw(), times[1]); |
3805 | if (prime == 1) |
3806 | times[0] = times[1]; |
3807 | |
3808 | for (nc = 0; nc < nctx; nc++) { |
3809 | i915_request_put(rq: request[nc]); |
3810 | request[nc] = NULL; |
3811 | } |
3812 | |
3813 | if (__igt_timeout(timeout: end_time, NULL)) |
3814 | break; |
3815 | } |
3816 | |
3817 | err = igt_live_test_end(t: &t); |
3818 | if (err) |
3819 | goto out; |
3820 | |
3821 | pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n" , |
3822 | nctx, ve[0]->engine->name, ktime_to_ns(times[0]), |
3823 | prime, div64_u64(ktime_to_ns(times[1]), prime)); |
3824 | |
3825 | out: |
3826 | if (igt_flush_test(i915: gt->i915)) |
3827 | err = -EIO; |
3828 | |
3829 | for (nc = 0; nc < nctx; nc++) { |
3830 | i915_request_put(rq: request[nc]); |
3831 | intel_context_unpin(ce: ve[nc]); |
3832 | intel_context_put(ce: ve[nc]); |
3833 | } |
3834 | return err; |
3835 | } |
3836 | |
3837 | static unsigned int |
3838 | __select_siblings(struct intel_gt *gt, |
3839 | unsigned int class, |
3840 | struct intel_engine_cs **siblings, |
3841 | bool (*filter)(const struct intel_engine_cs *)) |
3842 | { |
3843 | unsigned int n = 0; |
3844 | unsigned int inst; |
3845 | |
3846 | for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { |
3847 | if (!gt->engine_class[class][inst]) |
3848 | continue; |
3849 | |
3850 | if (filter && !filter(gt->engine_class[class][inst])) |
3851 | continue; |
3852 | |
3853 | siblings[n++] = gt->engine_class[class][inst]; |
3854 | } |
3855 | |
3856 | return n; |
3857 | } |
3858 | |
3859 | static unsigned int |
3860 | select_siblings(struct intel_gt *gt, |
3861 | unsigned int class, |
3862 | struct intel_engine_cs **siblings) |
3863 | { |
3864 | return __select_siblings(gt, class, siblings, NULL); |
3865 | } |
3866 | |
3867 | static int live_virtual_engine(void *arg) |
3868 | { |
3869 | struct intel_gt *gt = arg; |
3870 | struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; |
3871 | struct intel_engine_cs *engine; |
3872 | enum intel_engine_id id; |
3873 | unsigned int class; |
3874 | int err; |
3875 | |
3876 | if (intel_uc_uses_guc_submission(uc: >->uc)) |
3877 | return 0; |
3878 | |
3879 | for_each_engine(engine, gt, id) { |
3880 | err = nop_virtual_engine(gt, siblings: &engine, nsibling: 1, nctx: 1, flags: 0); |
3881 | if (err) { |
3882 | pr_err("Failed to wrap engine %s: err=%d\n" , |
3883 | engine->name, err); |
3884 | return err; |
3885 | } |
3886 | } |
3887 | |
3888 | for (class = 0; class <= MAX_ENGINE_CLASS; class++) { |
3889 | int nsibling, n; |
3890 | |
3891 | nsibling = select_siblings(gt, class, siblings); |
3892 | if (nsibling < 2) |
3893 | continue; |
3894 | |
3895 | for (n = 1; n <= nsibling + 1; n++) { |
3896 | err = nop_virtual_engine(gt, siblings, nsibling, |
3897 | nctx: n, flags: 0); |
3898 | if (err) |
3899 | return err; |
3900 | } |
3901 | |
3902 | err = nop_virtual_engine(gt, siblings, nsibling, nctx: n, CHAIN); |
3903 | if (err) |
3904 | return err; |
3905 | } |
3906 | |
3907 | return 0; |
3908 | } |
3909 | |
3910 | static int mask_virtual_engine(struct intel_gt *gt, |
3911 | struct intel_engine_cs **siblings, |
3912 | unsigned int nsibling) |
3913 | { |
3914 | struct i915_request *request[MAX_ENGINE_INSTANCE + 1]; |
3915 | struct intel_context *ve; |
3916 | struct igt_live_test t; |
3917 | unsigned int n; |
3918 | int err; |
3919 | |
3920 | /* |
3921 | * Check that by setting the execution mask on a request, we can |
3922 | * restrict it to our desired engine within the virtual engine. |
3923 | */ |
3924 | |
3925 | ve = intel_engine_create_virtual(siblings, count: nsibling, flags: 0); |
3926 | if (IS_ERR(ptr: ve)) { |
3927 | err = PTR_ERR(ptr: ve); |
3928 | goto out_close; |
3929 | } |
3930 | |
3931 | err = intel_context_pin(ce: ve); |
3932 | if (err) |
3933 | goto out_put; |
3934 | |
3935 | err = igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: ve->engine->name); |
3936 | if (err) |
3937 | goto out_unpin; |
3938 | |
3939 | for (n = 0; n < nsibling; n++) { |
3940 | request[n] = i915_request_create(ce: ve); |
3941 | if (IS_ERR(ptr: request[n])) { |
3942 | err = PTR_ERR(ptr: request[n]); |
3943 | nsibling = n; |
3944 | goto out; |
3945 | } |
3946 | |
3947 | /* Reverse order as it's more likely to be unnatural */ |
3948 | request[n]->execution_mask = siblings[nsibling - n - 1]->mask; |
3949 | |
3950 | i915_request_get(rq: request[n]); |
3951 | i915_request_add(rq: request[n]); |
3952 | } |
3953 | |
3954 | for (n = 0; n < nsibling; n++) { |
3955 | if (i915_request_wait(rq: request[n], flags: 0, HZ / 10) < 0) { |
3956 | pr_err("%s(%s): wait for %llx:%lld timed out\n" , |
3957 | __func__, ve->engine->name, |
3958 | request[n]->fence.context, |
3959 | request[n]->fence.seqno); |
3960 | |
3961 | GEM_TRACE("%s(%s) failed at request %llx:%lld\n" , |
3962 | __func__, ve->engine->name, |
3963 | request[n]->fence.context, |
3964 | request[n]->fence.seqno); |
3965 | GEM_TRACE_DUMP(); |
3966 | intel_gt_set_wedged(gt); |
3967 | err = -EIO; |
3968 | goto out; |
3969 | } |
3970 | |
3971 | if (request[n]->engine != siblings[nsibling - n - 1]) { |
3972 | pr_err("Executed on wrong sibling '%s', expected '%s'\n" , |
3973 | request[n]->engine->name, |
3974 | siblings[nsibling - n - 1]->name); |
3975 | err = -EINVAL; |
3976 | goto out; |
3977 | } |
3978 | } |
3979 | |
3980 | err = igt_live_test_end(t: &t); |
3981 | out: |
3982 | if (igt_flush_test(i915: gt->i915)) |
3983 | err = -EIO; |
3984 | |
3985 | for (n = 0; n < nsibling; n++) |
3986 | i915_request_put(rq: request[n]); |
3987 | |
3988 | out_unpin: |
3989 | intel_context_unpin(ce: ve); |
3990 | out_put: |
3991 | intel_context_put(ce: ve); |
3992 | out_close: |
3993 | return err; |
3994 | } |
3995 | |
3996 | static int live_virtual_mask(void *arg) |
3997 | { |
3998 | struct intel_gt *gt = arg; |
3999 | struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; |
4000 | unsigned int class; |
4001 | int err; |
4002 | |
4003 | if (intel_uc_uses_guc_submission(uc: >->uc)) |
4004 | return 0; |
4005 | |
4006 | for (class = 0; class <= MAX_ENGINE_CLASS; class++) { |
4007 | unsigned int nsibling; |
4008 | |
4009 | nsibling = select_siblings(gt, class, siblings); |
4010 | if (nsibling < 2) |
4011 | continue; |
4012 | |
4013 | err = mask_virtual_engine(gt, siblings, nsibling); |
4014 | if (err) |
4015 | return err; |
4016 | } |
4017 | |
4018 | return 0; |
4019 | } |
4020 | |
4021 | static int slicein_virtual_engine(struct intel_gt *gt, |
4022 | struct intel_engine_cs **siblings, |
4023 | unsigned int nsibling) |
4024 | { |
4025 | const long timeout = slice_timeout(engine: siblings[0]); |
4026 | struct intel_context *ce; |
4027 | struct i915_request *rq; |
4028 | struct igt_spinner spin; |
4029 | unsigned int n; |
4030 | int err = 0; |
4031 | |
4032 | /* |
4033 | * Virtual requests must take part in timeslicing on the target engines. |
4034 | */ |
4035 | |
4036 | if (igt_spinner_init(spin: &spin, gt)) |
4037 | return -ENOMEM; |
4038 | |
4039 | for (n = 0; n < nsibling; n++) { |
4040 | ce = intel_context_create(engine: siblings[n]); |
4041 | if (IS_ERR(ptr: ce)) { |
4042 | err = PTR_ERR(ptr: ce); |
4043 | goto out; |
4044 | } |
4045 | |
4046 | rq = igt_spinner_create_request(spin: &spin, ce, MI_ARB_CHECK); |
4047 | intel_context_put(ce); |
4048 | if (IS_ERR(ptr: rq)) { |
4049 | err = PTR_ERR(ptr: rq); |
4050 | goto out; |
4051 | } |
4052 | |
4053 | i915_request_add(rq); |
4054 | } |
4055 | |
4056 | ce = intel_engine_create_virtual(siblings, count: nsibling, flags: 0); |
4057 | if (IS_ERR(ptr: ce)) { |
4058 | err = PTR_ERR(ptr: ce); |
4059 | goto out; |
4060 | } |
4061 | |
4062 | rq = intel_context_create_request(ce); |
4063 | intel_context_put(ce); |
4064 | if (IS_ERR(ptr: rq)) { |
4065 | err = PTR_ERR(ptr: rq); |
4066 | goto out; |
4067 | } |
4068 | |
4069 | i915_request_get(rq); |
4070 | i915_request_add(rq); |
4071 | if (i915_request_wait(rq, flags: 0, timeout) < 0) { |
4072 | GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n" , |
4073 | __func__, rq->engine->name); |
4074 | GEM_TRACE_DUMP(); |
4075 | intel_gt_set_wedged(gt); |
4076 | err = -EIO; |
4077 | } |
4078 | i915_request_put(rq); |
4079 | |
4080 | out: |
4081 | igt_spinner_end(spin: &spin); |
4082 | if (igt_flush_test(i915: gt->i915)) |
4083 | err = -EIO; |
4084 | igt_spinner_fini(spin: &spin); |
4085 | return err; |
4086 | } |
4087 | |
4088 | static int sliceout_virtual_engine(struct intel_gt *gt, |
4089 | struct intel_engine_cs **siblings, |
4090 | unsigned int nsibling) |
4091 | { |
4092 | const long timeout = slice_timeout(engine: siblings[0]); |
4093 | struct intel_context *ce; |
4094 | struct i915_request *rq; |
4095 | struct igt_spinner spin; |
4096 | unsigned int n; |
4097 | int err = 0; |
4098 | |
4099 | /* |
4100 | * Virtual requests must allow others a fair timeslice. |
4101 | */ |
4102 | |
4103 | if (igt_spinner_init(spin: &spin, gt)) |
4104 | return -ENOMEM; |
4105 | |
4106 | /* XXX We do not handle oversubscription and fairness with normal rq */ |
4107 | for (n = 0; n < nsibling; n++) { |
4108 | ce = intel_engine_create_virtual(siblings, count: nsibling, flags: 0); |
4109 | if (IS_ERR(ptr: ce)) { |
4110 | err = PTR_ERR(ptr: ce); |
4111 | goto out; |
4112 | } |
4113 | |
4114 | rq = igt_spinner_create_request(spin: &spin, ce, MI_ARB_CHECK); |
4115 | intel_context_put(ce); |
4116 | if (IS_ERR(ptr: rq)) { |
4117 | err = PTR_ERR(ptr: rq); |
4118 | goto out; |
4119 | } |
4120 | |
4121 | i915_request_add(rq); |
4122 | } |
4123 | |
4124 | for (n = 0; !err && n < nsibling; n++) { |
4125 | ce = intel_context_create(engine: siblings[n]); |
4126 | if (IS_ERR(ptr: ce)) { |
4127 | err = PTR_ERR(ptr: ce); |
4128 | goto out; |
4129 | } |
4130 | |
4131 | rq = intel_context_create_request(ce); |
4132 | intel_context_put(ce); |
4133 | if (IS_ERR(ptr: rq)) { |
4134 | err = PTR_ERR(ptr: rq); |
4135 | goto out; |
4136 | } |
4137 | |
4138 | i915_request_get(rq); |
4139 | i915_request_add(rq); |
4140 | if (i915_request_wait(rq, flags: 0, timeout) < 0) { |
4141 | GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n" , |
4142 | __func__, siblings[n]->name); |
4143 | GEM_TRACE_DUMP(); |
4144 | intel_gt_set_wedged(gt); |
4145 | err = -EIO; |
4146 | } |
4147 | i915_request_put(rq); |
4148 | } |
4149 | |
4150 | out: |
4151 | igt_spinner_end(spin: &spin); |
4152 | if (igt_flush_test(i915: gt->i915)) |
4153 | err = -EIO; |
4154 | igt_spinner_fini(spin: &spin); |
4155 | return err; |
4156 | } |
4157 | |
4158 | static int live_virtual_slice(void *arg) |
4159 | { |
4160 | struct intel_gt *gt = arg; |
4161 | struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; |
4162 | unsigned int class; |
4163 | int err; |
4164 | |
4165 | if (intel_uc_uses_guc_submission(uc: >->uc)) |
4166 | return 0; |
4167 | |
4168 | for (class = 0; class <= MAX_ENGINE_CLASS; class++) { |
4169 | unsigned int nsibling; |
4170 | |
4171 | nsibling = __select_siblings(gt, class, siblings, |
4172 | filter: intel_engine_has_timeslices); |
4173 | if (nsibling < 2) |
4174 | continue; |
4175 | |
4176 | err = slicein_virtual_engine(gt, siblings, nsibling); |
4177 | if (err) |
4178 | return err; |
4179 | |
4180 | err = sliceout_virtual_engine(gt, siblings, nsibling); |
4181 | if (err) |
4182 | return err; |
4183 | } |
4184 | |
4185 | return 0; |
4186 | } |
4187 | |
4188 | static int preserved_virtual_engine(struct intel_gt *gt, |
4189 | struct intel_engine_cs **siblings, |
4190 | unsigned int nsibling) |
4191 | { |
4192 | struct i915_request *last = NULL; |
4193 | struct intel_context *ve; |
4194 | struct i915_vma *scratch; |
4195 | struct igt_live_test t; |
4196 | unsigned int n; |
4197 | int err = 0; |
4198 | u32 *cs; |
4199 | |
4200 | scratch = |
4201 | __vm_create_scratch_for_read_pinned(vm: &siblings[0]->gt->ggtt->vm, |
4202 | PAGE_SIZE); |
4203 | if (IS_ERR(ptr: scratch)) |
4204 | return PTR_ERR(ptr: scratch); |
4205 | |
4206 | err = i915_vma_sync(vma: scratch); |
4207 | if (err) |
4208 | goto out_scratch; |
4209 | |
4210 | ve = intel_engine_create_virtual(siblings, count: nsibling, flags: 0); |
4211 | if (IS_ERR(ptr: ve)) { |
4212 | err = PTR_ERR(ptr: ve); |
4213 | goto out_scratch; |
4214 | } |
4215 | |
4216 | err = intel_context_pin(ce: ve); |
4217 | if (err) |
4218 | goto out_put; |
4219 | |
4220 | err = igt_live_test_begin(t: &t, i915: gt->i915, func: __func__, name: ve->engine->name); |
4221 | if (err) |
4222 | goto out_unpin; |
4223 | |
4224 | for (n = 0; n < NUM_GPR_DW; n++) { |
4225 | struct intel_engine_cs *engine = siblings[n % nsibling]; |
4226 | struct i915_request *rq; |
4227 | |
4228 | rq = i915_request_create(ce: ve); |
4229 | if (IS_ERR(ptr: rq)) { |
4230 | err = PTR_ERR(ptr: rq); |
4231 | goto out_end; |
4232 | } |
4233 | |
4234 | i915_request_put(rq: last); |
4235 | last = i915_request_get(rq); |
4236 | |
4237 | cs = intel_ring_begin(rq, num_dwords: 8); |
4238 | if (IS_ERR(ptr: cs)) { |
4239 | i915_request_add(rq); |
4240 | err = PTR_ERR(ptr: cs); |
4241 | goto out_end; |
4242 | } |
4243 | |
4244 | *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; |
4245 | *cs++ = CS_GPR(engine, n); |
4246 | *cs++ = i915_ggtt_offset(vma: scratch) + n * sizeof(u32); |
4247 | *cs++ = 0; |
4248 | |
4249 | *cs++ = MI_LOAD_REGISTER_IMM(1); |
4250 | *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW); |
4251 | *cs++ = n + 1; |
4252 | |
4253 | *cs++ = MI_NOOP; |
4254 | intel_ring_advance(rq, cs); |
4255 | |
4256 | /* Restrict this request to run on a particular engine */ |
4257 | rq->execution_mask = engine->mask; |
4258 | i915_request_add(rq); |
4259 | } |
4260 | |
4261 | if (i915_request_wait(rq: last, flags: 0, HZ / 5) < 0) { |
4262 | err = -ETIME; |
4263 | goto out_end; |
4264 | } |
4265 | |
4266 | cs = i915_gem_object_pin_map_unlocked(obj: scratch->obj, type: I915_MAP_WB); |
4267 | if (IS_ERR(ptr: cs)) { |
4268 | err = PTR_ERR(ptr: cs); |
4269 | goto out_end; |
4270 | } |
4271 | |
4272 | for (n = 0; n < NUM_GPR_DW; n++) { |
4273 | if (cs[n] != n) { |
4274 | pr_err("Incorrect value[%d] found for GPR[%d]\n" , |
4275 | cs[n], n); |
4276 | err = -EINVAL; |
4277 | break; |
4278 | } |
4279 | } |
4280 | |
4281 | i915_gem_object_unpin_map(obj: scratch->obj); |
4282 | |
4283 | out_end: |
4284 | if (igt_live_test_end(t: &t)) |
4285 | err = -EIO; |
4286 | i915_request_put(rq: last); |
4287 | out_unpin: |
4288 | intel_context_unpin(ce: ve); |
4289 | out_put: |
4290 | intel_context_put(ce: ve); |
4291 | out_scratch: |
4292 | i915_vma_unpin_and_release(p_vma: &scratch, flags: 0); |
4293 | return err; |
4294 | } |
4295 | |
4296 | static int live_virtual_preserved(void *arg) |
4297 | { |
4298 | struct intel_gt *gt = arg; |
4299 | struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; |
4300 | unsigned int class; |
4301 | |
4302 | /* |
4303 | * Check that the context image retains non-privileged (user) registers |
4304 | * from one engine to the next. For this we check that the CS_GPR |
4305 | * are preserved. |
4306 | */ |
4307 | |
4308 | if (intel_uc_uses_guc_submission(uc: >->uc)) |
4309 | return 0; |
4310 | |
4311 | /* As we use CS_GPR we cannot run before they existed on all engines. */ |
4312 | if (GRAPHICS_VER(gt->i915) < 9) |
4313 | return 0; |
4314 | |
4315 | for (class = 0; class <= MAX_ENGINE_CLASS; class++) { |
4316 | int nsibling, err; |
4317 | |
4318 | nsibling = select_siblings(gt, class, siblings); |
4319 | if (nsibling < 2) |
4320 | continue; |
4321 | |
4322 | err = preserved_virtual_engine(gt, siblings, nsibling); |
4323 | if (err) |
4324 | return err; |
4325 | } |
4326 | |
4327 | return 0; |
4328 | } |
4329 | |
4330 | static int reset_virtual_engine(struct intel_gt *gt, |
4331 | struct intel_engine_cs **siblings, |
4332 | unsigned int nsibling) |
4333 | { |
4334 | struct intel_engine_cs *engine; |
4335 | struct intel_context *ve; |
4336 | struct igt_spinner spin; |
4337 | struct i915_request *rq; |
4338 | unsigned int n; |
4339 | int err = 0; |
4340 | |
4341 | /* |
4342 | * In order to support offline error capture for fast preempt reset, |
4343 | * we need to decouple the guilty request and ensure that it and its |
4344 | * descendents are not executed while the capture is in progress. |
4345 | */ |
4346 | |
4347 | if (igt_spinner_init(spin: &spin, gt)) |
4348 | return -ENOMEM; |
4349 | |
4350 | ve = intel_engine_create_virtual(siblings, count: nsibling, flags: 0); |
4351 | if (IS_ERR(ptr: ve)) { |
4352 | err = PTR_ERR(ptr: ve); |
4353 | goto out_spin; |
4354 | } |
4355 | |
4356 | for (n = 0; n < nsibling; n++) |
4357 | st_engine_heartbeat_disable(engine: siblings[n]); |
4358 | |
4359 | rq = igt_spinner_create_request(spin: &spin, ce: ve, MI_ARB_CHECK); |
4360 | if (IS_ERR(ptr: rq)) { |
4361 | err = PTR_ERR(ptr: rq); |
4362 | goto out_heartbeat; |
4363 | } |
4364 | i915_request_add(rq); |
4365 | |
4366 | if (!igt_wait_for_spinner(spin: &spin, rq)) { |
4367 | intel_gt_set_wedged(gt); |
4368 | err = -ETIME; |
4369 | goto out_heartbeat; |
4370 | } |
4371 | |
4372 | engine = rq->engine; |
4373 | GEM_BUG_ON(engine == ve->engine); |
4374 | |
4375 | /* Take ownership of the reset and tasklet */ |
4376 | err = engine_lock_reset_tasklet(engine); |
4377 | if (err) |
4378 | goto out_heartbeat; |
4379 | |
4380 | engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet); |
4381 | GEM_BUG_ON(execlists_active(&engine->execlists) != rq); |
4382 | |
4383 | /* Fake a preemption event; failed of course */ |
4384 | spin_lock_irq(lock: &engine->sched_engine->lock); |
4385 | __unwind_incomplete_requests(engine); |
4386 | spin_unlock_irq(lock: &engine->sched_engine->lock); |
4387 | GEM_BUG_ON(rq->engine != engine); |
4388 | |
4389 | /* Reset the engine while keeping our active request on hold */ |
4390 | execlists_hold(engine, rq); |
4391 | GEM_BUG_ON(!i915_request_on_hold(rq)); |
4392 | |
4393 | __intel_engine_reset_bh(engine, NULL); |
4394 | GEM_BUG_ON(rq->fence.error != -EIO); |
4395 | |
4396 | /* Release our grasp on the engine, letting CS flow again */ |
4397 | engine_unlock_reset_tasklet(engine); |
4398 | |
4399 | /* Check that we do not resubmit the held request */ |
4400 | i915_request_get(rq); |
4401 | if (!i915_request_wait(rq, flags: 0, HZ / 5)) { |
4402 | pr_err("%s: on hold request completed!\n" , |
4403 | engine->name); |
4404 | intel_gt_set_wedged(gt); |
4405 | err = -EIO; |
4406 | goto out_rq; |
4407 | } |
4408 | GEM_BUG_ON(!i915_request_on_hold(rq)); |
4409 | |
4410 | /* But is resubmitted on release */ |
4411 | execlists_unhold(engine, rq); |
4412 | if (i915_request_wait(rq, flags: 0, HZ / 5) < 0) { |
4413 | pr_err("%s: held request did not complete!\n" , |
4414 | engine->name); |
4415 | intel_gt_set_wedged(gt); |
4416 | err = -ETIME; |
4417 | } |
4418 | |
4419 | out_rq: |
4420 | i915_request_put(rq); |
4421 | out_heartbeat: |
4422 | for (n = 0; n < nsibling; n++) |
4423 | st_engine_heartbeat_enable(engine: siblings[n]); |
4424 | |
4425 | intel_context_put(ce: ve); |
4426 | out_spin: |
4427 | igt_spinner_fini(spin: &spin); |
4428 | return err; |
4429 | } |
4430 | |
4431 | static int live_virtual_reset(void *arg) |
4432 | { |
4433 | struct intel_gt *gt = arg; |
4434 | struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; |
4435 | unsigned int class; |
4436 | |
4437 | /* |
4438 | * Check that we handle a reset event within a virtual engine. |
4439 | * Only the physical engine is reset, but we have to check the flow |
4440 | * of the virtual requests around the reset, and make sure it is not |
4441 | * forgotten. |
4442 | */ |
4443 | |
4444 | if (intel_uc_uses_guc_submission(uc: >->uc)) |
4445 | return 0; |
4446 | |
4447 | if (!intel_has_reset_engine(gt)) |
4448 | return 0; |
4449 | |
4450 | for (class = 0; class <= MAX_ENGINE_CLASS; class++) { |
4451 | int nsibling, err; |
4452 | |
4453 | nsibling = select_siblings(gt, class, siblings); |
4454 | if (nsibling < 2) |
4455 | continue; |
4456 | |
4457 | err = reset_virtual_engine(gt, siblings, nsibling); |
4458 | if (err) |
4459 | return err; |
4460 | } |
4461 | |
4462 | return 0; |
4463 | } |
4464 | |
4465 | int intel_execlists_live_selftests(struct drm_i915_private *i915) |
4466 | { |
4467 | static const struct i915_subtest tests[] = { |
4468 | SUBTEST(live_sanitycheck), |
4469 | SUBTEST(live_unlite_switch), |
4470 | SUBTEST(live_unlite_preempt), |
4471 | SUBTEST(live_unlite_ring), |
4472 | SUBTEST(live_pin_rewind), |
4473 | SUBTEST(live_hold_reset), |
4474 | SUBTEST(live_error_interrupt), |
4475 | SUBTEST(live_timeslice_preempt), |
4476 | SUBTEST(live_timeslice_rewind), |
4477 | SUBTEST(live_timeslice_queue), |
4478 | SUBTEST(live_timeslice_nopreempt), |
4479 | SUBTEST(live_busywait_preempt), |
4480 | SUBTEST(live_preempt), |
4481 | SUBTEST(live_late_preempt), |
4482 | SUBTEST(live_nopreempt), |
4483 | SUBTEST(live_preempt_cancel), |
4484 | SUBTEST(live_suppress_self_preempt), |
4485 | SUBTEST(live_chain_preempt), |
4486 | SUBTEST(live_preempt_ring), |
4487 | SUBTEST(live_preempt_gang), |
4488 | SUBTEST(live_preempt_timeout), |
4489 | SUBTEST(live_preempt_user), |
4490 | SUBTEST(live_preempt_smoke), |
4491 | SUBTEST(live_virtual_engine), |
4492 | SUBTEST(live_virtual_mask), |
4493 | SUBTEST(live_virtual_preserved), |
4494 | SUBTEST(live_virtual_slice), |
4495 | SUBTEST(live_virtual_reset), |
4496 | }; |
4497 | |
4498 | if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP) |
4499 | return 0; |
4500 | |
4501 | if (intel_gt_is_wedged(gt: to_gt(i915))) |
4502 | return 0; |
4503 | |
4504 | return intel_gt_live_subtests(tests, to_gt(i915)); |
4505 | } |
4506 | |