1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2019 Intel Corporation |
5 | */ |
6 | |
7 | #include <linux/debugobjects.h> |
8 | |
9 | #include "gt/intel_context.h" |
10 | #include "gt/intel_engine_heartbeat.h" |
11 | #include "gt/intel_engine_pm.h" |
12 | #include "gt/intel_ring.h" |
13 | |
14 | #include "i915_drv.h" |
15 | #include "i915_active.h" |
16 | |
17 | /* |
18 | * Active refs memory management |
19 | * |
20 | * To be more economical with memory, we reap all the i915_active trees as |
21 | * they idle (when we know the active requests are inactive) and allocate the |
22 | * nodes from a local slab cache to hopefully reduce the fragmentation. |
23 | */ |
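/*
 * A minimal, uncompiled usage sketch (the embedding object "obj" and the
 * my_active()/my_retire() callbacks are hypothetical; only the i915_active
 * calls are real):
 *
 *	i915_active_init(&obj->active, my_active, my_retire, 0);
 *	err = i915_active_add_request(&obj->active, rq);
 *	...
 *	err = i915_active_wait(&obj->active);
 *	i915_active_fini(&obj->active);
 *
 * my_active() is invoked on the first acquisition, my_retire() once the
 * last tracked request (and reference) has been retired.
 */
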
24 | static struct kmem_cache *slab_cache; |
25 | |
26 | struct active_node { |
27 | struct rb_node node; |
28 | struct i915_active_fence base; |
29 | struct i915_active *ref; |
30 | u64 timeline; |
31 | }; |
32 | |
33 | #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node) |
34 | |
35 | static inline struct active_node * |
36 | node_from_active(struct i915_active_fence *active) |
37 | { |
38 | return container_of(active, struct active_node, base); |
39 | } |
40 | |
41 | #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers) |
42 | |
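/*
 * Barrier nodes are "proto-fences": until they are attached to a real
 * request, active->fence holds ERR_PTR(-EAGAIN) (see is_barrier()) and the
 * otherwise idle dma_fence_cb fields are reused for storage -- cb.node
 * doubles as the llist_node queued on engine->barrier_tasks, and
 * cb.node.prev carries the owning engine (stashed by
 * i915_active_acquire_preallocate_barrier()).
 */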
43 | static inline bool is_barrier(const struct i915_active_fence *active) |
44 | { |
45 | return IS_ERR(rcu_access_pointer(active->fence)); |
46 | } |
47 | |
48 | static inline struct llist_node *barrier_to_ll(struct active_node *node) |
49 | { |
50 | GEM_BUG_ON(!is_barrier(&node->base)); |
51 | return (struct llist_node *)&node->base.cb.node; |
52 | } |
53 | |
54 | static inline struct intel_engine_cs * |
55 | __barrier_to_engine(struct active_node *node) |
56 | { |
57 | return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev); |
58 | } |
59 | |
60 | static inline struct intel_engine_cs * |
61 | barrier_to_engine(struct active_node *node) |
62 | { |
63 | GEM_BUG_ON(!is_barrier(&node->base)); |
64 | return __barrier_to_engine(node); |
65 | } |
66 | |
67 | static inline struct active_node *barrier_from_ll(struct llist_node *x) |
68 | { |
69 | return container_of((struct list_head *)x, |
70 | struct active_node, base.cb.node); |
71 | } |
72 | |
73 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS) |
74 | |
75 | static void *active_debug_hint(void *addr) |
76 | { |
77 | struct i915_active *ref = addr; |
78 | |
79 | return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; |
80 | } |
81 | |
82 | static const struct debug_obj_descr active_debug_desc = { |
	.name = "i915_active",
84 | .debug_hint = active_debug_hint, |
85 | }; |
86 | |
87 | static void debug_active_init(struct i915_active *ref) |
88 | { |
89 | debug_object_init(ref, &active_debug_desc); |
90 | } |
91 | |
92 | static void debug_active_activate(struct i915_active *ref) |
93 | { |
94 | lockdep_assert_held(&ref->tree_lock); |
95 | debug_object_activate(ref, &active_debug_desc); |
96 | } |
97 | |
98 | static void debug_active_deactivate(struct i915_active *ref) |
99 | { |
100 | lockdep_assert_held(&ref->tree_lock); |
101 | if (!atomic_read(&ref->count)) /* after the last dec */ |
102 | debug_object_deactivate(ref, &active_debug_desc); |
103 | } |
104 | |
105 | static void debug_active_fini(struct i915_active *ref) |
106 | { |
107 | debug_object_free(ref, &active_debug_desc); |
108 | } |
109 | |
110 | static void debug_active_assert(struct i915_active *ref) |
111 | { |
112 | debug_object_assert_init(ref, &active_debug_desc); |
113 | } |
114 | |
115 | #else |
116 | |
117 | static inline void debug_active_init(struct i915_active *ref) { } |
118 | static inline void debug_active_activate(struct i915_active *ref) { } |
119 | static inline void debug_active_deactivate(struct i915_active *ref) { } |
120 | static inline void debug_active_fini(struct i915_active *ref) { } |
121 | static inline void debug_active_assert(struct i915_active *ref) { } |
122 | |
123 | #endif |
124 | |
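/*
 * Called when the last reference is dropped: atomic_dec_and_lock_irqsave()
 * takes ref->tree_lock, the rbtree is pruned down to a single cached node
 * (kept for quick reuse on the next activation) and every other node is
 * returned to the slab cache.
 */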
125 | static void |
126 | __active_retire(struct i915_active *ref) |
127 | { |
128 | struct rb_root root = RB_ROOT; |
129 | struct active_node *it, *n; |
130 | unsigned long flags; |
131 | |
132 | GEM_BUG_ON(i915_active_is_idle(ref)); |
133 | |
134 | /* return the unused nodes to our slabcache -- flushing the allocator */ |
135 | if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)) |
136 | return; |
137 | |
138 | GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); |
139 | debug_active_deactivate(ref); |
140 | |
141 | /* Even if we have not used the cache, we may still have a barrier */ |
142 | if (!ref->cache) |
143 | ref->cache = fetch_node(ref->tree.rb_node); |
144 | |
145 | /* Keep the MRU cached node for reuse */ |
146 | if (ref->cache) { |
147 | /* Discard all other nodes in the tree */ |
148 | rb_erase(&ref->cache->node, &ref->tree); |
149 | root = ref->tree; |
150 | |
151 | /* Rebuild the tree with only the cached node */ |
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
153 | rb_insert_color(&ref->cache->node, &ref->tree); |
154 | GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node); |
155 | |
156 | /* Make the cached node available for reuse with any timeline */ |
157 | ref->cache->timeline = 0; /* needs cmpxchg(u64) */ |
158 | } |
159 | |
	spin_unlock_irqrestore(&ref->tree_lock, flags);
161 | |
162 | /* After the final retire, the entire struct may be freed */ |
163 | if (ref->retire) |
164 | ref->retire(ref); |
165 | |
166 | /* ... except if you wait on it, you must manage your own references! */ |
	wake_up_var(ref);
168 | |
169 | /* Finally free the discarded timeline tree */ |
170 | rbtree_postorder_for_each_entry_safe(it, n, &root, node) { |
171 | GEM_BUG_ON(i915_active_fence_isset(&it->base)); |
		kmem_cache_free(slab_cache, it);
173 | } |
174 | } |
175 | |
176 | static void |
177 | active_work(struct work_struct *wrk) |
178 | { |
179 | struct i915_active *ref = container_of(wrk, typeof(*ref), work); |
180 | |
181 | GEM_BUG_ON(!atomic_read(&ref->count)); |
	if (atomic_add_unless(&ref->count, -1, 1))
183 | return; |
184 | |
185 | __active_retire(ref); |
186 | } |
187 | |
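/*
 * Drop a reference; on the final put, retire either inline or, if the
 * retirement callbacks may sleep (I915_ACTIVE_RETIRE_SLEEPS), from
 * active_work() on system_unbound_wq.
 */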
188 | static void |
189 | active_retire(struct i915_active *ref) |
190 | { |
191 | GEM_BUG_ON(!atomic_read(&ref->count)); |
	if (atomic_add_unless(&ref->count, -1, 1))
193 | return; |
194 | |
195 | if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) { |
		queue_work(system_unbound_wq, &ref->work);
197 | return; |
198 | } |
199 | |
200 | __active_retire(ref); |
201 | } |
202 | |
203 | static inline struct dma_fence ** |
204 | __active_fence_slot(struct i915_active_fence *active) |
205 | { |
206 | return (struct dma_fence ** __force)&active->fence; |
207 | } |
208 | |
209 | static inline bool |
210 | active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
211 | { |
212 | struct i915_active_fence *active = |
213 | container_of(cb, typeof(*active), cb); |
214 | |
215 | return cmpxchg(__active_fence_slot(active), fence, NULL) == fence; |
216 | } |
217 | |
218 | static void |
219 | node_retire(struct dma_fence *fence, struct dma_fence_cb *cb) |
220 | { |
221 | if (active_fence_cb(fence, cb)) |
222 | active_retire(container_of(cb, struct active_node, base.cb)->ref); |
223 | } |
224 | |
225 | static void |
226 | excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb) |
227 | { |
228 | if (active_fence_cb(fence, cb)) |
229 | active_retire(container_of(cb, struct i915_active, excl.cb)); |
230 | } |
231 | |
232 | static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) |
233 | { |
234 | struct active_node *it; |
235 | |
236 | GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */ |
237 | |
238 | /* |
239 | * We track the most recently used timeline to skip a rbtree search |
240 | * for the common case, under typical loads we never need the rbtree |
241 | * at all. We can reuse the last slot if it is empty, that is |
242 | * after the previous activity has been retired, or if it matches the |
243 | * current timeline. |
244 | */ |
245 | it = READ_ONCE(ref->cache); |
246 | if (it) { |
247 | u64 cached = READ_ONCE(it->timeline); |
248 | |
249 | /* Once claimed, this slot will only belong to this idx */ |
250 | if (cached == idx) |
251 | return it; |
252 | |
253 | /* |
254 | * An unclaimed cache [.timeline=0] can only be claimed once. |
255 | * |
256 | * If the value is already non-zero, some other thread has |
	 * claimed the cache and we know that it does not match our
258 | * idx. If, and only if, the timeline is currently zero is it |
259 | * worth competing to claim it atomically for ourselves (for |
260 | * only the winner of that race will cmpxchg return the old |
261 | * value of 0). |
262 | */ |
263 | if (!cached && !cmpxchg64(&it->timeline, 0, idx)) |
264 | return it; |
265 | } |
266 | |
267 | BUILD_BUG_ON(offsetof(typeof(*it), node)); |
268 | |
269 | /* While active, the tree can only be built; not destroyed */ |
270 | GEM_BUG_ON(i915_active_is_idle(ref)); |
271 | |
272 | it = fetch_node(ref->tree.rb_node); |
273 | while (it) { |
274 | if (it->timeline < idx) { |
275 | it = fetch_node(it->node.rb_right); |
276 | } else if (it->timeline > idx) { |
277 | it = fetch_node(it->node.rb_left); |
278 | } else { |
279 | WRITE_ONCE(ref->cache, it); |
280 | break; |
281 | } |
282 | } |
283 | |
284 | /* NB: If the tree rotated beneath us, we may miss our target. */ |
285 | return it; |
286 | } |
287 | |
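/*
 * Return the fence slot tracking timeline @idx, allocating a new node under
 * ref->tree_lock if none exists yet. The allocation is GFP_ATOMIC as callers
 * may hold locks that forbid blocking (see the XXX below).
 */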
288 | static struct i915_active_fence * |
289 | active_instance(struct i915_active *ref, u64 idx) |
290 | { |
291 | struct active_node *node; |
292 | struct rb_node **p, *parent; |
293 | |
294 | node = __active_lookup(ref, idx); |
295 | if (likely(node)) |
296 | return &node->base; |
297 | |
	spin_lock_irq(&ref->tree_lock);
299 | GEM_BUG_ON(i915_active_is_idle(ref)); |
300 | |
301 | parent = NULL; |
302 | p = &ref->tree.rb_node; |
303 | while (*p) { |
304 | parent = *p; |
305 | |
306 | node = rb_entry(parent, struct active_node, node); |
307 | if (node->timeline == idx) |
308 | goto out; |
309 | |
310 | if (node->timeline < idx) |
311 | p = &parent->rb_right; |
312 | else |
313 | p = &parent->rb_left; |
314 | } |
315 | |
316 | /* |
317 | * XXX: We should preallocate this before i915_active_ref() is ever |
318 | * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC. |
319 | */ |
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
321 | if (!node) |
322 | goto out; |
323 | |
	__i915_active_fence_init(&node->base, NULL, node_retire);
325 | node->ref = ref; |
326 | node->timeline = idx; |
327 | |
	rb_link_node(&node->node, parent, p);
329 | rb_insert_color(&node->node, &ref->tree); |
330 | |
331 | out: |
332 | WRITE_ONCE(ref->cache, node); |
	spin_unlock_irq(&ref->tree_lock);
334 | |
335 | return &node->base; |
336 | } |
337 | |
338 | void __i915_active_init(struct i915_active *ref, |
339 | int (*active)(struct i915_active *ref), |
340 | void (*retire)(struct i915_active *ref), |
341 | unsigned long flags, |
342 | struct lock_class_key *mkey, |
343 | struct lock_class_key *wkey) |
344 | { |
345 | debug_active_init(ref); |
346 | |
347 | ref->flags = flags; |
348 | ref->active = active; |
349 | ref->retire = retire; |
350 | |
351 | spin_lock_init(&ref->tree_lock); |
352 | ref->tree = RB_ROOT; |
353 | ref->cache = NULL; |
354 | |
	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
359 | INIT_WORK(&ref->work, active_work); |
360 | #if IS_ENABLED(CONFIG_LOCKDEP) |
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
362 | #endif |
363 | } |
364 | |
365 | static bool ____active_del_barrier(struct i915_active *ref, |
366 | struct active_node *node, |
367 | struct intel_engine_cs *engine) |
368 | |
369 | { |
370 | struct llist_node *head = NULL, *tail = NULL; |
371 | struct llist_node *pos, *next; |
372 | |
373 | GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context); |
374 | |
375 | /* |
376 | * Rebuild the llist excluding our node. We may perform this |
377 | * outside of the kernel_context timeline mutex and so someone |
378 | * else may be manipulating the engine->barrier_tasks, in |
379 | * which case either we or they will be upset :) |
380 | * |
381 | * A second __active_del_barrier() will report failure to claim |
382 | * the active_node and the caller will just shrug and know not to |
383 | * claim ownership of its node. |
384 | * |
385 | * A concurrent i915_request_add_active_barriers() will miss adding |
386 | * any of the tasks, but we will try again on the next -- and since |
387 | * we are actively using the barrier, we know that there will be |
388 | * at least another opportunity when we idle. |
389 | */ |
390 | llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) { |
		if (node == barrier_from_ll(pos)) {
392 | node = NULL; |
393 | continue; |
394 | } |
395 | |
396 | pos->next = head; |
397 | head = pos; |
398 | if (!tail) |
399 | tail = pos; |
400 | } |
401 | if (head) |
		llist_add_batch(head, tail, &engine->barrier_tasks);
403 | |
404 | return !node; |
405 | } |
406 | |
407 | static bool |
408 | __active_del_barrier(struct i915_active *ref, struct active_node *node) |
409 | { |
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
411 | } |
412 | |
413 | static bool |
414 | replace_barrier(struct i915_active *ref, struct i915_active_fence *active) |
415 | { |
416 | if (!is_barrier(active)) /* proto-node used by our idle barrier? */ |
417 | return false; |
418 | |
419 | /* |
420 | * This request is on the kernel_context timeline, and so |
	 * we can use it to substitute for the pending idle-barrier
422 | * request that we want to emit on the kernel_context. |
423 | */ |
	return __active_del_barrier(ref, node_from_active(active));
425 | } |
426 | |
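/*
 * Track @rq as the most recent request on its timeline within @ref. If the
 * slot currently holds a preallocated idle barrier for that timeline, the
 * barrier is cancelled (replace_barrier) and the request takes its place.
 */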
427 | int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) |
428 | { |
429 | u64 idx = i915_request_timeline(rq)->fence_context; |
430 | struct dma_fence *fence = &rq->fence; |
431 | struct i915_active_fence *active; |
432 | int err; |
433 | |
434 | /* Prevent reaping in case we malloc/wait while building the tree */ |
435 | err = i915_active_acquire(ref); |
436 | if (err) |
437 | return err; |
438 | |
439 | do { |
440 | active = active_instance(ref, idx); |
441 | if (!active) { |
442 | err = -ENOMEM; |
443 | goto out; |
444 | } |
445 | |
446 | if (replace_barrier(ref, active)) { |
447 | RCU_INIT_POINTER(active->fence, NULL); |
			atomic_dec(&ref->count);
449 | } |
450 | } while (unlikely(is_barrier(active))); |
451 | |
452 | fence = __i915_active_fence_set(active, fence); |
453 | if (!fence) |
454 | __i915_active_acquire(ref); |
455 | else |
456 | dma_fence_put(fence); |
457 | |
458 | out: |
459 | i915_active_release(ref); |
460 | return err; |
461 | } |
462 | |
463 | static struct dma_fence * |
464 | __i915_active_set_fence(struct i915_active *ref, |
465 | struct i915_active_fence *active, |
466 | struct dma_fence *fence) |
467 | { |
468 | struct dma_fence *prev; |
469 | |
470 | if (replace_barrier(ref, active)) { |
471 | RCU_INIT_POINTER(active->fence, fence); |
472 | return NULL; |
473 | } |
474 | |
475 | prev = __i915_active_fence_set(active, fence); |
476 | if (!prev) |
477 | __i915_active_acquire(ref); |
478 | |
479 | return prev; |
480 | } |
481 | |
482 | struct dma_fence * |
483 | i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) |
484 | { |
485 | /* We expect the caller to manage the exclusive timeline ordering */ |
	return __i915_active_set_fence(ref, &ref->excl, f);
487 | } |
488 | |
489 | bool i915_active_acquire_if_busy(struct i915_active *ref) |
490 | { |
491 | debug_active_assert(ref); |
	return atomic_add_unless(&ref->count, 1, 0);
493 | } |
494 | |
495 | static void __i915_active_activate(struct i915_active *ref) |
496 | { |
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
501 | } |
502 | |
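/*
 * Take a reference, invoking the optional ref->active() hook under
 * ref->mutex on the first 0 -> 1 transition. May sleep and may return
 * -EINTR from mutex_lock_interruptible().
 */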
503 | int i915_active_acquire(struct i915_active *ref) |
504 | { |
505 | int err; |
506 | |
507 | if (i915_active_acquire_if_busy(ref)) |
508 | return 0; |
509 | |
510 | if (!ref->active) { |
511 | __i915_active_activate(ref); |
512 | return 0; |
513 | } |
514 | |
515 | err = mutex_lock_interruptible(&ref->mutex); |
516 | if (err) |
517 | return err; |
518 | |
519 | if (likely(!i915_active_acquire_if_busy(ref))) { |
520 | err = ref->active(ref); |
521 | if (!err) |
522 | __i915_active_activate(ref); |
523 | } |
524 | |
	mutex_unlock(&ref->mutex);
526 | |
527 | return err; |
528 | } |
529 | |
530 | int i915_active_acquire_for_context(struct i915_active *ref, u64 idx) |
531 | { |
532 | struct i915_active_fence *active; |
533 | int err; |
534 | |
535 | err = i915_active_acquire(ref); |
536 | if (err) |
537 | return err; |
538 | |
539 | active = active_instance(ref, idx); |
540 | if (!active) { |
541 | i915_active_release(ref); |
542 | return -ENOMEM; |
543 | } |
544 | |
545 | return 0; /* return with active ref */ |
546 | } |
547 | |
548 | void i915_active_release(struct i915_active *ref) |
549 | { |
550 | debug_active_assert(ref); |
551 | active_retire(ref); |
552 | } |
553 | |
554 | static void enable_signaling(struct i915_active_fence *active) |
555 | { |
556 | struct dma_fence *fence; |
557 | |
558 | if (unlikely(is_barrier(active))) |
559 | return; |
560 | |
561 | fence = i915_active_fence_get(active); |
562 | if (!fence) |
563 | return; |
564 | |
565 | dma_fence_enable_sw_signaling(fence); |
566 | dma_fence_put(fence); |
567 | } |
568 | |
569 | static int flush_barrier(struct active_node *it) |
570 | { |
571 | struct intel_engine_cs *engine; |
572 | |
573 | if (likely(!is_barrier(&it->base))) |
574 | return 0; |
575 | |
	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
579 | return 0; |
580 | |
581 | return intel_engine_flush_barriers(engine); |
582 | } |
583 | |
584 | static int flush_lazy_signals(struct i915_active *ref) |
585 | { |
586 | struct active_node *it, *n; |
587 | int err = 0; |
588 | |
	enable_signaling(&ref->excl);
590 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { |
591 | err = flush_barrier(it); /* unconnected idle barrier? */ |
592 | if (err) |
593 | break; |
594 | |
		enable_signaling(&it->base);
596 | } |
597 | |
598 | return err; |
599 | } |
600 | |
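/*
 * Wait for all tracked fences to signal and the tracker to idle. Fences
 * already tracked have software signaling enabled via flush_lazy_signals();
 * anything added after the wait begins is not auto-signalled.
 */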
601 | int __i915_active_wait(struct i915_active *ref, int state) |
602 | { |
603 | might_sleep(); |
604 | |
605 | /* Any fence added after the wait begins will not be auto-signaled */ |
606 | if (i915_active_acquire_if_busy(ref)) { |
607 | int err; |
608 | |
609 | err = flush_lazy_signals(ref); |
610 | i915_active_release(ref); |
611 | if (err) |
612 | return err; |
613 | |
614 | if (___wait_var_event(ref, i915_active_is_idle(ref), |
615 | state, 0, 0, schedule())) |
616 | return -EINTR; |
617 | } |
618 | |
619 | /* |
620 | * After the wait is complete, the caller may free the active. |
621 | * We have to flush any concurrent retirement before returning. |
622 | */ |
	flush_work(&ref->work);
624 | return 0; |
625 | } |
626 | |
627 | static int __await_active(struct i915_active_fence *active, |
628 | int (*fn)(void *arg, struct dma_fence *fence), |
629 | void *arg) |
630 | { |
631 | struct dma_fence *fence; |
632 | |
633 | if (is_barrier(active)) /* XXX flush the barrier? */ |
634 | return 0; |
635 | |
636 | fence = i915_active_fence_get(active); |
637 | if (fence) { |
638 | int err; |
639 | |
640 | err = fn(arg, fence); |
641 | dma_fence_put(fence); |
642 | if (err < 0) |
643 | return err; |
644 | } |
645 | |
646 | return 0; |
647 | } |
648 | |
649 | struct wait_barrier { |
650 | struct wait_queue_entry base; |
651 | struct i915_active *ref; |
652 | }; |
653 | |
654 | static int |
655 | barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key) |
656 | { |
657 | struct wait_barrier *wb = container_of(wq, typeof(*wb), base); |
658 | |
	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
663 | } |
664 | |
665 | return 0; |
666 | } |
667 | |
668 | static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence) |
669 | { |
670 | struct wait_barrier *wb; |
671 | |
	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
673 | if (unlikely(!wb)) |
674 | return -ENOMEM; |
675 | |
676 | GEM_BUG_ON(i915_active_is_idle(ref)); |
677 | if (!i915_sw_fence_await(fence)) { |
		kfree(wb);
679 | return -EINVAL; |
680 | } |
681 | |
682 | wb->base.flags = 0; |
683 | wb->base.func = barrier_wake; |
684 | wb->base.private = fence; |
685 | wb->ref = ref; |
686 | |
	add_wait_queue(__var_waitqueue(ref), &wb->base);
688 | return 0; |
689 | } |
690 | |
691 | static int await_active(struct i915_active *ref, |
692 | unsigned int flags, |
693 | int (*fn)(void *arg, struct dma_fence *fence), |
694 | void *arg, struct i915_sw_fence *barrier) |
695 | { |
696 | int err = 0; |
697 | |
698 | if (!i915_active_acquire_if_busy(ref)) |
699 | return 0; |
700 | |
701 | if (flags & I915_ACTIVE_AWAIT_EXCL && |
702 | rcu_access_pointer(ref->excl.fence)) { |
		err = __await_active(&ref->excl, fn, arg);
704 | if (err) |
705 | goto out; |
706 | } |
707 | |
708 | if (flags & I915_ACTIVE_AWAIT_ACTIVE) { |
709 | struct active_node *it, *n; |
710 | |
711 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { |
			err = __await_active(&it->base, fn, arg);
713 | if (err) |
714 | goto out; |
715 | } |
716 | } |
717 | |
718 | if (flags & I915_ACTIVE_AWAIT_BARRIER) { |
719 | err = flush_lazy_signals(ref); |
720 | if (err) |
721 | goto out; |
722 | |
		err = __await_barrier(ref, barrier);
724 | if (err) |
725 | goto out; |
726 | } |
727 | |
728 | out: |
729 | i915_active_release(ref); |
730 | return err; |
731 | } |
732 | |
733 | static int rq_await_fence(void *arg, struct dma_fence *fence) |
734 | { |
	return i915_request_await_dma_fence(arg, fence);
736 | } |
737 | |
738 | int i915_request_await_active(struct i915_request *rq, |
739 | struct i915_active *ref, |
740 | unsigned int flags) |
741 | { |
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
743 | } |
744 | |
745 | static int sw_await_fence(void *arg, struct dma_fence *fence) |
746 | { |
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
749 | } |
750 | |
751 | int i915_sw_fence_await_active(struct i915_sw_fence *fence, |
752 | struct i915_active *ref, |
753 | unsigned int flags) |
754 | { |
	return await_active(ref, flags, sw_await_fence, fence, fence);
756 | } |
757 | |
758 | void i915_active_fini(struct i915_active *ref) |
759 | { |
760 | debug_active_fini(ref); |
761 | GEM_BUG_ON(atomic_read(&ref->count)); |
762 | GEM_BUG_ON(work_pending(&ref->work)); |
	mutex_destroy(&ref->mutex);
764 | |
765 | if (ref->cache) |
		kmem_cache_free(slab_cache, ref->cache);
767 | } |
768 | |
769 | static inline bool is_idle_barrier(struct active_node *node, u64 idx) |
770 | { |
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
772 | } |
773 | |
774 | static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) |
775 | { |
776 | struct rb_node *prev, *p; |
777 | |
778 | if (RB_EMPTY_ROOT(&ref->tree)) |
779 | return NULL; |
780 | |
781 | GEM_BUG_ON(i915_active_is_idle(ref)); |
782 | |
783 | /* |
784 | * Try to reuse any existing barrier nodes already allocated for this |
785 | * i915_active, due to overlapping active phases there is likely a |
786 | * node kept alive (as we reuse before parking). We prefer to reuse |
787 | * completely idle barriers (less hassle in manipulating the llists), |
788 | * but otherwise any will do. |
789 | */ |
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
791 | p = &ref->cache->node; |
792 | goto match; |
793 | } |
794 | |
795 | prev = NULL; |
796 | p = ref->tree.rb_node; |
797 | while (p) { |
798 | struct active_node *node = |
799 | rb_entry(p, struct active_node, node); |
800 | |
801 | if (is_idle_barrier(node, idx)) |
802 | goto match; |
803 | |
804 | prev = p; |
805 | if (node->timeline < idx) |
806 | p = READ_ONCE(p->rb_right); |
807 | else |
808 | p = READ_ONCE(p->rb_left); |
809 | } |
810 | |
811 | /* |
812 | * No quick match, but we did find the leftmost rb_node for the |
813 | * kernel_context. Walk the rb_tree in-order to see if there were |
814 | * any idle-barriers on this timeline that we missed, or just use |
815 | * the first pending barrier. |
816 | */ |
817 | for (p = prev; p; p = rb_next(p)) { |
818 | struct active_node *node = |
819 | rb_entry(p, struct active_node, node); |
820 | struct intel_engine_cs *engine; |
821 | |
822 | if (node->timeline > idx) |
823 | break; |
824 | |
825 | if (node->timeline < idx) |
826 | continue; |
827 | |
828 | if (is_idle_barrier(node, idx)) |
829 | goto match; |
830 | |
831 | /* |
832 | * The list of pending barriers is protected by the |
833 | * kernel_context timeline, which notably we do not hold |
834 | * here. i915_request_add_active_barriers() may consume |
835 | * the barrier before we claim it, so we have to check |
836 | * for success. |
837 | */ |
838 | engine = __barrier_to_engine(node); |
839 | smp_rmb(); /* serialise with add_active_barriers */ |
		if (is_barrier(&node->base) &&
841 | ____active_del_barrier(ref, node, engine)) |
842 | goto match; |
843 | } |
844 | |
845 | return NULL; |
846 | |
847 | match: |
	spin_lock_irq(&ref->tree_lock);
849 | rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ |
850 | if (p == &ref->cache->node) |
851 | WRITE_ONCE(ref->cache, NULL); |
	spin_unlock_irq(&ref->tree_lock);
853 | |
854 | return rb_entry(p, struct active_node, node); |
855 | } |
856 | |
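/*
 * Idle barriers are installed in three steps: preallocate one proto-node per
 * physical engine here, publish them into the rbtree and onto
 * engine->barrier_tasks in i915_active_acquire_barrier(), then bind them to
 * the kernel_context request in i915_request_add_active_barriers().
 */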
857 | int i915_active_acquire_preallocate_barrier(struct i915_active *ref, |
858 | struct intel_engine_cs *engine) |
859 | { |
860 | intel_engine_mask_t tmp, mask = engine->mask; |
861 | struct llist_node *first = NULL, *last = NULL; |
862 | struct intel_gt *gt = engine->gt; |
863 | |
864 | GEM_BUG_ON(i915_active_is_idle(ref)); |
865 | |
866 | /* Wait until the previous preallocation is completed */ |
	while (!llist_empty(&ref->preallocated_barriers))
868 | cond_resched(); |
869 | |
870 | /* |
871 | * Preallocate a node for each physical engine supporting the target |
872 | * engine (remember virtual engines have more than one sibling). |
873 | * We can then use the preallocated nodes in |
874 | * i915_active_acquire_barrier() |
875 | */ |
876 | GEM_BUG_ON(!mask); |
877 | for_each_engine_masked(engine, gt, mask, tmp) { |
878 | u64 idx = engine->kernel_context->timeline->fence_context; |
879 | struct llist_node *prev = first; |
880 | struct active_node *node; |
881 | |
882 | rcu_read_lock(); |
883 | node = reuse_idle_barrier(ref, idx); |
884 | rcu_read_unlock(); |
885 | if (!node) { |
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
887 | if (!node) |
888 | goto unwind; |
889 | |
890 | RCU_INIT_POINTER(node->base.fence, NULL); |
891 | node->base.cb.func = node_retire; |
892 | node->timeline = idx; |
893 | node->ref = ref; |
894 | } |
895 | |
		if (!i915_active_fence_isset(&node->base)) {
897 | /* |
898 | * Mark this as being *our* unconnected proto-node. |
899 | * |
900 | * Since this node is not in any list, and we have |
901 | * decoupled it from the rbtree, we can reuse the |
902 | * request to indicate this is an idle-barrier node |
903 | * and then we can use the rb_node and list pointers |
904 | * for our tracking of the pending barrier. |
905 | */ |
906 | RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN)); |
907 | node->base.cb.node.prev = (void *)engine; |
908 | __i915_active_acquire(ref); |
909 | } |
910 | GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN)); |
911 | |
912 | GEM_BUG_ON(barrier_to_engine(node) != engine); |
913 | first = barrier_to_ll(node); |
914 | first->next = prev; |
915 | if (!last) |
916 | last = first; |
917 | intel_engine_pm_get(engine); |
918 | } |
919 | |
920 | GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); |
	llist_add_batch(first, last, &ref->preallocated_barriers);
922 | |
923 | return 0; |
924 | |
925 | unwind: |
926 | while (first) { |
		struct active_node *node = barrier_from_ll(first);
928 | |
929 | first = first->next; |
930 | |
		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(slab_cache, node);
935 | } |
936 | return -ENOMEM; |
937 | } |
938 | |
939 | void i915_active_acquire_barrier(struct i915_active *ref) |
940 | { |
941 | struct llist_node *pos, *next; |
942 | unsigned long flags; |
943 | |
944 | GEM_BUG_ON(i915_active_is_idle(ref)); |
945 | |
946 | /* |
947 | * Transfer the list of preallocated barriers into the |
948 | * i915_active rbtree, but only as proto-nodes. They will be |
949 | * populated by i915_request_add_active_barriers() to point to the |
950 | * request that will eventually release them. |
951 | */ |
952 | llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { |
		struct active_node *node = barrier_from_ll(pos);
954 | struct intel_engine_cs *engine = barrier_to_engine(node); |
955 | struct rb_node **p, *parent; |
956 | |
957 | spin_lock_irqsave_nested(&ref->tree_lock, flags, |
958 | SINGLE_DEPTH_NESTING); |
959 | parent = NULL; |
960 | p = &ref->tree.rb_node; |
961 | while (*p) { |
962 | struct active_node *it; |
963 | |
964 | parent = *p; |
965 | |
966 | it = rb_entry(parent, struct active_node, node); |
967 | if (it->timeline < node->timeline) |
968 | p = &parent->rb_right; |
969 | else |
970 | p = &parent->rb_left; |
971 | } |
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);
975 | |
976 | GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); |
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 2);
979 | } |
980 | } |
981 | |
982 | static struct dma_fence **ll_to_fence_slot(struct llist_node *node) |
983 | { |
	return __active_fence_slot(&barrier_from_ll(node)->base);
985 | } |
986 | |
987 | void i915_request_add_active_barriers(struct i915_request *rq) |
988 | { |
989 | struct intel_engine_cs *engine = rq->engine; |
990 | struct llist_node *node, *next; |
991 | unsigned long flags; |
992 | |
993 | GEM_BUG_ON(!intel_context_is_barrier(rq->context)); |
994 | GEM_BUG_ON(intel_engine_is_virtual(engine)); |
995 | GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline); |
996 | |
	node = llist_del_all(&engine->barrier_tasks);
998 | if (!node) |
999 | return; |
1000 | /* |
1001 | * Attach the list of proto-fences to the in-flight request such |
1002 | * that the parent i915_active will be released when this request |
1003 | * is retired. |
1004 | */ |
1005 | spin_lock_irqsave(&rq->lock, flags); |
1006 | llist_for_each_safe(node, next, node) { |
1007 | /* serialise with reuse_idle_barrier */ |
1008 | smp_store_mb(*ll_to_fence_slot(node), &rq->fence); |
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
1010 | } |
	spin_unlock_irqrestore(&rq->lock, flags);
1012 | } |
1013 | |
1014 | /* |
1015 | * __i915_active_fence_set: Update the last active fence along its timeline |
1016 | * @active: the active tracker |
1017 | * @fence: the new fence (under construction) |
1018 | * |
1019 | * Records the new @fence as the last active fence along its timeline in |
1020 | * this active tracker, moving the tracking callbacks from the previous |
1021 | * fence onto this one. Gets and returns a reference to the previous fence |
1022 | * (if not already completed), which the caller must put after making sure |
1023 | * that it is executed before the new fence. To ensure that the order of |
1024 | * fences within the timeline of the i915_active_fence is understood, it |
1025 | * should be locked by the caller. |
1026 | */ |
1027 | struct dma_fence * |
1028 | __i915_active_fence_set(struct i915_active_fence *active, |
1029 | struct dma_fence *fence) |
1030 | { |
1031 | struct dma_fence *prev; |
1032 | unsigned long flags; |
1033 | |
1034 | /* |
1035 | * In case of fences embedded in i915_requests, their memory is |
	 * SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
1037 | * by new requests. Then, there is a risk of passing back a pointer |
1038 | * to a new, completely unrelated fence that reuses the same memory |
1039 | * while tracked under a different active tracker. Combined with i915 |
1040 | * perf open/close operations that build await dependencies between |
1041 | * engine kernel context requests and user requests from different |
1042 | * timelines, this can lead to dependency loops and infinite waits. |
1043 | * |
1044 | * As a countermeasure, we try to get a reference to the active->fence |
1045 | * first, so if we succeed and pass it back to our user then it is not |
1046 | * released and potentially reused by an unrelated request before the |
1047 | * user has a chance to set up an await dependency on it. |
1048 | */ |
1049 | prev = i915_active_fence_get(active); |
1050 | if (fence == prev) |
1051 | return fence; |
1052 | |
1053 | GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); |
1054 | |
1055 | /* |
1056 | * Consider that we have two threads arriving (A and B), with |
1057 | * C already resident as the active->fence. |
1058 | * |
1059 | * Both A and B have got a reference to C or NULL, depending on the |
1060 | * timing of the interrupt handler. Let's assume that if A has got C |
1061 | * then it has locked C first (before B). |
1062 | * |
1063 | * Note the strong ordering of the timeline also provides consistent |
1064 | * nesting rules for the fence->lock; the inner lock is always the |
1065 | * older lock. |
1066 | */ |
1067 | spin_lock_irqsave(fence->lock, flags); |
1068 | if (prev) |
1069 | spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); |
1070 | |
1071 | /* |
1072 | * A does the cmpxchg first, and so it sees C or NULL, as before, or |
1073 | * something else, depending on the timing of other threads and/or |
1074 | * interrupt handler. If not the same as before then A unlocks C if |
1075 | * applicable and retries, starting from an attempt to get a new |
1076 | * active->fence. Meanwhile, B follows the same path as A. |
	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
1078 | * active->fence, locks it as soon as A completes, and possibly |
1079 | * succeeds with cmpxchg. |
1080 | */ |
1081 | while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) { |
1082 | if (prev) { |
			spin_unlock(prev->lock);
			dma_fence_put(prev);
		}
		spin_unlock_irqrestore(fence->lock, flags);
1087 | |
1088 | prev = i915_active_fence_get(active); |
1089 | GEM_BUG_ON(prev == fence); |
1090 | |
1091 | spin_lock_irqsave(fence->lock, flags); |
1092 | if (prev) |
1093 | spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); |
1094 | } |
1095 | |
1096 | /* |
1097 | * If prev is NULL then the previous fence must have been signaled |
1098 | * and we know that we are first on the timeline. If it is still |
1099 | * present then, having the lock on that fence already acquired, we |
1100 | * serialise with the interrupt handler, in the process of removing it |
1101 | * from any future interrupt callback. A will then wait on C before |
1102 | * executing (if present). |
1103 | * |
1104 | * As B is second, it sees A as the previous fence and so waits for |
1105 | * it to complete its transition and takes over the occupancy for |
1106 | * itself -- remembering that it needs to wait on A before executing. |
1107 | */ |
1108 | if (prev) { |
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);
1114 | |
1115 | return prev; |
1116 | } |
1117 | |
1118 | int i915_active_fence_set(struct i915_active_fence *active, |
1119 | struct i915_request *rq) |
1120 | { |
1121 | struct dma_fence *fence; |
1122 | int err = 0; |
1123 | |
1124 | /* Must maintain timeline ordering wrt previous active requests */ |
	fence = __i915_active_fence_set(active, &rq->fence);
1126 | if (fence) { |
1127 | err = i915_request_await_dma_fence(rq, fence); |
1128 | dma_fence_put(fence); |
1129 | } |
1130 | |
1131 | return err; |
1132 | } |
1133 | |
1134 | void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb) |
1135 | { |
1136 | active_fence_cb(fence, cb); |
1137 | } |
1138 | |
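/*
 * i915_active_create() builds a standalone, refcounted i915_active: the
 * auto_active()/auto_retire() callbacks take and drop a self-reference, so
 * the object stays alive while busy and is freed once it is idle and the
 * last i915_active_put() has been called.
 */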
1139 | struct auto_active { |
1140 | struct i915_active base; |
1141 | struct kref ref; |
1142 | }; |
1143 | |
1144 | struct i915_active *i915_active_get(struct i915_active *ref) |
1145 | { |
1146 | struct auto_active *aa = container_of(ref, typeof(*aa), base); |
1147 | |
	kref_get(&aa->ref);
1149 | return &aa->base; |
1150 | } |
1151 | |
1152 | static void auto_release(struct kref *ref) |
1153 | { |
1154 | struct auto_active *aa = container_of(ref, typeof(*aa), ref); |
1155 | |
	i915_active_fini(&aa->base);
	kfree(aa);
1158 | } |
1159 | |
1160 | void i915_active_put(struct i915_active *ref) |
1161 | { |
1162 | struct auto_active *aa = container_of(ref, typeof(*aa), base); |
1163 | |
	kref_put(&aa->ref, auto_release);
1165 | } |
1166 | |
1167 | static int auto_active(struct i915_active *ref) |
1168 | { |
1169 | i915_active_get(ref); |
1170 | return 0; |
1171 | } |
1172 | |
1173 | static void auto_retire(struct i915_active *ref) |
1174 | { |
1175 | i915_active_put(ref); |
1176 | } |
1177 | |
1178 | struct i915_active *i915_active_create(void) |
1179 | { |
1180 | struct auto_active *aa; |
1181 | |
	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
1183 | if (!aa) |
1184 | return NULL; |
1185 | |
	kref_init(&aa->ref);
1187 | i915_active_init(&aa->base, auto_active, auto_retire, 0); |
1188 | |
1189 | return &aa->base; |
1190 | } |
1191 | |
1192 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
1193 | #include "selftests/i915_active.c" |
1194 | #endif |
1195 | |
1196 | void i915_active_module_exit(void) |
1197 | { |
	kmem_cache_destroy(slab_cache);
1199 | } |
1200 | |
1201 | int __init i915_active_module_init(void) |
1202 | { |
1203 | slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); |
1204 | if (!slab_cache) |
1205 | return -ENOMEM; |
1206 | |
1207 | return 0; |
1208 | } |
1209 | |