1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2018 Intel Corporation |
5 | */ |
6 | |
7 | #include <linux/kref.h> |
8 | #include <linux/string_helpers.h> |
9 | |
10 | #include "gem/i915_gem_pm.h" |
11 | #include "gt/intel_gt.h" |
12 | |
13 | #include "i915_selftest.h" |
14 | |
15 | #include "igt_flush_test.h" |
16 | #include "lib_sw_fence.h" |
17 | |
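/*
 * Test fixture: wrap an i915_active with our own refcount and a flag so
 * the selftests can observe exactly when the retirement callback runs.
 */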
18 | struct live_active { |
19 | struct i915_active base; |
20 | struct kref ref; |
21 | bool retired; |
22 | }; |
23 | |
24 | static void __live_get(struct live_active *active) |
25 | { |
	kref_get(&active->ref);
27 | } |
28 | |
29 | static void __live_free(struct live_active *active) |
30 | { |
	i915_active_fini(&active->base);
	kfree(active);
33 | } |
34 | |
35 | static void __live_release(struct kref *ref) |
36 | { |
37 | struct live_active *active = container_of(ref, typeof(*active), ref); |
38 | |
39 | __live_free(active); |
40 | } |
41 | |
42 | static void __live_put(struct live_active *active) |
43 | { |
	kref_put(&active->ref, __live_release);
45 | } |
46 | |
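/* Activation callback: pin the fixture while the i915_active is in use */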
47 | static int __live_active(struct i915_active *base) |
48 | { |
49 | struct live_active *active = container_of(base, typeof(*active), base); |
50 | |
51 | __live_get(active); |
52 | return 0; |
53 | } |
54 | |
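/* Retirement callback: record it and drop the activation reference */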
55 | static void __live_retire(struct i915_active *base) |
56 | { |
57 | struct live_active *active = container_of(base, typeof(*active), base); |
58 | |
59 | active->retired = true; |
60 | __live_put(active); |
61 | } |
62 | |
63 | static struct live_active *__live_alloc(struct drm_i915_private *i915) |
64 | { |
65 | struct live_active *active; |
66 | |
	active = kzalloc(sizeof(*active), GFP_KERNEL);
68 | if (!active) |
69 | return NULL; |
70 | |
	kref_init(&active->ref);
72 | i915_active_init(&active->base, __live_active, __live_retire, 0); |
73 | |
74 | return active; |
75 | } |
76 | |
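/*
 * Create a live_active tracking one kernel request per uabi engine, with
 * every submission held back by a single sw_fence. That lets us check that
 * the tracker neither retires early nor loses requests before we commit
 * the fence and let the requests run.
 */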
77 | static struct live_active * |
78 | __live_active_setup(struct drm_i915_private *i915) |
79 | { |
80 | struct intel_engine_cs *engine; |
81 | struct i915_sw_fence *submit; |
82 | struct live_active *active; |
83 | unsigned int count = 0; |
84 | int err = 0; |
85 | |
86 | active = __live_alloc(i915); |
87 | if (!active) |
		return ERR_PTR(-ENOMEM);
89 | |
90 | submit = heap_fence_create(GFP_KERNEL); |
91 | if (!submit) { |
		kfree(active);
		return ERR_PTR(-ENOMEM);
94 | } |
95 | |
	err = i915_active_acquire(&active->base);
97 | if (err) |
98 | goto out; |
99 | |
100 | for_each_uabi_engine(engine, i915) { |
101 | struct i915_request *rq; |
102 | |
103 | rq = intel_engine_create_kernel_request(engine); |
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
106 | break; |
107 | } |
108 | |
		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
117 | break; |
118 | } |
119 | |
120 | count++; |
121 | } |
122 | |
	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
130 | atomic_read(&active->base.count), count); |
131 | err = -EINVAL; |
132 | } |
133 | |
134 | out: |
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
140 | } |
141 | |
142 | return active; |
143 | } |
144 | |
145 | static int live_active_wait(void *arg) |
146 | { |
147 | struct drm_i915_private *i915 = arg; |
148 | struct live_active *active; |
149 | int err = 0; |
150 | |
151 | /* Check that we get a callback when requests retire upon waiting */ |
152 | |
153 | active = __live_active_setup(i915); |
	if (IS_ERR(active))
		return PTR_ERR(active);

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
158 | if (!READ_ONCE(active->retired)) { |
		struct drm_printer p = drm_err_printer(&i915->drm, __func__);

		drm_printf(&p, "i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);
163 | |
164 | err = -EINVAL; |
165 | } |
166 | |
167 | __live_put(active); |
168 | |
169 | if (igt_flush_test(i915)) |
170 | err = -EIO; |
171 | |
172 | return err; |
173 | } |
174 | |
175 | static int live_active_retire(void *arg) |
176 | { |
177 | struct drm_i915_private *i915 = arg; |
178 | struct live_active *active; |
179 | int err = 0; |
180 | |
181 | /* Check that we get a callback when requests are indirectly retired */ |
182 | |
183 | active = __live_active_setup(i915); |
	if (IS_ERR(active))
		return PTR_ERR(active);
186 | |
187 | /* waits for & retires all requests */ |
188 | if (igt_flush_test(i915)) |
189 | err = -EIO; |
190 | |
191 | if (!READ_ONCE(active->retired)) { |
		struct drm_printer p = drm_err_printer(&i915->drm, __func__);

		drm_printf(&p, "i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);
196 | |
197 | err = -EINVAL; |
198 | } |
199 | |
200 | __live_put(active); |
201 | |
202 | return err; |
203 | } |
204 | |
205 | static int live_active_barrier(void *arg) |
206 | { |
207 | struct drm_i915_private *i915 = arg; |
208 | struct intel_engine_cs *engine; |
209 | struct live_active *active; |
210 | int err = 0; |
211 | |
	/* Check that we get a callback when barriers are flushed and retired */
213 | |
214 | active = __live_alloc(i915); |
215 | if (!active) |
216 | return -ENOMEM; |
217 | |
	err = i915_active_acquire(&active->base);
219 | if (err) |
220 | goto out; |
221 | |
222 | for_each_uabi_engine(engine, i915) { |
		err = i915_active_acquire_preallocate_barrier(&active->base,
							      engine);
225 | if (err) |
226 | break; |
227 | |
		i915_active_acquire_barrier(&active->base);
229 | } |
230 | |
	i915_active_release(&active->base);
232 | if (err) |
233 | goto out; |
234 | |
	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after flushing barriers!\n");
238 | err = -EINVAL; |
239 | } |
240 | |
241 | out: |
242 | __live_put(active); |
243 | |
244 | if (igt_flush_test(i915)) |
245 | err = -EIO; |
246 | |
247 | return err; |
248 | } |
249 | |
250 | int i915_active_live_selftests(struct drm_i915_private *i915) |
251 | { |
252 | static const struct i915_subtest tests[] = { |
253 | SUBTEST(live_active_wait), |
254 | SUBTEST(live_active_retire), |
255 | SUBTEST(live_active_barrier), |
256 | }; |
257 | |
	if (intel_gt_is_wedged(to_gt(i915)))
259 | return 0; |
260 | |
261 | return i915_subtests(tests, i915); |
262 | } |
263 | |
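/* Return the engine for this node if, and only if, it is still a barrier. */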
264 | static struct intel_engine_cs *node_to_barrier(struct active_node *it) |
265 | { |
266 | struct intel_engine_cs *engine; |
267 | |
	if (!is_barrier(&it->base))
269 | return NULL; |
270 | |
	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
274 | return NULL; |
275 | |
276 | return engine; |
277 | } |
278 | |
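/* Dump the state of the i915_active tracker, e.g. on selftest failure. */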
279 | void i915_active_print(struct i915_active *ref, struct drm_printer *m) |
280 | { |
	drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   str_yes_no(!llist_empty(&ref->preallocated_barriers)));
285 | |
286 | if (i915_active_acquire_if_busy(ref)) { |
287 | struct active_node *it, *n; |
288 | |
289 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { |
290 | struct intel_engine_cs *engine; |
291 | |
292 | engine = node_to_barrier(it); |
293 | if (engine) { |
				drm_printf(m, "\tbarrier: %s\n", engine->name);
295 | continue; |
296 | } |
297 | |
			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
301 | continue; |
302 | } |
303 | } |
304 | |
305 | i915_active_release(ref); |
306 | } |
307 | } |
308 | |
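/* Wait for any critical section currently holding @lock to exit. */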
309 | static void spin_unlock_wait(spinlock_t *lock) |
310 | { |
311 | spin_lock_irq(lock); |
312 | spin_unlock_irq(lock); |
313 | } |
314 | |
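/*
 * Steal the fence from the tracked slot and detach our callback from it.
 * The fence must already be signaled (see the GEM_BUG_ON); we are merely
 * tidying up so the tracker can be unwound without waiting for the
 * callback to run.
 */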
315 | static void active_flush(struct i915_active *ref, |
316 | struct i915_active_fence *active) |
317 | { |
318 | struct dma_fence *fence; |
319 | |
320 | fence = xchg(__active_fence_slot(active), NULL); |
321 | if (!fence) |
322 | return; |
323 | |
	spin_lock_irq(fence->lock);
	__list_del_entry(&active->cb.node);
	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
	atomic_dec(&ref->count);
328 | |
329 | GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); |
330 | } |
331 | |
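/*
 * Flush the callbacks of all (already signaled) fences and wait for the
 * retirement worker, so that no other context still holds the locks
 * belonging to this i915_active.
 */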
332 | void i915_active_unlock_wait(struct i915_active *ref) |
333 | { |
334 | if (i915_active_acquire_if_busy(ref)) { |
335 | struct active_node *it, *n; |
336 | |
337 | /* Wait for all active callbacks */ |
338 | rcu_read_lock(); |
		active_flush(ref, &ref->excl);
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
			active_flush(ref, &it->base);
342 | rcu_read_unlock(); |
343 | |
344 | i915_active_release(ref); |
345 | } |
346 | |
347 | /* And wait for the retire callback */ |
	spin_unlock_wait(&ref->tree_lock);
349 | |
350 | /* ... which may have been on a thread instead */ |
	flush_work(&ref->work);
352 | } |
353 | |