/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

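/* The object backing this vma has already dropped its last reference. */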
static bool dying_vma(struct i915_vma *vma)
{
	return !kref_read(&vma->obj->base.refcount);
}

static int ggtt_flush(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct intel_gt *gt;
	int ret = 0;

	list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
		/*
		 * Not everything in the GGTT is tracked via vma (otherwise we
		 * could evict as required with minimal stalling) so we are forced
		 * to idle the GPU and explicitly retire outstanding requests in
		 * the hopes that we can then remove contexts and the like only
		 * bound by their active reference.
		 */
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}
	return ret;
}

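/*
 * Take a reference and trylock the object backing the vma so that it can
 * safely be unbound. If the object is already dead, no reference can be
 * taken; its bindings are no longer pinned and may be reaped directly.
 */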
static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	/*
	 * We add the extra refcount so the object doesn't drop to zero until
	 * after ungrab_vma(), this way trylock is always paired with unlock.
	 */
	if (i915_gem_object_get_rcu(vma->obj)) {
		if (!i915_gem_object_trylock(vma->obj, ww)) {
			i915_gem_object_put(vma->obj);
			return false;
		}
	} else {
		/* Dead objects don't need pins */
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	}

	return true;
}

static void ungrab_vma(struct i915_vma *vma)
{
	if (dying_vma(vma))
		return;

	i915_gem_object_unlock(vma->obj);
	i915_gem_object_put(vma->obj);
}

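/*
 * Add an unpinned vma to the eviction scan, keeping it on the unwind list
 * so that the scan can be backed out if no suitable hole is found.
 */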
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_gem_ww_ctx *ww,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (!grab_vma(vma, ww))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

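/*
 * Active and scanout vmas are costly to evict (unbinding must stall for
 * them), so defer them until nothing cheaper satisfies the scan.
 */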
static bool defer_evict(struct i915_vma *vma)
{
	if (i915_vma_is_active(vma))
		return true;

	if (i915_vma_is_scanout(vma))
		return true;

	return false;
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @ww: An optional struct i915_gem_ww_ctx.
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	struct intel_gt *gt;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired, random order)
	 * 2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, color,
				    start, end, mode);

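	/*
	 * Retire completed requests first so that recently finished work
	 * drops its hold on vmas before we scan for eviction candidates.
	 */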
	if (i915_is_ggtt(vm)) {
		struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
			intel_gt_retire_requests(gt);
	} else {
		intel_gt_retire_requests(vm->gt);
	}

search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		if (vma == active) { /* now seen this vma twice */
			if (flags & PIN_NONBLOCK)
				break;

			active = ERR_PTR(-EAGAIN);
		}

		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scan order
		 * is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
			if (!active)
				active = vma;

			list_move_tail(&vma->vm_link, &vm->bound_list);
			continue;
		}

		if (mark_free(&scan, ww, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
		ungrab_vma(vma);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests which will then remove
	 * the pinning for active objects such as contexts and ring,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is nothing more we can evict.
	 */
	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
		return -EBUSY;

	ret = ggtt_flush(vm);
	if (ret)
		return ret;

	cond_resched();

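	/*
	 * After the flush, fall back to a single non-blocking pass so the
	 * search is guaranteed to terminate: if it still finds no space,
	 * we return -ENOSPC rather than loop forever.
	 */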
	flags |= PIN_NONBLOCK;
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node)) {
			__i915_vma_pin(vma);
		} else {
			list_del(&vma->evict_link);
			ungrab_vma(vma);
		}
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
		ungrab_vma(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);

		/* If we find any non-objects (!vma), we cannot evict them */
		if (vma->node.color != I915_COLOR_UNEVICTABLE &&
		    grab_vma(vma, ww)) {
			ret = __i915_vma_unbind(vma);
			ungrab_vma(vma);
		} else {
			ret = -ENOSPC;
		}
	}

	return ret;
}

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @ww: An optional struct i915_gem_ww_ctx.
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct i915_gem_ww_ctx *ww,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (i915_is_ggtt(vm)) {
		struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
		struct intel_gt *gt;

		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
			intel_gt_retire_requests(gt);
	} else {
		intel_gt_retire_requests(vm->gt);
	}

	if (i915_vm_has_cache_coloring(vm)) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (i915_vm_has_cache_coloring(vm)) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
			ret = -ENOSPC;
			break;
		}

		if (!grab_vma(vma, ww)) {
			ret = -ENOSPC;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

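	/*
	 * Unwind: drop our temporary pins and object locks, unbinding each
	 * vma as we go until the first failure.
	 */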
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);

		ungrab_vma(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
 * will be able to evict vmas locked by the ww as well.
 * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
 * in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
 * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
 * the vm->mutex, before trying again to acquire the contended lock. The caller
 * also owns a reference to the object.
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
		      struct drm_i915_gem_object **busy_bo)
{
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict_vm(vm);

	/* Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm);
		if (ret)
			return ret;
	}

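	/*
	 * Walk the bound list repeatedly, evicting every unpinned vma we
	 * can lock, until a pass finds nothing more to evict or we hit an
	 * error.
	 */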
	do {
		struct i915_vma *vma, *vn;
		LIST_HEAD(eviction_list);
		LIST_HEAD(locked_eviction_list);

		list_for_each_entry(vma, &vm->bound_list, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			/*
			 * If we already own the lock, trylock fails. In case
			 * the resv is shared among multiple objects, we still
			 * need the object ref.
			 */
			if (!i915_gem_object_get_rcu(vma->obj) ||
			    (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
				__i915_vma_pin(vma);
				list_add(&vma->evict_link, &locked_eviction_list);
				continue;
			}

			if (!i915_gem_object_trylock(vma->obj, ww)) {
				if (busy_bo) {
					*busy_bo = vma->obj; /* holds ref */
					ret = -EBUSY;
					break;
				}
				i915_gem_object_put(vma->obj);
				continue;
			}

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
		if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
			break;

		/* Unbind locked objects first, before unlocking the eviction_list */
		list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
			__i915_vma_unpin(vma);

			if (ret == 0) {
				ret = __i915_vma_unbind(vma);
				if (ret != -EINTR) /* "Get me out of here!" */
					ret = 0;
			}
			if (!dying_vma(vma))
				i915_gem_object_put(vma->obj);
		}

		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
			__i915_vma_unpin(vma);
			if (ret == 0) {
				ret = __i915_vma_unbind(vma);
				if (ret != -EINTR) /* "Get me out of here!" */
					ret = 0;
			}

			i915_gem_object_unlock(vma->obj);
			i915_gem_object_put(vma->obj);
		}
	} while (ret == 0);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif