1 | /* |
2 | * Copyright © 2016 Intel Corporation |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
22 | * |
23 | */ |
24 | |
25 | #include <linux/list_sort.h> |
26 | #include <linux/prime_numbers.h> |
27 | |
28 | #include "gem/i915_gem_context.h" |
29 | #include "gem/i915_gem_internal.h" |
30 | #include "gem/i915_gem_lmem.h" |
31 | #include "gem/i915_gem_region.h" |
32 | #include "gem/selftests/mock_context.h" |
33 | #include "gt/intel_context.h" |
34 | #include "gt/intel_gpu_commands.h" |
35 | #include "gt/intel_gtt.h" |
36 | |
37 | #include "i915_random.h" |
38 | #include "i915_selftest.h" |
39 | #include "i915_vma_resource.h" |
40 | |
41 | #include "mock_drm.h" |
42 | #include "mock_gem_device.h" |
43 | #include "mock_gtt.h" |
44 | #include "igt_flush_test.h" |
45 | |
46 | static void cleanup_freed_objects(struct drm_i915_private *i915) |
47 | { |
48 | i915_gem_drain_freed_objects(i915); |
49 | } |
50 | |
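/*
 * The "fake" object backend below provides backing storage without
 * consuming real memory: every scatterlist segment is pointed at the
 * same bogus page (PFN_BIAS), so the tests can create objects far
 * larger than available RAM and exercise only the GTT handling.
 */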
51 | static void fake_free_pages(struct drm_i915_gem_object *obj, |
52 | struct sg_table *pages) |
53 | { |
54 | sg_free_table(pages); |
	kfree(pages);
56 | } |
57 | |
58 | static int fake_get_pages(struct drm_i915_gem_object *obj) |
59 | { |
60 | #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) |
61 | #define PFN_BIAS 0x1000 |
62 | struct sg_table *pages; |
63 | struct scatterlist *sg; |
64 | typeof(obj->base.size) rem; |
65 | |
	pages = kmalloc(sizeof(*pages), GFP);
67 | if (!pages) |
68 | return -ENOMEM; |
69 | |
70 | rem = round_up(obj->base.size, BIT(31)) >> 31; |
71 | /* restricted by sg_alloc_table */ |
72 | if (overflows_type(rem, unsigned int)) { |
		kfree(pages);
74 | return -E2BIG; |
75 | } |
76 | |
77 | if (sg_alloc_table(pages, rem, GFP)) { |
		kfree(pages);
79 | return -ENOMEM; |
80 | } |
81 | |
82 | rem = obj->base.size; |
83 | for (sg = pages->sgl; sg; sg = sg_next(sg)) { |
84 | unsigned long len = min_t(typeof(rem), rem, BIT(31)); |
85 | |
86 | GEM_BUG_ON(!len); |
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
88 | sg_dma_address(sg) = page_to_phys(sg_page(sg)); |
89 | sg_dma_len(sg) = len; |
90 | |
91 | rem -= len; |
92 | } |
93 | GEM_BUG_ON(rem); |
94 | |
95 | __i915_gem_object_set_pages(obj, pages); |
96 | |
97 | return 0; |
98 | #undef GFP |
99 | } |
100 | |
101 | static void fake_put_pages(struct drm_i915_gem_object *obj, |
102 | struct sg_table *pages) |
103 | { |
104 | fake_free_pages(obj, pages); |
105 | obj->mm.dirty = false; |
106 | } |
107 | |
108 | static const struct drm_i915_gem_object_ops fake_ops = { |
	.name = "fake-gem",
110 | .flags = I915_GEM_OBJECT_IS_SHRINKABLE, |
111 | .get_pages = fake_get_pages, |
112 | .put_pages = fake_put_pages, |
113 | }; |
114 | |
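/* Create an object of @size bytes backed by the fake page arrays above */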
115 | static struct drm_i915_gem_object * |
116 | fake_dma_object(struct drm_i915_private *i915, u64 size) |
117 | { |
118 | static struct lock_class_key lock_class; |
119 | struct drm_i915_gem_object *obj; |
120 | |
121 | GEM_BUG_ON(!size); |
122 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); |
123 | |
124 | if (overflows_type(size, obj->base.size)) |
		return ERR_PTR(-E2BIG);
126 | |
127 | obj = i915_gem_object_alloc(); |
128 | if (!obj) |
129 | goto err; |
130 | |
	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
133 | |
134 | i915_gem_object_set_volatile(obj); |
135 | |
136 | obj->write_domain = I915_GEM_DOMAIN_CPU; |
137 | obj->read_domains = I915_GEM_DOMAIN_CPU; |
	obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE);
139 | |
140 | /* Preallocate the "backing storage" */ |
141 | if (i915_gem_object_pin_pages_unlocked(obj)) |
142 | goto err_obj; |
143 | |
144 | i915_gem_object_unpin_pages(obj); |
145 | return obj; |
146 | |
147 | err_obj: |
148 | i915_gem_object_put(obj); |
149 | err: |
	return ERR_PTR(-ENOMEM);
151 | } |
152 | |
153 | static int igt_ppgtt_alloc(void *arg) |
154 | { |
155 | struct drm_i915_private *dev_priv = arg; |
156 | struct i915_ppgtt *ppgtt; |
157 | struct i915_gem_ww_ctx ww; |
158 | u64 size, last, limit; |
159 | int err = 0; |
160 | |
	/* Allocate a ppgtt and try to fill the entire range */
162 | |
163 | if (!HAS_PPGTT(dev_priv)) |
164 | return 0; |
165 | |
	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);
169 | |
170 | if (!ppgtt->vm.allocate_va_range) |
171 | goto err_ppgtt_cleanup; |
172 | |
173 | /* |
174 | * While we only allocate the page tables here and so we could |
175 | * address a much larger GTT than we could actually fit into |
176 | * RAM, a practical limit is the amount of physical pages in the system. |
177 | * This should ensure that we do not run into the oomkiller during |
178 | * the test and take down the machine wilfully. |
179 | */ |
180 | limit = totalram_pages() << PAGE_SHIFT; |
181 | limit = min(ppgtt->vm.total, limit); |
182 | |
	i915_gem_ww_ctx_init(&ww, false);
184 | retry: |
	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
186 | if (err) |
187 | goto err_ppgtt_cleanup; |
188 | |
189 | /* Check we can allocate the entire range */ |
190 | for (size = 4096; size <= limit; size <<= 2) { |
191 | struct i915_vm_pt_stash stash = {}; |
192 | |
		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
194 | if (err) |
195 | goto err_ppgtt_cleanup; |
196 | |
		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
		if (err) {
			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
200 | goto err_ppgtt_cleanup; |
201 | } |
202 | |
203 | ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size); |
204 | cond_resched(); |
205 | |
206 | ppgtt->vm.clear_range(&ppgtt->vm, 0, size); |
207 | |
		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
209 | } |
210 | |
211 | /* Check we can incrementally allocate the entire range */ |
212 | for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) { |
213 | struct i915_vm_pt_stash stash = {}; |
214 | |
		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
216 | if (err) |
217 | goto err_ppgtt_cleanup; |
218 | |
		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
		if (err) {
			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
222 | goto err_ppgtt_cleanup; |
223 | } |
224 | |
225 | ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, |
226 | last, size - last); |
227 | cond_resched(); |
228 | |
		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
230 | } |
231 | |
232 | err_ppgtt_cleanup: |
233 | if (err == -EDEADLK) { |
		err = i915_gem_ww_ctx_backoff(&ww);
235 | if (!err) |
236 | goto retry; |
237 | } |
	i915_gem_ww_ctx_fini(&ww);
239 | |
	i915_vm_put(&ppgtt->vm);
241 | return err; |
242 | } |
243 | |
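/*
 * Exercise the low-level vm hooks (allocate_va_range, insert_entries and
 * clear_range) directly, using a mock vma resource in place of a real
 * binding, for every power-of-two object size that fits in the hole.
 */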
244 | static int lowlevel_hole(struct i915_address_space *vm, |
245 | u64 hole_start, u64 hole_end, |
246 | unsigned long end_time) |
247 | { |
248 | const unsigned int min_alignment = |
		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
250 | I915_RND_STATE(seed_prng); |
251 | struct i915_vma_resource *mock_vma_res; |
252 | unsigned int size; |
253 | |
	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
255 | if (!mock_vma_res) |
256 | return -ENOMEM; |
257 | |
258 | /* Keep creating larger objects until one cannot fit into the hole */ |
259 | for (size = 12; (hole_end - hole_start) >> size; size++) { |
260 | I915_RND_SUBSTATE(prng, seed_prng); |
261 | struct drm_i915_gem_object *obj; |
262 | unsigned int *order, count, n; |
263 | u64 hole_size, aligned_size; |
264 | |
265 | aligned_size = max_t(u32, ilog2(min_alignment), size); |
266 | hole_size = (hole_end - hole_start) >> aligned_size; |
267 | if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32)) |
268 | hole_size = KMALLOC_MAX_SIZE / sizeof(u32); |
269 | count = hole_size >> 1; |
270 | if (!count) { |
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
272 | __func__, hole_start, hole_end, size, hole_size); |
273 | break; |
274 | } |
275 | |
276 | do { |
			order = i915_random_order(count, &prng);
278 | if (order) |
279 | break; |
280 | } while (count >>= 1); |
281 | if (!count) { |
			kfree(mock_vma_res);
283 | return -ENOMEM; |
284 | } |
285 | GEM_BUG_ON(!order); |
286 | |
287 | GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total); |
288 | GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end); |
289 | |
290 | /* Ignore allocation failures (i.e. don't report them as |
291 | * a test failure) as we are purposefully allocating very |
292 | * large objects without checking that we have sufficient |
293 | * memory. We expect to hit -ENOMEM. |
294 | */ |
295 | |
		obj = fake_dma_object(vm->i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
299 | break; |
300 | } |
301 | |
302 | GEM_BUG_ON(obj->base.size != BIT_ULL(size)); |
303 | |
304 | if (i915_gem_object_pin_pages_unlocked(obj)) { |
305 | i915_gem_object_put(obj); |
			kfree(order);
307 | break; |
308 | } |
309 | |
310 | for (n = 0; n < count; n++) { |
311 | u64 addr = hole_start + order[n] * BIT_ULL(aligned_size); |
312 | intel_wakeref_t wakeref; |
313 | |
314 | GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total); |
315 | |
			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
319 | hole_end = hole_start; /* quit */ |
320 | break; |
321 | } |
322 | |
323 | if (vm->allocate_va_range) { |
324 | struct i915_vm_pt_stash stash = {}; |
325 | struct i915_gem_ww_ctx ww; |
326 | int err; |
327 | |
				i915_gem_ww_ctx_init(&ww, false);
329 | retry: |
				err = i915_vm_lock_objects(vm, &ww);
331 | if (err) |
332 | goto alloc_vm_end; |
333 | |
334 | err = -ENOMEM; |
				if (i915_vm_alloc_pt_stash(vm, &stash,
336 | BIT_ULL(size))) |
337 | goto alloc_vm_end; |
338 | |
				err = i915_vm_map_pt_stash(vm, &stash);
340 | if (!err) |
341 | vm->allocate_va_range(vm, &stash, |
342 | addr, BIT_ULL(size)); |
				i915_vm_free_pt_stash(vm, &stash);
344 | alloc_vm_end: |
345 | if (err == -EDEADLK) { |
					err = i915_gem_ww_ctx_backoff(&ww);
347 | if (!err) |
348 | goto retry; |
349 | } |
				i915_gem_ww_ctx_fini(&ww);
351 | |
352 | if (err) |
353 | break; |
354 | } |
355 | |
356 | mock_vma_res->bi.pages = obj->mm.pages; |
357 | mock_vma_res->node_size = BIT_ULL(aligned_size); |
358 | mock_vma_res->start = addr; |
359 | |
360 | with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) |
361 | vm->insert_entries(vm, mock_vma_res, |
						   i915_gem_get_pat_index(vm->i915,
									  I915_CACHE_NONE),
364 | 0); |
365 | } |
366 | count = n; |
367 | |
		i915_random_reorder(order, count, &prng);
369 | for (n = 0; n < count; n++) { |
370 | u64 addr = hole_start + order[n] * BIT_ULL(aligned_size); |
371 | intel_wakeref_t wakeref; |
372 | |
373 | GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); |
374 | with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) |
375 | vm->clear_range(vm, addr, BIT_ULL(size)); |
376 | } |
377 | |
378 | i915_gem_object_unpin_pages(obj); |
379 | i915_gem_object_put(obj); |
380 | |
		kfree(order);
382 | |
		cleanup_freed_objects(vm->i915);
384 | } |
385 | |
	kfree(mock_vma_res);
387 | return 0; |
388 | } |
389 | |
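/* Unbind any vma still bound in @vm and drop our object references */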
390 | static void close_object_list(struct list_head *objects, |
391 | struct i915_address_space *vm) |
392 | { |
393 | struct drm_i915_gem_object *obj, *on; |
394 | int __maybe_unused ignored; |
395 | |
396 | list_for_each_entry_safe(obj, on, objects, st_link) { |
397 | struct i915_vma *vma; |
398 | |
399 | vma = i915_vma_instance(obj, vm, NULL); |
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind_unlocked(vma);

		list_del(&obj->st_link);
404 | i915_gem_object_put(obj); |
405 | } |
406 | } |
407 | |
408 | static int fill_hole(struct i915_address_space *vm, |
409 | u64 hole_start, u64 hole_end, |
410 | unsigned long end_time) |
411 | { |
412 | const u64 hole_size = hole_end - hole_start; |
413 | struct drm_i915_gem_object *obj; |
414 | const unsigned int min_alignment = |
		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
416 | const unsigned long max_pages = |
417 | min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment)); |
418 | const unsigned long max_step = max(int_sqrt(max_pages), 2UL); |
419 | unsigned long npages, prime, flags; |
420 | struct i915_vma *vma; |
421 | LIST_HEAD(objects); |
422 | int err; |
423 | |
424 | /* Try binding many VMA working inwards from either edge */ |
425 | |
426 | flags = PIN_OFFSET_FIXED | PIN_USER; |
427 | if (i915_is_ggtt(vm)) |
428 | flags |= PIN_GLOBAL; |
429 | |
430 | for_each_prime_number_from(prime, 2, max_step) { |
431 | for (npages = 1; npages <= max_pages; npages *= prime) { |
432 | const u64 full_size = npages << PAGE_SHIFT; |
433 | const struct { |
434 | const char *name; |
435 | u64 offset; |
436 | int step; |
437 | } phases[] = { |
438 | { "top-down" , hole_end, -1, }, |
439 | { "bottom-up" , hole_start, 1, }, |
440 | { } |
441 | }, *p; |
442 | |
			obj = fake_dma_object(vm->i915, full_size);
			if (IS_ERR(obj))
445 | break; |
446 | |
			list_add(&obj->st_link, &objects);
448 | |
449 | /* Align differing sized objects against the edges, and |
450 | * check we don't walk off into the void when binding |
451 | * them into the GTT. |
452 | */ |
453 | for (p = phases; p->name; p++) { |
454 | u64 offset; |
455 | |
456 | offset = p->offset; |
457 | list_for_each_entry(obj, &objects, st_link) { |
458 | u64 aligned_size = round_up(obj->base.size, |
459 | min_alignment); |
460 | |
461 | vma = i915_vma_instance(obj, vm, NULL); |
					if (IS_ERR(vma))
463 | continue; |
464 | |
465 | if (p->step < 0) { |
466 | if (offset < hole_start + aligned_size) |
467 | break; |
468 | offset -= aligned_size; |
469 | } |
470 | |
					err = i915_vma_pin(vma, 0, 0, offset | flags);
472 | if (err) { |
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
474 | __func__, p->name, err, npages, prime, offset); |
475 | goto err; |
476 | } |
477 | |
					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
481 | __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), |
482 | offset); |
483 | err = -EINVAL; |
484 | goto err; |
485 | } |
486 | |
487 | i915_vma_unpin(vma); |
488 | |
489 | if (p->step > 0) { |
490 | if (offset + aligned_size > hole_end) |
491 | break; |
492 | offset += aligned_size; |
493 | } |
494 | } |
495 | |
496 | offset = p->offset; |
497 | list_for_each_entry(obj, &objects, st_link) { |
498 | u64 aligned_size = round_up(obj->base.size, |
499 | min_alignment); |
500 | |
501 | vma = i915_vma_instance(obj, vm, NULL); |
					if (IS_ERR(vma))
503 | continue; |
504 | |
505 | if (p->step < 0) { |
506 | if (offset < hole_start + aligned_size) |
507 | break; |
508 | offset -= aligned_size; |
509 | } |
510 | |
					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
514 | __func__, p->name, vma->node.start, vma->node.size, |
515 | offset); |
516 | err = -EINVAL; |
517 | goto err; |
518 | } |
519 | |
520 | err = i915_vma_unbind_unlocked(vma); |
521 | if (err) { |
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
523 | __func__, p->name, vma->node.start, vma->node.size, |
524 | err); |
525 | goto err; |
526 | } |
527 | |
528 | if (p->step > 0) { |
529 | if (offset + aligned_size > hole_end) |
530 | break; |
531 | offset += aligned_size; |
532 | } |
533 | } |
534 | |
535 | offset = p->offset; |
536 | list_for_each_entry_reverse(obj, &objects, st_link) { |
537 | u64 aligned_size = round_up(obj->base.size, |
538 | min_alignment); |
539 | |
540 | vma = i915_vma_instance(obj, vm, NULL); |
					if (IS_ERR(vma))
542 | continue; |
543 | |
544 | if (p->step < 0) { |
545 | if (offset < hole_start + aligned_size) |
546 | break; |
547 | offset -= aligned_size; |
548 | } |
549 | |
					err = i915_vma_pin(vma, 0, 0, offset | flags);
551 | if (err) { |
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
553 | __func__, p->name, err, npages, prime, offset); |
554 | goto err; |
555 | } |
556 | |
					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
560 | __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), |
561 | offset); |
562 | err = -EINVAL; |
563 | goto err; |
564 | } |
565 | |
566 | i915_vma_unpin(vma); |
567 | |
568 | if (p->step > 0) { |
569 | if (offset + aligned_size > hole_end) |
570 | break; |
571 | offset += aligned_size; |
572 | } |
573 | } |
574 | |
575 | offset = p->offset; |
576 | list_for_each_entry_reverse(obj, &objects, st_link) { |
577 | u64 aligned_size = round_up(obj->base.size, |
578 | min_alignment); |
579 | |
580 | vma = i915_vma_instance(obj, vm, NULL); |
					if (IS_ERR(vma))
582 | continue; |
583 | |
584 | if (p->step < 0) { |
585 | if (offset < hole_start + aligned_size) |
586 | break; |
587 | offset -= aligned_size; |
588 | } |
589 | |
					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
593 | __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), |
594 | offset); |
595 | err = -EINVAL; |
596 | goto err; |
597 | } |
598 | |
599 | err = i915_vma_unbind_unlocked(vma); |
600 | if (err) { |
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
602 | __func__, p->name, vma->node.start, vma->node.size, |
603 | err); |
604 | goto err; |
605 | } |
606 | |
607 | if (p->step > 0) { |
608 | if (offset + aligned_size > hole_end) |
609 | break; |
610 | offset += aligned_size; |
611 | } |
612 | } |
613 | } |
614 | |
			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
616 | __func__, npages, prime)) { |
617 | err = -EINTR; |
618 | goto err; |
619 | } |
620 | } |
621 | |
		close_object_list(&objects, vm);
		cleanup_freed_objects(vm->i915);
624 | } |
625 | |
626 | return 0; |
627 | |
628 | err: |
	close_object_list(&objects, vm);
630 | return err; |
631 | } |
632 | |
633 | static int walk_hole(struct i915_address_space *vm, |
634 | u64 hole_start, u64 hole_end, |
635 | unsigned long end_time) |
636 | { |
637 | const u64 hole_size = hole_end - hole_start; |
638 | const unsigned long max_pages = |
639 | min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT); |
640 | unsigned long min_alignment; |
641 | unsigned long flags; |
642 | u64 size; |
643 | |
644 | /* Try binding a single VMA in different positions within the hole */ |
645 | |
646 | flags = PIN_OFFSET_FIXED | PIN_USER; |
647 | if (i915_is_ggtt(vm)) |
648 | flags |= PIN_GLOBAL; |
649 | |
	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
651 | |
652 | for_each_prime_number_from(size, 1, max_pages) { |
653 | struct drm_i915_gem_object *obj; |
654 | struct i915_vma *vma; |
655 | u64 addr; |
656 | int err = 0; |
657 | |
		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
660 | break; |
661 | |
662 | vma = i915_vma_instance(obj, vm, NULL); |
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
665 | goto err_put; |
666 | } |
667 | |
668 | for (addr = hole_start; |
669 | addr + obj->base.size < hole_end; |
670 | addr += round_up(obj->base.size, min_alignment)) { |
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
674 | __func__, addr, vma->size, |
675 | hole_start, hole_end, err); |
676 | goto err_put; |
677 | } |
678 | i915_vma_unpin(vma); |
679 | |
			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
683 | __func__, addr, vma->size); |
684 | err = -EINVAL; |
685 | goto err_put; |
686 | } |
687 | |
688 | err = i915_vma_unbind_unlocked(vma); |
689 | if (err) { |
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
691 | __func__, addr, vma->size, err); |
692 | goto err_put; |
693 | } |
694 | |
695 | GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); |
696 | |
			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
700 | err = -EINTR; |
701 | goto err_put; |
702 | } |
703 | } |
704 | |
705 | err_put: |
706 | i915_gem_object_put(obj); |
707 | if (err) |
708 | return err; |
709 | |
		cleanup_freed_objects(vm->i915);
711 | } |
712 | |
713 | return 0; |
714 | } |
715 | |
716 | static int pot_hole(struct i915_address_space *vm, |
717 | u64 hole_start, u64 hole_end, |
718 | unsigned long end_time) |
719 | { |
720 | struct drm_i915_gem_object *obj; |
721 | struct i915_vma *vma; |
722 | unsigned int min_alignment; |
723 | unsigned long flags; |
724 | unsigned int pot; |
725 | int err = 0; |
726 | |
727 | flags = PIN_OFFSET_FIXED | PIN_USER; |
728 | if (i915_is_ggtt(vm)) |
729 | flags |= PIN_GLOBAL; |
730 | |
	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
732 | |
	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
736 | |
737 | vma = i915_vma_instance(obj, vm, NULL); |
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
740 | goto err_obj; |
741 | } |
742 | |
743 | /* Insert a pair of pages across every pot boundary within the hole */ |
	for (pot = fls64(hole_end - 1) - 1;
745 | pot > ilog2(2 * min_alignment); |
746 | pot--) { |
747 | u64 step = BIT_ULL(pot); |
748 | u64 addr; |
749 | |
750 | for (addr = round_up(hole_start + min_alignment, step) - min_alignment; |
751 | hole_end > addr && hole_end - addr >= 2 * min_alignment; |
752 | addr += step) { |
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
756 | __func__, |
757 | addr, |
758 | hole_start, hole_end, |
759 | err); |
760 | goto err_obj; |
761 | } |
762 | |
			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
766 | __func__, addr, vma->size); |
767 | i915_vma_unpin(vma); |
768 | err = i915_vma_unbind_unlocked(vma); |
769 | err = -EINVAL; |
770 | goto err_obj; |
771 | } |
772 | |
773 | i915_vma_unpin(vma); |
774 | err = i915_vma_unbind_unlocked(vma); |
775 | GEM_BUG_ON(err); |
776 | } |
777 | |
		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
781 | err = -EINTR; |
782 | goto err_obj; |
783 | } |
784 | } |
785 | |
786 | err_obj: |
787 | i915_gem_object_put(obj); |
788 | return err; |
789 | } |
790 | |
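/*
 * Bind and unbind a single vma at randomised offsets across the hole,
 * checking that placement does not depend on the order of insertion.
 */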
791 | static int drunk_hole(struct i915_address_space *vm, |
792 | u64 hole_start, u64 hole_end, |
793 | unsigned long end_time) |
794 | { |
795 | I915_RND_STATE(prng); |
796 | unsigned int min_alignment; |
797 | unsigned int size; |
798 | unsigned long flags; |
799 | |
800 | flags = PIN_OFFSET_FIXED | PIN_USER; |
801 | if (i915_is_ggtt(vm)) |
802 | flags |= PIN_GLOBAL; |
803 | |
	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
805 | |
806 | /* Keep creating larger objects until one cannot fit into the hole */ |
807 | for (size = 12; (hole_end - hole_start) >> size; size++) { |
808 | struct drm_i915_gem_object *obj; |
809 | unsigned int *order, count, n; |
810 | struct i915_vma *vma; |
811 | u64 hole_size, aligned_size; |
812 | int err = -ENODEV; |
813 | |
814 | aligned_size = max_t(u32, ilog2(min_alignment), size); |
815 | hole_size = (hole_end - hole_start) >> aligned_size; |
816 | if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32)) |
817 | hole_size = KMALLOC_MAX_SIZE / sizeof(u32); |
818 | count = hole_size >> 1; |
819 | if (!count) { |
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
821 | __func__, hole_start, hole_end, size, hole_size); |
822 | break; |
823 | } |
824 | |
825 | do { |
			order = i915_random_order(count, &prng);
827 | if (order) |
828 | break; |
829 | } while (count >>= 1); |
830 | if (!count) |
831 | return -ENOMEM; |
832 | GEM_BUG_ON(!order); |
833 | |
834 | /* Ignore allocation failures (i.e. don't report them as |
835 | * a test failure) as we are purposefully allocating very |
836 | * large objects without checking that we have sufficient |
837 | * memory. We expect to hit -ENOMEM. |
838 | */ |
839 | |
		obj = fake_dma_object(vm->i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
843 | break; |
844 | } |
845 | |
846 | vma = i915_vma_instance(obj, vm, NULL); |
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
849 | goto err_obj; |
850 | } |
851 | |
852 | GEM_BUG_ON(vma->size != BIT_ULL(size)); |
853 | |
854 | for (n = 0; n < count; n++) { |
855 | u64 addr = hole_start + order[n] * BIT_ULL(aligned_size); |
856 | |
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
860 | __func__, |
861 | addr, BIT_ULL(size), |
862 | hole_start, hole_end, |
863 | err); |
864 | goto err_obj; |
865 | } |
866 | |
			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
870 | __func__, addr, BIT_ULL(size)); |
871 | i915_vma_unpin(vma); |
872 | err = i915_vma_unbind_unlocked(vma); |
873 | err = -EINVAL; |
874 | goto err_obj; |
875 | } |
876 | |
877 | i915_vma_unpin(vma); |
878 | err = i915_vma_unbind_unlocked(vma); |
879 | GEM_BUG_ON(err); |
880 | |
			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
884 | err = -EINTR; |
885 | goto err_obj; |
886 | } |
887 | } |
888 | |
889 | err_obj: |
890 | i915_gem_object_put(obj); |
	kfree(order);
892 | if (err) |
893 | return err; |
894 | |
		cleanup_freed_objects(vm->i915);
896 | } |
897 | |
898 | return 0; |
899 | } |
900 | |
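/*
 * Fill the hole with ever-larger objects while shrink_hole() below
 * injects allocation faults, forcing the page-table allocators through
 * their error paths.
 */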
901 | static int __shrink_hole(struct i915_address_space *vm, |
902 | u64 hole_start, u64 hole_end, |
903 | unsigned long end_time) |
904 | { |
905 | struct drm_i915_gem_object *obj; |
906 | unsigned long flags = PIN_OFFSET_FIXED | PIN_USER; |
907 | unsigned int min_alignment; |
908 | unsigned int order = 12; |
909 | LIST_HEAD(objects); |
910 | int err = 0; |
911 | u64 addr; |
912 | |
	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
914 | |
915 | /* Keep creating larger objects until one cannot fit into the hole */ |
916 | for (addr = hole_start; addr < hole_end; ) { |
917 | struct i915_vma *vma; |
918 | u64 size = BIT_ULL(order++); |
919 | |
920 | size = min(size, hole_end - addr); |
		obj = fake_dma_object(vm->i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
924 | break; |
925 | } |
926 | |
		list_add(&obj->st_link, &objects);
928 | |
929 | vma = i915_vma_instance(obj, vm, NULL); |
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
932 | break; |
933 | } |
934 | |
935 | GEM_BUG_ON(vma->size != size); |
936 | |
		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
940 | __func__, addr, size, hole_start, hole_end, err); |
941 | break; |
942 | } |
943 | |
		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
947 | __func__, addr, size); |
948 | i915_vma_unpin(vma); |
949 | err = i915_vma_unbind_unlocked(vma); |
950 | err = -EINVAL; |
951 | break; |
952 | } |
953 | |
954 | i915_vma_unpin(vma); |
955 | addr += round_up(size, min_alignment); |
956 | |
957 | /* |
958 | * Since we are injecting allocation faults at random intervals, |
959 | * wait for this allocation to complete before we change the |
960 | * faultinjection. |
961 | */ |
962 | err = i915_vma_sync(vma); |
963 | if (err) |
964 | break; |
965 | |
		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
969 | err = -EINTR; |
970 | break; |
971 | } |
972 | } |
973 | |
	close_object_list(&objects, vm);
	cleanup_freed_objects(vm->i915);
976 | return err; |
977 | } |
978 | |
979 | static int shrink_hole(struct i915_address_space *vm, |
980 | u64 hole_start, u64 hole_end, |
981 | unsigned long end_time) |
982 | { |
983 | unsigned long prime; |
984 | int err; |
985 | |
986 | vm->fault_attr.probability = 999; |
	atomic_set(&vm->fault_attr.times, -1);
988 | |
989 | for_each_prime_number_from(prime, 0, ULONG_MAX - 1) { |
990 | vm->fault_attr.interval = prime; |
991 | err = __shrink_hole(vm, hole_start, hole_end, end_time); |
992 | if (err) |
993 | break; |
994 | } |
995 | |
996 | memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); |
997 | |
998 | return err; |
999 | } |
1000 | |
1001 | static int shrink_boom(struct i915_address_space *vm, |
1002 | u64 hole_start, u64 hole_end, |
1003 | unsigned long end_time) |
1004 | { |
1005 | unsigned int sizes[] = { SZ_2M, SZ_1G }; |
1006 | struct drm_i915_gem_object *purge; |
1007 | struct drm_i915_gem_object *explode; |
1008 | int err; |
1009 | int i; |
1010 | |
1011 | /* |
1012 | * Catch the case which shrink_hole seems to miss. The setup here |
1013 | * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while |
	 * ensuring that all vma associated with the respective pd/pdp are
1015 | * unpinned at the time. |
1016 | */ |
1017 | |
1018 | for (i = 0; i < ARRAY_SIZE(sizes); ++i) { |
1019 | unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; |
1020 | unsigned int size = sizes[i]; |
1021 | struct i915_vma *vma; |
1022 | |
		purge = fake_dma_object(vm->i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);
1026 | |
		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1030 | goto err_purge; |
1031 | } |
1032 | |
		err = i915_vma_pin(vma, 0, 0, flags);
1034 | if (err) |
1035 | goto err_purge; |
1036 | |
1037 | /* Should now be ripe for purging */ |
1038 | i915_vma_unpin(vma); |
1039 | |
		explode = fake_dma_object(vm->i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
1043 | goto err_purge; |
1044 | } |
1045 | |
1046 | vm->fault_attr.probability = 100; |
1047 | vm->fault_attr.interval = 1; |
		atomic_set(&vm->fault_attr.times, -1);
1049 | |
		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1053 | goto err_explode; |
1054 | } |
1055 | |
		err = i915_vma_pin(vma, 0, 0, flags | size);
1057 | if (err) |
1058 | goto err_explode; |
1059 | |
1060 | i915_vma_unpin(vma); |
1061 | |
		i915_gem_object_put(purge);
		i915_gem_object_put(explode);
1064 | |
1065 | memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); |
		cleanup_freed_objects(vm->i915);
1067 | } |
1068 | |
1069 | return 0; |
1070 | |
1071 | err_explode: |
	i915_gem_object_put(explode);
1073 | err_purge: |
	i915_gem_object_put(purge);
1075 | memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); |
1076 | return err; |
1077 | } |
1078 | |
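/*
 * Pin an object from @mr at the (possibly misaligned) @addr and check
 * that the resulting vma and node sizes are expanded to the minimum
 * page size of the address space.
 */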
1079 | static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr, |
1080 | u64 addr, u64 size, unsigned long flags) |
1081 | { |
1082 | struct drm_i915_gem_object *obj; |
1083 | struct i915_vma *vma; |
1084 | int err = 0; |
1085 | u64 expected_vma_size, expected_node_size; |
1086 | bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM || |
1087 | mr->type == INTEL_MEMORY_STOLEN_LOCAL; |
1088 | |
	obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj)) {
		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
		if (PTR_ERR(obj) == -ENODEV && is_stolen)
			return 0;
		return PTR_ERR(obj);
1095 | } |
1096 | |
1097 | vma = i915_vma_instance(obj, vm, NULL); |
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
1100 | goto err_put; |
1101 | } |
1102 | |
	err = i915_vma_pin(vma, 0, 0, addr | flags);
1104 | if (err) |
1105 | goto err_put; |
1106 | i915_vma_unpin(vma); |
1107 | |
	if (!drm_mm_node_allocated(&vma->node)) {
1109 | err = -EINVAL; |
1110 | goto err_put; |
1111 | } |
1112 | |
	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1114 | err = -EINVAL; |
1115 | goto err_put; |
1116 | } |
1117 | |
1118 | expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1)); |
1119 | expected_node_size = expected_vma_size; |
1120 | |
1121 | if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) { |
1122 | expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K); |
1123 | expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K); |
1124 | } |
1125 | |
1126 | if (vma->size != expected_vma_size || vma->node.size != expected_node_size) { |
1127 | err = i915_vma_unbind_unlocked(vma); |
1128 | err = -EBADSLT; |
1129 | goto err_put; |
1130 | } |
1131 | |
1132 | err = i915_vma_unbind_unlocked(vma); |
1133 | if (err) |
1134 | goto err_put; |
1135 | |
1136 | GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); |
1137 | |
1138 | err_put: |
1139 | i915_gem_object_put(obj); |
	cleanup_freed_objects(vm->i915);
1141 | return err; |
1142 | } |
1143 | |
1144 | static int misaligned_pin(struct i915_address_space *vm, |
1145 | u64 hole_start, u64 hole_end, |
1146 | unsigned long end_time) |
1147 | { |
1148 | struct intel_memory_region *mr; |
1149 | enum intel_region_id id; |
1150 | unsigned long flags = PIN_OFFSET_FIXED | PIN_USER; |
1151 | int err = 0; |
1152 | u64 hole_size = hole_end - hole_start; |
1153 | |
1154 | if (i915_is_ggtt(vm)) |
1155 | flags |= PIN_GLOBAL; |
1156 | |
1157 | for_each_memory_region(mr, vm->i915, id) { |
		u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
1159 | u64 size = min_alignment; |
1160 | u64 addr = round_down(hole_start + (hole_size / 2), min_alignment); |
1161 | |
1162 | /* avoid -ENOSPC on very small hole setups */ |
1163 | if (hole_size < 3 * min_alignment) |
1164 | continue; |
1165 | |
1166 | /* we can't test < 4k alignment due to flags being encoded in lower bits */ |
1167 | if (min_alignment != I915_GTT_PAGE_SIZE_4K) { |
			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
			/* misaligned should error with -EINVAL */
1170 | if (!err) |
1171 | err = -EBADSLT; |
1172 | if (err != -EINVAL) |
1173 | return err; |
1174 | } |
1175 | |
1176 | /* test for vma->size expansion to min page size */ |
1177 | err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags); |
1178 | if (err) |
1179 | return err; |
1180 | |
1181 | /* test for intermediate size not expanding vma->size for large alignments */ |
		err = misaligned_case(vm, mr, addr, size / 2, flags);
1183 | if (err) |
1184 | return err; |
1185 | } |
1186 | |
1187 | return 0; |
1188 | } |
1189 | |
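/* Run @func over the whole address range of a freshly created ppgtt */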
1190 | static int exercise_ppgtt(struct drm_i915_private *dev_priv, |
1191 | int (*func)(struct i915_address_space *vm, |
1192 | u64 hole_start, u64 hole_end, |
1193 | unsigned long end_time)) |
1194 | { |
1195 | struct i915_ppgtt *ppgtt; |
1196 | IGT_TIMEOUT(end_time); |
1197 | struct file *file; |
1198 | int err; |
1199 | |
1200 | if (!HAS_FULL_PPGTT(dev_priv)) |
1201 | return 0; |
1202 | |
	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);
1206 | |
	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
1210 | goto out_free; |
1211 | } |
1212 | GEM_BUG_ON(offset_in_page(ppgtt->vm.total)); |
	assert_vm_alive(&ppgtt->vm);
1214 | |
1215 | err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time); |
1216 | |
	i915_vm_put(&ppgtt->vm);
1218 | |
1219 | out_free: |
1220 | fput(file); |
1221 | return err; |
1222 | } |
1223 | |
static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

static int igt_ppgtt_misaligned_pin(void *arg)
{
	return exercise_ppgtt(arg, misaligned_pin);
}
1263 | |
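/* Order the GGTT hole list by ascending start address for the walk below */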
1264 | static int sort_holes(void *priv, const struct list_head *A, |
1265 | const struct list_head *B) |
1266 | { |
1267 | struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack); |
1268 | struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack); |
1269 | |
1270 | if (a->start < b->start) |
1271 | return -1; |
1272 | else |
1273 | return 1; |
1274 | } |
1275 | |
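/*
 * Run @func on every hole in the GGTT. Each pass may manipulate the
 * drm_mm and so corrupt the hole list, hence the walk is restarted from
 * the last completed hole after every iteration.
 */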
1276 | static int exercise_ggtt(struct drm_i915_private *i915, |
1277 | int (*func)(struct i915_address_space *vm, |
1278 | u64 hole_start, u64 hole_end, |
1279 | unsigned long end_time)) |
1280 | { |
1281 | struct i915_ggtt *ggtt = to_gt(i915)->ggtt; |
1282 | u64 hole_start, hole_end, last = 0; |
1283 | struct drm_mm_node *node; |
1284 | IGT_TIMEOUT(end_time); |
1285 | int err = 0; |
1286 | |
1287 | restart: |
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1289 | drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) { |
1290 | if (hole_start < last) |
1291 | continue; |
1292 | |
1293 | if (ggtt->vm.mm.color_adjust) |
1294 | ggtt->vm.mm.color_adjust(node, 0, |
1295 | &hole_start, &hole_end); |
1296 | if (hole_start >= hole_end) |
1297 | continue; |
1298 | |
1299 | err = func(&ggtt->vm, hole_start, hole_end, end_time); |
1300 | if (err) |
1301 | break; |
1302 | |
1303 | /* As we have manipulated the drm_mm, the list may be corrupt */ |
1304 | last = hole_end; |
1305 | goto restart; |
1306 | } |
1307 | |
1308 | return err; |
1309 | } |
1310 | |
static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

static int igt_ggtt_misaligned_pin(void *arg)
{
	return exercise_ggtt(arg, misaligned_pin);
}
1340 | |
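/*
 * Check vm->insert_page() by writing a distinct value through the
 * mappable aperture into each page of a scratch node, in random order,
 * and then reading the values back.
 */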
1341 | static int igt_ggtt_page(void *arg) |
1342 | { |
1343 | const unsigned int count = PAGE_SIZE/sizeof(u32); |
1344 | I915_RND_STATE(prng); |
1345 | struct drm_i915_private *i915 = arg; |
1346 | struct i915_ggtt *ggtt = to_gt(i915)->ggtt; |
1347 | struct drm_i915_gem_object *obj; |
1348 | intel_wakeref_t wakeref; |
1349 | struct drm_mm_node tmp; |
1350 | unsigned int *order, n; |
1351 | int err; |
1352 | |
1353 | if (!i915_ggtt_has_aperture(ggtt)) |
1354 | return 0; |
1355 | |
1356 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); |
	if (IS_ERR(obj))
		return PTR_ERR(obj);
1359 | |
1360 | err = i915_gem_object_pin_pages_unlocked(obj); |
1361 | if (err) |
1362 | goto out_free; |
1363 | |
1364 | memset(&tmp, 0, sizeof(tmp)); |
1365 | mutex_lock(&ggtt->vm.mutex); |
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	mutex_unlock(&ggtt->vm.mutex);
1372 | if (err) |
1373 | goto out_unpin; |
1374 | |
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1376 | |
1377 | for (n = 0; n < count; n++) { |
1378 | u64 offset = tmp.start + n * PAGE_SIZE; |
1379 | |
1380 | ggtt->vm.insert_page(&ggtt->vm, |
1381 | i915_gem_object_get_dma_address(obj, 0), |
1382 | offset, |
1383 | i915_gem_get_pat_index(i915, |
							    I915_CACHE_NONE),
1385 | 0); |
1386 | } |
1387 | |
	order = i915_random_order(count, &prng);
1389 | if (!order) { |
1390 | err = -ENOMEM; |
1391 | goto out_remove; |
1392 | } |
1393 | |
1394 | for (n = 0; n < count; n++) { |
1395 | u64 offset = tmp.start + order[n] * PAGE_SIZE; |
1396 | u32 __iomem *vaddr; |
1397 | |
		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1399 | iowrite32(n, vaddr + n); |
1400 | io_mapping_unmap_atomic(vaddr); |
1401 | } |
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1403 | |
	i915_random_reorder(order, count, &prng);
1405 | for (n = 0; n < count; n++) { |
1406 | u64 offset = tmp.start + order[n] * PAGE_SIZE; |
1407 | u32 __iomem *vaddr; |
1408 | u32 val; |
1409 | |
		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1411 | val = ioread32(vaddr + n); |
1412 | io_mapping_unmap_atomic(vaddr); |
1413 | |
1414 | if (val != n) { |
			pr_err("insert page failed: found %d, expected %d\n",
1416 | val, n); |
1417 | err = -EINVAL; |
1418 | break; |
1419 | } |
1420 | } |
1421 | |
	kfree(order);
1423 | out_remove: |
1424 | ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); |
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(&tmp);
	mutex_unlock(&ggtt->vm.mutex);
1429 | out_unpin: |
1430 | i915_gem_object_unpin_pages(obj); |
1431 | out_free: |
1432 | i915_gem_object_put(obj); |
1433 | return err; |
1434 | } |
1435 | |
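/*
 * Mimic just enough of the binding for the mock tests: take the page
 * references, mark the vma as holding pages and move it onto the vm's
 * bound list.
 */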
1436 | static void track_vma_bind(struct i915_vma *vma) |
1437 | { |
1438 | struct drm_i915_gem_object *obj = vma->obj; |
1439 | |
1440 | __i915_gem_object_pin_pages(obj); |
1441 | |
1442 | GEM_BUG_ON(atomic_read(&vma->pages_count)); |
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1444 | __i915_gem_object_pin_pages(obj); |
1445 | vma->pages = obj->mm.pages; |
1446 | vma->resource->bi.pages = vma->pages; |
1447 | |
1448 | mutex_lock(&vma->vm->mutex); |
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
1451 | } |
1452 | |
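/* Run @func over the eb vm of a mock context, capped to available RAM */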
1453 | static int exercise_mock(struct drm_i915_private *i915, |
1454 | int (*func)(struct i915_address_space *vm, |
1455 | u64 hole_start, u64 hole_end, |
1456 | unsigned long end_time)) |
1457 | { |
1458 | const u64 limit = totalram_pages() << PAGE_SHIFT; |
1459 | struct i915_address_space *vm; |
1460 | struct i915_gem_context *ctx; |
1461 | IGT_TIMEOUT(end_time); |
1462 | int err; |
1463 | |
	ctx = mock_context(i915, "mock");
1465 | if (!ctx) |
1466 | return -ENOMEM; |
1467 | |
1468 | vm = i915_gem_context_get_eb_vm(ctx); |
1469 | err = func(vm, 0, min(vm->total, limit), end_time); |
1470 | i915_vm_put(vm); |
1471 | |
1472 | mock_context_close(ctx); |
1473 | return err; |
1474 | } |
1475 | |
static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}
1503 | |
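/*
 * Wrap i915_gem_gtt_reserve(), supplying the vma resource that would
 * normally be allocated when the vma is bound.
 */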
1504 | static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset) |
1505 | { |
1506 | struct i915_address_space *vm = vma->vm; |
1507 | struct i915_vma_resource *vma_res; |
1508 | struct drm_i915_gem_object *obj = vma->obj; |
1509 | int err; |
1510 | |
1511 | vma_res = i915_vma_resource_alloc(); |
	if (IS_ERR(vma_res))
		return PTR_ERR(vma_res);
1514 | |
1515 | mutex_lock(&vm->mutex); |
	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
				   offset,
				   obj->pat_index,
				   0);
1520 | if (!err) { |
1521 | i915_vma_resource_init_from_vma(vma_res, vma); |
1522 | vma->resource = vma_res; |
1523 | } else { |
		kfree(vma_res);
1525 | } |
	mutex_unlock(&vm->mutex);
1527 | |
1528 | return err; |
1529 | } |
1530 | |
1531 | static int igt_gtt_reserve(void *arg) |
1532 | { |
1533 | struct i915_ggtt *ggtt = arg; |
1534 | struct drm_i915_gem_object *obj, *on; |
1535 | I915_RND_STATE(prng); |
1536 | LIST_HEAD(objects); |
1537 | u64 total; |
1538 | int err = -ENODEV; |
1539 | |
1540 | /* i915_gem_gtt_reserve() tries to reserve the precise range |
1541 | * for the node, and evicts if it has to. So our test checks that |
	 * it can give us the requested space and prevent overlaps.
1543 | */ |
1544 | |
1545 | /* Start by filling the GGTT */ |
1546 | for (total = 0; |
1547 | total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; |
1548 | total += 2 * I915_GTT_PAGE_SIZE) { |
1549 | struct i915_vma *vma; |
1550 | |
		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
1555 | goto out; |
1556 | } |
1557 | |
1558 | err = i915_gem_object_pin_pages_unlocked(obj); |
1559 | if (err) { |
1560 | i915_gem_object_put(obj); |
1561 | goto out; |
1562 | } |
1563 | |
		list_add(&obj->st_link, &objects);
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1568 | goto out; |
1569 | } |
1570 | |
		err = reserve_gtt_with_resource(vma, total);
1572 | if (err) { |
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1574 | total, ggtt->vm.total, err); |
1575 | goto out; |
1576 | } |
1577 | track_vma_bind(vma); |
1578 | |
1579 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1580 | if (vma->node.start != total || |
1581 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1583 | vma->node.start, vma->node.size, |
1584 | total, 2*I915_GTT_PAGE_SIZE); |
1585 | err = -EINVAL; |
1586 | goto out; |
1587 | } |
1588 | } |
1589 | |
1590 | /* Now we start forcing evictions */ |
1591 | for (total = I915_GTT_PAGE_SIZE; |
1592 | total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; |
1593 | total += 2 * I915_GTT_PAGE_SIZE) { |
1594 | struct i915_vma *vma; |
1595 | |
		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
1600 | goto out; |
1601 | } |
1602 | |
1603 | err = i915_gem_object_pin_pages_unlocked(obj); |
1604 | if (err) { |
1605 | i915_gem_object_put(obj); |
1606 | goto out; |
1607 | } |
1608 | |
		list_add(&obj->st_link, &objects);
1610 | |
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1614 | goto out; |
1615 | } |
1616 | |
		err = reserve_gtt_with_resource(vma, total);
1618 | if (err) { |
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1620 | total, ggtt->vm.total, err); |
1621 | goto out; |
1622 | } |
1623 | track_vma_bind(vma); |
1624 | |
1625 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1626 | if (vma->node.start != total || |
1627 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1629 | vma->node.start, vma->node.size, |
1630 | total, 2*I915_GTT_PAGE_SIZE); |
1631 | err = -EINVAL; |
1632 | goto out; |
1633 | } |
1634 | } |
1635 | |
1636 | /* And then try at random */ |
1637 | list_for_each_entry_safe(obj, on, &objects, st_link) { |
1638 | struct i915_vma *vma; |
1639 | u64 offset; |
1640 | |
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1644 | goto out; |
1645 | } |
1646 | |
1647 | err = i915_vma_unbind_unlocked(vma); |
1648 | if (err) { |
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1650 | goto out; |
1651 | } |
1652 | |
		offset = igt_random_offset(&prng,
					   0, ggtt->vm.total,
					   2 * I915_GTT_PAGE_SIZE,
					   I915_GTT_MIN_ALIGNMENT);
1657 | |
1658 | err = reserve_gtt_with_resource(vma, offset); |
1659 | if (err) { |
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1661 | total, ggtt->vm.total, err); |
1662 | goto out; |
1663 | } |
1664 | track_vma_bind(vma); |
1665 | |
1666 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1667 | if (vma->node.start != offset || |
1668 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1670 | vma->node.start, vma->node.size, |
1671 | offset, 2*I915_GTT_PAGE_SIZE); |
1672 | err = -EINVAL; |
1673 | goto out; |
1674 | } |
1675 | } |
1676 | |
1677 | out: |
1678 | list_for_each_entry_safe(obj, on, &objects, st_link) { |
1679 | i915_gem_object_unpin_pages(obj); |
1680 | i915_gem_object_put(obj); |
1681 | } |
1682 | return err; |
1683 | } |
1684 | |
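/*
 * As for reserve_gtt_with_resource(), but wrapping i915_gem_gtt_insert()
 * to search for free space anywhere in the address space.
 */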
1685 | static int insert_gtt_with_resource(struct i915_vma *vma) |
1686 | { |
1687 | struct i915_address_space *vm = vma->vm; |
1688 | struct i915_vma_resource *vma_res; |
1689 | struct drm_i915_gem_object *obj = vma->obj; |
1690 | int err; |
1691 | |
1692 | vma_res = i915_vma_resource_alloc(); |
	if (IS_ERR(vma_res))
		return PTR_ERR(vma_res);
1695 | |
1696 | mutex_lock(&vm->mutex); |
	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
				  obj->pat_index, 0, vm->total, 0);
1699 | if (!err) { |
1700 | i915_vma_resource_init_from_vma(vma_res, vma); |
1701 | vma->resource = vma_res; |
1702 | } else { |
		kfree(vma_res);
1704 | } |
	mutex_unlock(&vm->mutex);
1706 | |
1707 | return err; |
1708 | } |
1709 | |
1710 | static int igt_gtt_insert(void *arg) |
1711 | { |
1712 | struct i915_ggtt *ggtt = arg; |
1713 | struct drm_i915_gem_object *obj, *on; |
1714 | struct drm_mm_node tmp = {}; |
1715 | const struct invalid_insert { |
1716 | u64 size; |
1717 | u64 alignment; |
1718 | u64 start, end; |
1719 | } invalid_insert[] = { |
1720 | { |
1721 | ggtt->vm.total + I915_GTT_PAGE_SIZE, 0, |
1722 | 0, ggtt->vm.total, |
1723 | }, |
1724 | { |
1725 | 2*I915_GTT_PAGE_SIZE, 0, |
1726 | 0, I915_GTT_PAGE_SIZE, |
1727 | }, |
1728 | { |
1729 | -(u64)I915_GTT_PAGE_SIZE, 0, |
1730 | 0, 4*I915_GTT_PAGE_SIZE, |
1731 | }, |
1732 | { |
1733 | -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE, |
1734 | 0, 4*I915_GTT_PAGE_SIZE, |
1735 | }, |
1736 | { |
1737 | I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1, |
1738 | I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1, |
1739 | }, |
1740 | {} |
1741 | }, *ii; |
1742 | LIST_HEAD(objects); |
1743 | u64 total; |
1744 | int err = -ENODEV; |
1745 | |
1746 | /* i915_gem_gtt_insert() tries to allocate some free space in the GTT |
1747 | * to the node, evicting if required. |
1748 | */ |
1749 | |
1750 | /* Check a couple of obviously invalid requests */ |
1751 | for (ii = invalid_insert; ii->size; ii++) { |
1752 | mutex_lock(&ggtt->vm.mutex); |
		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
1759 | if (err != -ENOSPC) { |
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1761 | ii->size, ii->alignment, ii->start, ii->end, |
1762 | err); |
1763 | return -EINVAL; |
1764 | } |
1765 | } |
1766 | |
1767 | /* Start by filling the GGTT */ |
1768 | for (total = 0; |
1769 | total + I915_GTT_PAGE_SIZE <= ggtt->vm.total; |
1770 | total += I915_GTT_PAGE_SIZE) { |
1771 | struct i915_vma *vma; |
1772 | |
		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
1777 | goto out; |
1778 | } |
1779 | |
1780 | err = i915_gem_object_pin_pages_unlocked(obj); |
1781 | if (err) { |
1782 | i915_gem_object_put(obj); |
1783 | goto out; |
1784 | } |
1785 | |
		list_add(&obj->st_link, &objects);
1787 | |
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1791 | goto out; |
1792 | } |
1793 | |
1794 | err = insert_gtt_with_resource(vma); |
1795 | if (err == -ENOSPC) { |
1796 | /* maxed out the GGTT space */ |
1797 | i915_gem_object_put(obj); |
1798 | break; |
1799 | } |
1800 | if (err) { |
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1802 | total, ggtt->vm.total, err); |
1803 | goto out; |
1804 | } |
1805 | track_vma_bind(vma); |
1806 | __i915_vma_pin(vma); |
1807 | |
1808 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1809 | } |
1810 | |
1811 | list_for_each_entry(obj, &objects, st_link) { |
1812 | struct i915_vma *vma; |
1813 | |
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1817 | goto out; |
1818 | } |
1819 | |
		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
1822 | err = -EINVAL; |
1823 | goto out; |
1824 | } |
1825 | |
1826 | __i915_vma_unpin(vma); |
1827 | } |
1828 | |
1829 | /* If we then reinsert, we should find the same hole */ |
1830 | list_for_each_entry_safe(obj, on, &objects, st_link) { |
1831 | struct i915_vma *vma; |
1832 | u64 offset; |
1833 | |
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1837 | goto out; |
1838 | } |
1839 | |
1840 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1841 | offset = vma->node.start; |
1842 | |
1843 | err = i915_vma_unbind_unlocked(vma); |
1844 | if (err) { |
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1846 | goto out; |
1847 | } |
1848 | |
1849 | err = insert_gtt_with_resource(vma); |
1850 | if (err) { |
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1852 | total, ggtt->vm.total, err); |
1853 | goto out; |
1854 | } |
1855 | track_vma_bind(vma); |
1856 | |
1857 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1858 | if (vma->node.start != offset) { |
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1860 | offset, vma->node.start); |
1861 | err = -EINVAL; |
1862 | goto out; |
1863 | } |
1864 | } |
1865 | |
1866 | /* And then force evictions */ |
1867 | for (total = 0; |
1868 | total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; |
1869 | total += 2 * I915_GTT_PAGE_SIZE) { |
1870 | struct i915_vma *vma; |
1871 | |
		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
1876 | goto out; |
1877 | } |
1878 | |
1879 | err = i915_gem_object_pin_pages_unlocked(obj); |
1880 | if (err) { |
1881 | i915_gem_object_put(obj); |
1882 | goto out; |
1883 | } |
1884 | |
		list_add(&obj->st_link, &objects);
1886 | |
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1890 | goto out; |
1891 | } |
1892 | |
1893 | err = insert_gtt_with_resource(vma); |
1894 | if (err) { |
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1896 | total, ggtt->vm.total, err); |
1897 | goto out; |
1898 | } |
1899 | track_vma_bind(vma); |
1900 | |
1901 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1902 | } |
1903 | |
1904 | out: |
1905 | list_for_each_entry_safe(obj, on, &objects, st_link) { |
1906 | i915_gem_object_unpin_pages(obj); |
1907 | i915_gem_object_put(obj); |
1908 | } |
1909 | return err; |
1910 | } |
1911 | |
1912 | int i915_gem_gtt_mock_selftests(void) |
1913 | { |
1914 | static const struct i915_subtest tests[] = { |
1915 | SUBTEST(igt_mock_drunk), |
1916 | SUBTEST(igt_mock_walk), |
1917 | SUBTEST(igt_mock_pot), |
1918 | SUBTEST(igt_mock_fill), |
1919 | SUBTEST(igt_gtt_reserve), |
1920 | SUBTEST(igt_gtt_insert), |
1921 | }; |
1922 | struct drm_i915_private *i915; |
1923 | struct intel_gt *gt; |
1924 | int err; |
1925 | |
1926 | i915 = mock_gem_device(); |
1927 | if (!i915) |
1928 | return -ENOMEM; |
1929 | |
1930 | /* allocate the ggtt */ |
	err = intel_gt_assign_ggtt(to_gt(i915));
1932 | if (err) |
1933 | goto out_put; |
1934 | |
1935 | gt = to_gt(i915); |
1936 | |
1937 | mock_init_ggtt(gt); |
1938 | |
1939 | err = i915_subtests(tests, gt->ggtt); |
1940 | |
1941 | mock_device_flush(i915); |
1942 | i915_gem_drain_freed_objects(i915); |
	mock_fini_ggtt(gt->ggtt);
1944 | |
1945 | out_put: |
1946 | mock_destroy_device(i915); |
1947 | return err; |
1948 | } |
1949 | |
1950 | int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) |
1951 | { |
1952 | static const struct i915_subtest tests[] = { |
1953 | SUBTEST(igt_ppgtt_alloc), |
1954 | SUBTEST(igt_ppgtt_lowlevel), |
1955 | SUBTEST(igt_ppgtt_drunk), |
1956 | SUBTEST(igt_ppgtt_walk), |
1957 | SUBTEST(igt_ppgtt_pot), |
1958 | SUBTEST(igt_ppgtt_fill), |
1959 | SUBTEST(igt_ppgtt_shrink), |
1960 | SUBTEST(igt_ppgtt_shrink_boom), |
1961 | SUBTEST(igt_ppgtt_misaligned_pin), |
1962 | SUBTEST(igt_ggtt_lowlevel), |
1963 | SUBTEST(igt_ggtt_drunk), |
1964 | SUBTEST(igt_ggtt_walk), |
1965 | SUBTEST(igt_ggtt_pot), |
1966 | SUBTEST(igt_ggtt_fill), |
1967 | SUBTEST(igt_ggtt_page), |
1968 | SUBTEST(igt_ggtt_misaligned_pin), |
1969 | }; |
1970 | |
1971 | GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total)); |
1972 | |
1973 | return i915_live_subtests(tests, i915); |
1974 | } |
1975 | |