1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2016 Intel Corporation |
5 | */ |
6 | |
7 | #include <linux/highmem.h> |
8 | #include <linux/prime_numbers.h> |
9 | |
10 | #include "gem/i915_gem_internal.h" |
11 | #include "gem/i915_gem_lmem.h" |
12 | #include "gem/i915_gem_region.h" |
13 | #include "gem/i915_gem_ttm.h" |
14 | #include "gem/i915_gem_ttm_move.h" |
15 | #include "gt/intel_engine_pm.h" |
16 | #include "gt/intel_gpu_commands.h" |
17 | #include "gt/intel_gt.h" |
18 | #include "gt/intel_gt_pm.h" |
19 | #include "gt/intel_migrate.h" |
20 | #include "i915_reg.h" |
21 | #include "i915_ttm_buddy_manager.h" |
22 | |
23 | #include "huge_gem_object.h" |
24 | #include "i915_selftest.h" |
25 | #include "selftests/i915_random.h" |
26 | #include "selftests/igt_flush_test.h" |
27 | #include "selftests/igt_reset.h" |
28 | #include "selftests/igt_mmap.h" |
29 | |
30 | struct tile { |
31 | unsigned int width; |
32 | unsigned int height; |
33 | unsigned int stride; |
34 | unsigned int size; |
35 | unsigned int tiling; |
36 | unsigned int swizzle; |
37 | }; |
38 | |
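/*
 * Helpers to predict where a write through a tiled GGTT mapping should
 * land in the object's linear backing store: swizzle_bit() folds the
 * selected address bit down into bit 6, and tiled_offset() applies the
 * X/Y tile layout before the swizzle.
 */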
39 | static u64 swizzle_bit(unsigned int bit, u64 offset) |
40 | { |
41 | return (offset & BIT_ULL(bit)) >> (bit - 6); |
42 | } |
43 | |
44 | static u64 tiled_offset(const struct tile *tile, u64 v) |
45 | { |
46 | u64 x, y; |
47 | |
48 | if (tile->tiling == I915_TILING_NONE) |
49 | return v; |
50 | |
	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
53 | |
54 | if (tile->tiling == I915_TILING_X) { |
55 | v += y * tile->width; |
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
57 | v += x; |
58 | } else if (tile->width == 128) { |
59 | const unsigned int ytile_span = 16; |
60 | const unsigned int ytile_height = 512; |
61 | |
62 | v += y * ytile_span; |
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
64 | v += x; |
65 | } else { |
66 | const unsigned int ytile_span = 32; |
67 | const unsigned int ytile_height = 256; |
68 | |
69 | v += y * ytile_span; |
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
71 | v += x; |
72 | } |
73 | |
74 | switch (tile->swizzle) { |
75 | case I915_BIT_6_SWIZZLE_9: |
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
86 | break; |
87 | } |
88 | |
89 | return v; |
90 | } |
91 | |
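/*
 * Write one dword through a randomly chosen partial GGTT view of the
 * object, then read it back via the CPU to confirm it landed at the
 * offset predicted by tiled_offset() for this tiling/swizzle mode.
 */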
92 | static int check_partial_mapping(struct drm_i915_gem_object *obj, |
93 | const struct tile *tile, |
94 | struct rnd_state *prng) |
95 | { |
96 | const unsigned long npages = obj->base.size / PAGE_SIZE; |
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
98 | struct i915_gtt_view view; |
99 | struct i915_vma *vma; |
100 | unsigned long offset; |
101 | unsigned long page; |
102 | u32 __iomem *io; |
103 | struct page *p; |
104 | unsigned int n; |
105 | u32 *cpu; |
106 | int err; |
107 | |
	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
111 | tile->tiling, tile->stride, err); |
112 | return err; |
113 | } |
114 | |
115 | GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); |
116 | GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); |
117 | |
118 | i915_gem_object_lock(obj, NULL); |
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
123 | return err; |
124 | } |
125 | |
	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
134 | } |
135 | |
136 | n = page - view.partial.offset; |
137 | GEM_BUG_ON(n >= view.partial.size); |
138 | |
139 | io = i915_vma_pin_iomap(vma); |
140 | i915_vma_unpin(vma); |
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
145 | goto out; |
146 | } |
147 | |
148 | iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); |
149 | i915_vma_unpin_iomap(vma); |
150 | |
	offset = tiled_offset(tile, page << PAGE_SHIFT);
152 | if (offset >= obj->base.size) |
153 | goto out; |
154 | |
	intel_gt_flush_ggtt_writes(to_gt(i915));
156 | |
157 | p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); |
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
162 | page, n, |
163 | view.partial.offset, |
164 | view.partial.size, |
165 | vma->size >> PAGE_SHIFT, |
166 | tile->tiling ? tile_row_pages(obj) : 0, |
167 | vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, |
168 | offset >> PAGE_SHIFT, |
169 | (unsigned int)offset_in_page(offset), |
170 | offset, |
171 | (u32)page, *cpu); |
172 | err = -EINVAL; |
173 | } |
174 | *cpu = 0; |
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);
177 | |
178 | out: |
179 | i915_gem_object_lock(obj, NULL); |
180 | i915_vma_destroy(vma); |
181 | i915_gem_object_unlock(obj); |
182 | return err; |
183 | } |
184 | |
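/*
 * As check_partial_mapping(), but walk the object at prime-numbered page
 * offsets so that, within the timeout, we sample many different partial
 * view chunkings for the given tiling parameters.
 */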
185 | static int check_partial_mappings(struct drm_i915_gem_object *obj, |
186 | const struct tile *tile, |
187 | unsigned long end_time) |
188 | { |
189 | const unsigned int nreal = obj->scratch / PAGE_SIZE; |
190 | const unsigned long npages = obj->base.size / PAGE_SIZE; |
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
192 | struct i915_vma *vma; |
193 | unsigned long page; |
194 | int err; |
195 | |
	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
199 | tile->tiling, tile->stride, err); |
200 | return err; |
201 | } |
202 | |
203 | GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); |
204 | GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); |
205 | |
206 | i915_gem_object_lock(obj, NULL); |
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
211 | return err; |
212 | } |
213 | |
214 | for_each_prime_number_from(page, 1, npages) { |
215 | struct i915_gtt_view view = |
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
217 | unsigned long offset; |
218 | u32 __iomem *io; |
219 | struct page *p; |
220 | unsigned int n; |
221 | u32 *cpu; |
222 | |
223 | GEM_BUG_ON(view.partial.size > nreal); |
224 | cond_resched(); |
225 | |
		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
231 | } |
232 | |
233 | n = page - view.partial.offset; |
234 | GEM_BUG_ON(n >= view.partial.size); |
235 | |
236 | io = i915_vma_pin_iomap(vma); |
237 | i915_vma_unpin(vma); |
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
242 | } |
243 | |
244 | iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); |
245 | i915_vma_unpin_iomap(vma); |
246 | |
		offset = tiled_offset(tile, page << PAGE_SHIFT);
248 | if (offset >= obj->base.size) |
249 | continue; |
250 | |
		intel_gt_flush_ggtt_writes(to_gt(i915));
252 | |
253 | p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); |
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
258 | page, n, |
259 | view.partial.offset, |
260 | view.partial.size, |
261 | vma->size >> PAGE_SHIFT, |
262 | tile->tiling ? tile_row_pages(obj) : 0, |
263 | vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, |
264 | offset >> PAGE_SHIFT, |
265 | (unsigned int)offset_in_page(offset), |
266 | offset, |
267 | (u32)page, *cpu); |
268 | err = -EINVAL; |
269 | } |
270 | *cpu = 0; |
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
273 | if (err) |
274 | return err; |
275 | |
276 | i915_gem_object_lock(obj, NULL); |
277 | i915_vma_destroy(vma); |
278 | i915_gem_object_unlock(obj); |
279 | |
280 | if (igt_timeout(end_time, |
				"%s: timed out after tiling=%d stride=%d\n",
282 | __func__, tile->tiling, tile->stride)) |
283 | return -EINTR; |
284 | } |
285 | |
286 | return 0; |
287 | } |
288 | |
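/*
 * Fill in the tile geometry for this platform and return the maximum
 * fence pitch, expressed in units of tile width.
 */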
289 | static unsigned int |
290 | setup_tile_size(struct tile *tile, struct drm_i915_private *i915) |
291 | { |
292 | if (GRAPHICS_VER(i915) <= 2) { |
293 | tile->height = 16; |
294 | tile->width = 128; |
295 | tile->size = 11; |
296 | } else if (tile->tiling == I915_TILING_Y && |
297 | HAS_128_BYTE_Y_TILING(i915)) { |
298 | tile->height = 32; |
299 | tile->width = 128; |
300 | tile->size = 12; |
301 | } else { |
302 | tile->height = 8; |
303 | tile->width = 512; |
304 | tile->size = 12; |
305 | } |
306 | |
307 | if (GRAPHICS_VER(i915) < 4) |
308 | return 8192 / tile->width; |
309 | else if (GRAPHICS_VER(i915) < 7) |
310 | return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width; |
311 | else |
312 | return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width; |
313 | } |
314 | |
315 | static int igt_partial_tiling(void *arg) |
316 | { |
317 | const unsigned int nreal = 1 << 12; /* largest tile row x2 */ |
318 | struct drm_i915_private *i915 = arg; |
319 | struct drm_i915_gem_object *obj; |
320 | intel_wakeref_t wakeref; |
321 | int tiling; |
322 | int err; |
323 | |
	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
325 | return 0; |
326 | |
327 | /* We want to check the page mapping and fencing of a large object |
328 | * mmapped through the GTT. The object we create is larger than can |
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
330 | * We then check that a write through each partial GGTT vma ends up |
331 | * in the right set of pages within the object, and with the expected |
332 | * tiling, which we verify by manual swizzling. |
333 | */ |
334 | |
335 | obj = huge_gem_object(i915, |
			       nreal << PAGE_SHIFT,
			       (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
340 | |
341 | err = i915_gem_object_pin_pages_unlocked(obj); |
342 | if (err) { |
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
344 | nreal, obj->base.size / PAGE_SIZE, err); |
345 | goto out; |
346 | } |
347 | |
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
349 | |
350 | if (1) { |
351 | IGT_TIMEOUT(end); |
352 | struct tile tile; |
353 | |
354 | tile.height = 1; |
355 | tile.width = 1; |
356 | tile.size = 0; |
357 | tile.stride = 0; |
358 | tile.swizzle = I915_BIT_6_SWIZZLE_NONE; |
359 | tile.tiling = I915_TILING_NONE; |
360 | |
		err = check_partial_mappings(obj, &tile, end);
362 | if (err && err != -EINTR) |
363 | goto out_unlock; |
364 | } |
365 | |
366 | for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) { |
367 | IGT_TIMEOUT(end); |
368 | unsigned int max_pitch; |
369 | unsigned int pitch; |
370 | struct tile tile; |
371 | |
372 | if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) |
373 | /* |
374 | * The swizzling pattern is actually unknown as it |
375 | * varies based on physical address of each page. |
376 | * See i915_gem_detect_bit_6_swizzle(). |
377 | */ |
378 | break; |
379 | |
380 | tile.tiling = tiling; |
381 | switch (tiling) { |
382 | case I915_TILING_X: |
383 | tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; |
384 | break; |
385 | case I915_TILING_Y: |
386 | tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; |
387 | break; |
388 | } |
389 | |
390 | GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); |
391 | if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || |
392 | tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) |
393 | continue; |
394 | |
		max_pitch = setup_tile_size(&tile, i915);
396 | |
397 | for (pitch = max_pitch; pitch; pitch >>= 1) { |
398 | tile.stride = tile.width * pitch; |
			err = check_partial_mappings(obj, &tile, end);
400 | if (err == -EINTR) |
401 | goto next_tiling; |
402 | if (err) |
403 | goto out_unlock; |
404 | |
405 | if (pitch > 2 && GRAPHICS_VER(i915) >= 4) { |
406 | tile.stride = tile.width * (pitch - 1); |
				err = check_partial_mappings(obj, &tile, end);
408 | if (err == -EINTR) |
409 | goto next_tiling; |
410 | if (err) |
411 | goto out_unlock; |
412 | } |
413 | |
414 | if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) { |
415 | tile.stride = tile.width * (pitch + 1); |
				err = check_partial_mappings(obj, &tile, end);
417 | if (err == -EINTR) |
418 | goto next_tiling; |
419 | if (err) |
420 | goto out_unlock; |
421 | } |
422 | } |
423 | |
424 | if (GRAPHICS_VER(i915) >= 4) { |
425 | for_each_prime_number(pitch, max_pitch) { |
426 | tile.stride = tile.width * pitch; |
				err = check_partial_mappings(obj, &tile, end);
428 | if (err == -EINTR) |
429 | goto next_tiling; |
430 | if (err) |
431 | goto out_unlock; |
432 | } |
433 | } |
434 | |
435 | next_tiling: ; |
436 | } |
437 | |
438 | out_unlock: |
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
440 | i915_gem_object_unpin_pages(obj); |
441 | out: |
442 | i915_gem_object_put(obj); |
443 | return err; |
444 | } |
445 | |
446 | static int igt_smoke_tiling(void *arg) |
447 | { |
448 | const unsigned int nreal = 1 << 12; /* largest tile row x2 */ |
449 | struct drm_i915_private *i915 = arg; |
450 | struct drm_i915_gem_object *obj; |
451 | intel_wakeref_t wakeref; |
452 | I915_RND_STATE(prng); |
453 | unsigned long count; |
454 | IGT_TIMEOUT(end); |
455 | int err; |
456 | |
	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
458 | return 0; |
459 | |
460 | /* |
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
463 | * randomised search and hope over many runs of 1s with different |
464 | * seeds we will do a thorough check. |
465 | * |
466 | * Remember to look at the st_seed if we see a flip-flop in BAT! |
467 | */ |
468 | |
469 | if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) |
470 | return 0; |
471 | |
472 | obj = huge_gem_object(i915, |
			       nreal << PAGE_SHIFT,
			       (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
477 | |
478 | err = i915_gem_object_pin_pages_unlocked(obj); |
479 | if (err) { |
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
481 | nreal, obj->base.size / PAGE_SIZE, err); |
482 | goto out; |
483 | } |
484 | |
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
486 | |
487 | count = 0; |
488 | do { |
489 | struct tile tile; |
490 | |
491 | tile.tiling = |
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
493 | switch (tile.tiling) { |
494 | case I915_TILING_NONE: |
495 | tile.height = 1; |
496 | tile.width = 1; |
497 | tile.size = 0; |
498 | tile.stride = 0; |
499 | tile.swizzle = I915_BIT_6_SWIZZLE_NONE; |
500 | break; |
501 | |
502 | case I915_TILING_X: |
503 | tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; |
504 | break; |
505 | case I915_TILING_Y: |
506 | tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; |
507 | break; |
508 | } |
509 | |
510 | if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || |
511 | tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) |
512 | continue; |
513 | |
514 | if (tile.tiling != I915_TILING_NONE) { |
			unsigned int max_pitch = setup_tile_size(&tile, i915);
516 | |
517 | tile.stride = |
				i915_prandom_u32_max_state(max_pitch, &prng);
519 | tile.stride = (1 + tile.stride) * tile.width; |
520 | if (GRAPHICS_VER(i915) < 4) |
521 | tile.stride = rounddown_pow_of_two(tile.stride); |
522 | } |
523 | |
		err = check_partial_mapping(obj, &tile, &prng);
525 | if (err) |
526 | break; |
527 | |
528 | count++; |
	} while (!__igt_timeout(end, NULL));
530 | |
	pr_info("%s: Completed %lu trials\n", __func__, count);
532 | |
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
534 | i915_gem_object_unpin_pages(obj); |
535 | out: |
536 | i915_gem_object_put(obj); |
537 | return err; |
538 | } |
539 | |
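/*
 * Submit a dummy write to the object on every uabi engine and then drop
 * our reference, leaving the object alive only via its active requests.
 */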
540 | static int make_obj_busy(struct drm_i915_gem_object *obj) |
541 | { |
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
543 | struct intel_engine_cs *engine; |
544 | |
545 | for_each_uabi_engine(engine, i915) { |
546 | struct i915_request *rq; |
547 | struct i915_vma *vma; |
548 | struct i915_gem_ww_ctx ww; |
549 | int err; |
550 | |
		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
554 | |
		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
560 | if (err) |
561 | goto err; |
562 | |
563 | rq = intel_engine_create_kernel_request(engine); |
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
566 | goto err_unpin; |
567 | } |
568 | |
569 | err = i915_vma_move_to_active(vma, rq, |
570 | EXEC_OBJECT_WRITE); |
571 | |
572 | i915_request_add(rq); |
573 | err_unpin: |
574 | i915_vma_unpin(vma); |
575 | err: |
576 | if (err == -EDEADLK) { |
			err = i915_gem_ww_ctx_backoff(&ww);
578 | if (!err) |
579 | goto retry; |
580 | } |
		i915_gem_ww_ctx_fini(&ww);
582 | if (err) |
583 | return err; |
584 | } |
585 | |
586 | i915_gem_object_put(obj); /* leave it only alive via its active ref */ |
587 | return 0; |
588 | } |
589 | |
590 | static enum i915_mmap_type default_mapping(struct drm_i915_private *i915) |
591 | { |
592 | if (HAS_LMEM(i915)) |
593 | return I915_MMAP_TYPE_FIXED; |
594 | |
595 | return I915_MMAP_TYPE_GTT; |
596 | } |
597 | |
598 | static struct drm_i915_gem_object * |
599 | create_sys_or_internal(struct drm_i915_private *i915, |
600 | unsigned long size) |
601 | { |
602 | if (HAS_LMEM(i915)) { |
603 | struct intel_memory_region *sys_region = |
604 | i915->mm.regions[INTEL_REGION_SMEM]; |
605 | |
		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
607 | } |
608 | |
609 | return i915_gem_object_create_internal(i915, size); |
610 | } |
611 | |
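/* Create a fresh object and check that assigning an mmap offset returns the expected error code. */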
612 | static bool assert_mmap_offset(struct drm_i915_private *i915, |
613 | unsigned long size, |
614 | int expected) |
615 | { |
616 | struct drm_i915_gem_object *obj; |
617 | u64 offset; |
618 | int ret; |
619 | |
620 | obj = create_sys_or_internal(i915, size); |
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);
623 | |
	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
625 | i915_gem_object_put(obj); |
626 | |
627 | return ret == expected; |
628 | } |
629 | |
630 | static void disable_retire_worker(struct drm_i915_private *i915) |
631 | { |
632 | i915_gem_driver_unregister__shrinker(i915); |
	intel_gt_pm_get_untracked(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
635 | } |
636 | |
637 | static void restore_retire_worker(struct drm_i915_private *i915) |
638 | { |
639 | igt_flush_test(i915); |
	intel_gt_pm_put_untracked(to_gt(i915));
641 | i915_gem_driver_register__shrinker(i915); |
642 | } |
643 | |
644 | static void mmap_offset_lock(struct drm_i915_private *i915) |
645 | __acquires(&i915->drm.vma_offset_manager->vm_lock) |
646 | { |
647 | write_lock(&i915->drm.vma_offset_manager->vm_lock); |
648 | } |
649 | |
650 | static void mmap_offset_unlock(struct drm_i915_private *i915) |
651 | __releases(&i915->drm.vma_offset_manager->vm_lock) |
652 | { |
653 | write_unlock(&i915->drm.vma_offset_manager->vm_lock); |
654 | } |
655 | |
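/*
 * Trim the mmap offset space down to a single page and check that offset
 * allocation succeeds only while the hole is free, failing gracefully
 * once it has been claimed.
 */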
656 | static int igt_mmap_offset_exhaustion(void *arg) |
657 | { |
658 | struct drm_i915_private *i915 = arg; |
659 | struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; |
660 | struct drm_i915_gem_object *obj; |
661 | struct drm_mm_node *hole, *next; |
662 | int loop, err = 0; |
663 | u64 offset; |
664 | int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC; |
665 | |
666 | /* Disable background reaper */ |
667 | disable_retire_worker(i915); |
668 | GEM_BUG_ON(!to_gt(i915)->awake); |
	intel_gt_retire_requests(to_gt(i915));
670 | i915_gem_drain_freed_objects(i915); |
671 | |
672 | /* Trim the device mmap space to only a page */ |
673 | mmap_offset_lock(i915); |
674 | loop = 1; /* PAGE_SIZE units */ |
675 | list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) { |
676 | struct drm_mm_node *resv; |
677 | |
		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
679 | if (!resv) { |
680 | err = -ENOMEM; |
681 | goto out_park; |
682 | } |
683 | |
		resv->start = drm_mm_hole_node_start(hole) + loop;
685 | resv->size = hole->hole_size - loop; |
686 | resv->color = -1ul; |
687 | loop = 0; |
688 | |
689 | if (!resv->size) { |
			kfree(resv);
691 | continue; |
692 | } |
693 | |
		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
701 | goto out_park; |
702 | } |
703 | } |
704 | GEM_BUG_ON(!list_is_singular(&mm->hole_stack)); |
705 | mmap_offset_unlock(i915); |
706 | |
707 | /* Just fits! */ |
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
710 | err = -EINVAL; |
711 | goto out; |
712 | } |
713 | |
714 | /* Too large */ |
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
717 | err = -EINVAL; |
718 | goto out; |
719 | } |
720 | |
721 | /* Fill the hole, further allocation attempts should then fail */ |
722 | obj = create_sys_or_internal(i915, PAGE_SIZE); |
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
726 | goto out; |
727 | } |
728 | |
	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
732 | goto err_obj; |
733 | } |
734 | |
	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
737 | err = -EINVAL; |
738 | goto err_obj; |
739 | } |
740 | |
741 | i915_gem_object_put(obj); |
742 | |
743 | /* Now fill with busy dead objects that we expect to reap */ |
744 | for (loop = 0; loop < 3; loop++) { |
		if (intel_gt_is_wedged(to_gt(i915)))
746 | break; |
747 | |
748 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); |
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
751 | goto out; |
752 | } |
753 | |
754 | err = make_obj_busy(obj); |
755 | if (err) { |
			pr_err("[loop %d] Failed to busy the object\n", loop);
757 | goto err_obj; |
758 | } |
759 | } |
760 | |
761 | out: |
762 | mmap_offset_lock(i915); |
763 | out_park: |
764 | drm_mm_for_each_node_safe(hole, next, mm) { |
765 | if (hole->color != -1ul) |
766 | continue; |
767 | |
		drm_mm_remove_node(hole);
		kfree(hole);
770 | } |
771 | mmap_offset_unlock(i915); |
772 | restore_retire_worker(i915); |
773 | return err; |
774 | err_obj: |
775 | i915_gem_object_put(obj); |
776 | goto out; |
777 | } |
778 | |
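/*
 * gtt_set()/gtt_check() poison and verify the object's contents through a
 * GGTT iomap, while wc_set()/wc_check() below do the same via a WC CPU
 * mapping; together they confirm mmap writes reach the backing store.
 */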
779 | static int gtt_set(struct drm_i915_gem_object *obj) |
780 | { |
781 | intel_wakeref_t wakeref; |
782 | struct i915_vma *vma; |
783 | void __iomem *map; |
784 | int err = 0; |
785 | |
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
789 | |
	wakeref = intel_gt_pm_get(vma->vm->gt);
791 | map = i915_vma_pin_iomap(vma); |
792 | i915_vma_unpin(vma); |
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
795 | goto out; |
796 | } |
797 | |
798 | memset_io(map, POISON_INUSE, obj->base.size); |
799 | i915_vma_unpin_iomap(vma); |
800 | |
801 | out: |
	intel_gt_pm_put(vma->vm->gt, wakeref);
803 | return err; |
804 | } |
805 | |
806 | static int gtt_check(struct drm_i915_gem_object *obj) |
807 | { |
808 | intel_wakeref_t wakeref; |
809 | struct i915_vma *vma; |
810 | void __iomem *map; |
811 | int err = 0; |
812 | |
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
816 | |
	wakeref = intel_gt_pm_get(vma->vm->gt);
818 | map = i915_vma_pin_iomap(vma); |
819 | i915_vma_unpin(vma); |
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
822 | goto out; |
823 | } |
824 | |
	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
827 | obj->mm.region->name); |
828 | err = -EINVAL; |
829 | } |
830 | i915_vma_unpin_iomap(vma); |
831 | |
832 | out: |
	intel_gt_pm_put(vma->vm->gt, wakeref);
834 | return err; |
835 | } |
836 | |
837 | static int wc_set(struct drm_i915_gem_object *obj) |
838 | { |
839 | void *vaddr; |
840 | |
	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
844 | |
845 | memset(vaddr, POISON_INUSE, obj->base.size); |
846 | i915_gem_object_flush_map(obj); |
847 | i915_gem_object_unpin_map(obj); |
848 | |
849 | return 0; |
850 | } |
851 | |
852 | static int wc_check(struct drm_i915_gem_object *obj) |
853 | { |
854 | void *vaddr; |
855 | int err = 0; |
856 | |
	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
860 | |
	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
863 | obj->mm.region->name); |
864 | err = -EINVAL; |
865 | } |
866 | i915_gem_object_unpin_map(obj); |
867 | |
868 | return err; |
869 | } |
870 | |
871 | static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) |
872 | { |
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
874 | bool no_map; |
875 | |
876 | if (obj->ops->mmap_offset) |
877 | return type == I915_MMAP_TYPE_FIXED; |
878 | else if (type == I915_MMAP_TYPE_FIXED) |
879 | return false; |
880 | |
881 | if (type == I915_MMAP_TYPE_GTT && |
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
883 | return false; |
884 | |
885 | i915_gem_object_lock(obj, NULL); |
886 | no_map = (type != I915_MMAP_TYPE_GTT && |
887 | !i915_gem_object_has_struct_page(obj) && |
888 | !i915_gem_object_has_iomem(obj)); |
889 | i915_gem_object_unlock(obj); |
890 | |
891 | return !no_map; |
892 | } |
893 | |
894 | #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) |
895 | static int __igt_mmap(struct drm_i915_private *i915, |
896 | struct drm_i915_gem_object *obj, |
897 | enum i915_mmap_type type) |
898 | { |
899 | struct vm_area_struct *area; |
900 | unsigned long addr; |
901 | int err, i; |
902 | u64 offset; |
903 | |
904 | if (!can_mmap(obj, type)) |
905 | return 0; |
906 | |
907 | err = wc_set(obj); |
908 | if (err == -ENXIO) |
909 | err = gtt_set(obj); |
910 | if (err) |
911 | return err; |
912 | |
	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
918 | if (IS_ERR_VALUE(addr)) |
919 | return addr; |
920 | |
	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
922 | |
923 | mmap_read_lock(current->mm); |
924 | area = vma_lookup(current->mm, addr); |
925 | mmap_read_unlock(current->mm); |
926 | if (!area) { |
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
928 | obj->mm.region->name); |
929 | err = -EINVAL; |
930 | goto out_unmap; |
931 | } |
932 | |
933 | for (i = 0; i < obj->base.size / sizeof(u32); i++) { |
934 | u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); |
935 | u32 x; |
936 | |
937 | if (get_user(x, ux)) { |
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
939 | obj->mm.region->name, i * sizeof(x)); |
940 | err = -EFAULT; |
941 | goto out_unmap; |
942 | } |
943 | |
944 | if (x != expand32(POISON_INUSE)) { |
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
946 | obj->mm.region->name, |
947 | i * sizeof(x), x, expand32(POISON_INUSE)); |
948 | err = -EINVAL; |
949 | goto out_unmap; |
950 | } |
951 | |
952 | x = expand32(POISON_FREE); |
953 | if (put_user(x, ux)) { |
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
955 | obj->mm.region->name, i * sizeof(x)); |
956 | err = -EFAULT; |
957 | goto out_unmap; |
958 | } |
959 | } |
960 | |
961 | if (type == I915_MMAP_TYPE_GTT) |
		intel_gt_flush_ggtt_writes(to_gt(i915));
963 | |
964 | err = wc_check(obj); |
965 | if (err == -ENXIO) |
966 | err = gtt_check(obj); |
967 | out_unmap: |
968 | vm_munmap(addr, obj->base.size); |
969 | return err; |
970 | } |
971 | |
972 | static int igt_mmap(void *arg) |
973 | { |
974 | struct drm_i915_private *i915 = arg; |
975 | struct intel_memory_region *mr; |
976 | enum intel_region_id id; |
977 | |
978 | for_each_memory_region(mr, i915, id) { |
979 | unsigned long sizes[] = { |
980 | PAGE_SIZE, |
981 | mr->min_page_size, |
982 | SZ_4M, |
983 | }; |
984 | int i; |
985 | |
986 | if (mr->private) |
987 | continue; |
988 | |
989 | for (i = 0; i < ARRAY_SIZE(sizes); i++) { |
990 | struct drm_i915_gem_object *obj; |
991 | int err; |
992 | |
			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);
999 | |
			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1005 | |
1006 | i915_gem_object_put(obj); |
1007 | if (err) |
1008 | return err; |
1009 | } |
1010 | } |
1011 | |
1012 | return 0; |
1013 | } |
1014 | |
1015 | static void igt_close_objects(struct drm_i915_private *i915, |
1016 | struct list_head *objects) |
1017 | { |
1018 | struct drm_i915_gem_object *obj, *on; |
1019 | |
1020 | list_for_each_entry_safe(obj, on, objects, st_link) { |
1021 | i915_gem_object_lock(obj, NULL); |
1022 | if (i915_gem_object_has_pinned_pages(obj)) |
1023 | i915_gem_object_unpin_pages(obj); |
1024 | /* No polluting the memory region between tests */ |
1025 | __i915_gem_object_put_pages(obj); |
1026 | i915_gem_object_unlock(obj); |
		list_del(&obj->st_link);
1028 | i915_gem_object_put(obj); |
1029 | } |
1030 | |
1031 | cond_resched(); |
1032 | |
1033 | i915_gem_drain_freed_objects(i915); |
1034 | } |
1035 | |
1036 | static void igt_make_evictable(struct list_head *objects) |
1037 | { |
1038 | struct drm_i915_gem_object *obj; |
1039 | |
1040 | list_for_each_entry(obj, objects, st_link) { |
1041 | i915_gem_object_lock(obj, NULL); |
1042 | if (i915_gem_object_has_pinned_pages(obj)) |
1043 | i915_gem_object_unpin_pages(obj); |
1044 | i915_gem_object_unlock(obj); |
1045 | } |
1046 | |
1047 | cond_resched(); |
1048 | } |
1049 | |
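/*
 * Fill the CPU-visible (mappable) portion of the region with pinned
 * objects, halving the allocation size on failure until even the region's
 * minimum page size no longer fits.
 */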
1050 | static int igt_fill_mappable(struct intel_memory_region *mr, |
1051 | struct list_head *objects) |
1052 | { |
1053 | u64 size, total; |
1054 | int err; |
1055 | |
1056 | total = 0; |
	size = resource_size(&mr->io);
1058 | do { |
1059 | struct drm_i915_gem_object *obj; |
1060 | |
		obj = i915_gem_object_create_region(mr, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
1064 | goto err_close; |
1065 | } |
1066 | |
		list_add(&obj->st_link, objects);
1068 | |
1069 | err = i915_gem_object_pin_pages_unlocked(obj); |
1070 | if (err) { |
1071 | if (err != -ENXIO && err != -ENOMEM) |
1072 | goto err_close; |
1073 | |
1074 | if (size == mr->min_page_size) { |
1075 | err = 0; |
1076 | break; |
1077 | } |
1078 | |
1079 | size >>= 1; |
1080 | continue; |
1081 | } |
1082 | |
1083 | total += obj->base.size; |
1084 | } while (1); |
1085 | |
	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
1087 | return 0; |
1088 | |
1089 | err_close: |
	igt_close_objects(mr->i915, objects);
1091 | return err; |
1092 | } |
1093 | |
1094 | static int ___igt_mmap_migrate(struct drm_i915_private *i915, |
1095 | struct drm_i915_gem_object *obj, |
1096 | unsigned long addr, |
1097 | bool unfaultable) |
1098 | { |
1099 | struct vm_area_struct *area; |
1100 | int err = 0, i; |
1101 | |
	pr_info("igt_mmap(%s, %d) @ %lx\n",
1103 | obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr); |
1104 | |
1105 | mmap_read_lock(current->mm); |
1106 | area = vma_lookup(current->mm, addr); |
1107 | mmap_read_unlock(current->mm); |
1108 | if (!area) { |
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1110 | obj->mm.region->name); |
1111 | err = -EINVAL; |
1112 | goto out_unmap; |
1113 | } |
1114 | |
1115 | for (i = 0; i < obj->base.size / sizeof(u32); i++) { |
1116 | u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); |
1117 | u32 x; |
1118 | |
1119 | if (get_user(x, ux)) { |
1120 | err = -EFAULT; |
1121 | if (!unfaultable) { |
				pr_err("%s: Unable to read from mmap, offset:%zd\n",
1123 | obj->mm.region->name, i * sizeof(x)); |
1124 | goto out_unmap; |
1125 | } |
1126 | |
1127 | continue; |
1128 | } |
1129 | |
1130 | if (unfaultable) { |
			pr_err("%s: Faulted unmappable memory\n",
1132 | obj->mm.region->name); |
1133 | err = -EINVAL; |
1134 | goto out_unmap; |
1135 | } |
1136 | |
1137 | if (x != expand32(POISON_INUSE)) { |
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1139 | obj->mm.region->name, |
1140 | i * sizeof(x), x, expand32(POISON_INUSE)); |
1141 | err = -EINVAL; |
1142 | goto out_unmap; |
1143 | } |
1144 | |
1145 | x = expand32(POISON_FREE); |
1146 | if (put_user(x, ux)) { |
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
1148 | obj->mm.region->name, i * sizeof(x)); |
1149 | err = -EFAULT; |
1150 | goto out_unmap; |
1151 | } |
1152 | } |
1153 | |
1154 | if (unfaultable) { |
1155 | if (err == -EFAULT) |
1156 | err = 0; |
1157 | } else { |
1158 | obj->flags &= ~I915_BO_ALLOC_GPU_ONLY; |
1159 | err = wc_check(obj); |
1160 | } |
1161 | out_unmap: |
1162 | vm_munmap(addr, obj->base.size); |
1163 | return err; |
1164 | } |
1165 | |
1166 | #define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0) |
1167 | #define IGT_MMAP_MIGRATE_FILL (1 << 1) |
1168 | #define IGT_MMAP_MIGRATE_EVICTABLE (1 << 2) |
1169 | #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3) |
1170 | #define IGT_MMAP_MIGRATE_FAIL_GPU (1 << 4) |
1171 | static int __igt_mmap_migrate(struct intel_memory_region **placements, |
1172 | int n_placements, |
1173 | struct intel_memory_region *expected_mr, |
1174 | unsigned int flags) |
1175 | { |
1176 | struct drm_i915_private *i915 = placements[0]->i915; |
1177 | struct drm_i915_gem_object *obj; |
1178 | struct i915_request *rq = NULL; |
1179 | unsigned long addr; |
1180 | LIST_HEAD(objects); |
1181 | u64 offset; |
1182 | int err; |
1183 | |
1184 | obj = __i915_gem_object_create_user(i915, PAGE_SIZE, |
1185 | placements, |
1186 | n_placements); |
	if (IS_ERR(obj))
		return PTR_ERR(obj);
1189 | |
1190 | if (flags & IGT_MMAP_MIGRATE_TOPDOWN) |
1191 | obj->flags |= I915_BO_ALLOC_GPU_ONLY; |
1192 | |
	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1194 | if (err) |
1195 | goto out_put; |
1196 | |
1197 | /* |
1198 | * This will eventually create a GEM context, due to opening dummy drm |
1199 | * file, which needs a tiny amount of mappable device memory for the top |
	 * level paging structures (and perhaps scratch), so make sure we
1201 | * allocate early, to avoid tears. |
1202 | */ |
	addr = igt_mmap_offset(i915, offset, obj->base.size,
1204 | PROT_WRITE, MAP_SHARED); |
1205 | if (IS_ERR_VALUE(addr)) { |
1206 | err = addr; |
1207 | goto out_put; |
1208 | } |
1209 | |
1210 | if (flags & IGT_MMAP_MIGRATE_FILL) { |
		err = igt_fill_mappable(placements[0], &objects);
1212 | if (err) |
1213 | goto out_put; |
1214 | } |
1215 | |
1216 | err = i915_gem_object_lock(obj, NULL); |
1217 | if (err) |
1218 | goto out_put; |
1219 | |
1220 | err = i915_gem_object_pin_pages(obj); |
1221 | if (err) { |
1222 | i915_gem_object_unlock(obj); |
1223 | goto out_put; |
1224 | } |
1225 | |
	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
					  obj->mm.pages->sgl, obj->pat_index,
					  i915_gem_object_is_lmem(obj),
					  expand32(POISON_INUSE), &rq);
1230 | i915_gem_object_unpin_pages(obj); |
1231 | if (rq) { |
		err = dma_resv_reserve_fences(obj->base.resv, 1);
		if (!err)
			dma_resv_add_fence(obj->base.resv, &rq->fence,
					   DMA_RESV_USAGE_KERNEL);
1236 | i915_request_put(rq); |
1237 | } |
1238 | i915_gem_object_unlock(obj); |
1239 | if (err) |
1240 | goto out_put; |
1241 | |
1242 | if (flags & IGT_MMAP_MIGRATE_EVICTABLE) |
		igt_make_evictable(&objects);
1244 | |
1245 | if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) { |
1246 | err = i915_gem_object_lock(obj, NULL); |
1247 | if (err) |
1248 | goto out_put; |
1249 | |
1250 | /* |
		 * Ensure we only simulate the gpu failure when faulting the
		 * pages.
1253 | */ |
		err = i915_gem_object_wait_moving_fence(obj, true);
1255 | i915_gem_object_unlock(obj); |
1256 | if (err) |
1257 | goto out_put; |
		i915_ttm_migrate_set_failure_modes(true, false);
1259 | } |
1260 | |
	err = ___igt_mmap_migrate(i915, obj, addr,
				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1263 | |
1264 | if (!err && obj->mm.region != expected_mr) { |
		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1266 | err = -EINVAL; |
1267 | } |
1268 | |
1269 | if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) { |
1270 | struct intel_gt *gt; |
1271 | unsigned int id; |
1272 | |
		i915_ttm_migrate_set_failure_modes(false, false);
1274 | |
1275 | for_each_gt(gt, i915, id) { |
1276 | intel_wakeref_t wakeref; |
1277 | bool wedged; |
1278 | |
			mutex_lock(&gt->reset.mutex);
			wedged = test_bit(I915_WEDGED, &gt->reset.flags);
			mutex_unlock(&gt->reset.mutex);
			if (!wedged) {
				pr_err("gt(%u) not wedged\n", id);
1284 | err = -EINVAL; |
1285 | continue; |
1286 | } |
1287 | |
			wakeref = intel_runtime_pm_get(gt->uncore->rpm);
			igt_global_reset_lock(gt);
			intel_gt_reset(gt, ALL_ENGINES, NULL);
			igt_global_reset_unlock(gt);
			intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1293 | } |
1294 | |
1295 | if (!i915_gem_object_has_unknown_state(obj)) { |
			pr_err("object missing unknown_state\n");
1297 | err = -EINVAL; |
1298 | } |
1299 | } |
1300 | |
1301 | out_put: |
1302 | i915_gem_object_put(obj); |
	igt_close_objects(i915, &objects);
1304 | return err; |
1305 | } |
1306 | |
1307 | static int igt_mmap_migrate(void *arg) |
1308 | { |
1309 | struct drm_i915_private *i915 = arg; |
1310 | struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM]; |
1311 | struct intel_memory_region *mr; |
1312 | enum intel_region_id id; |
1313 | |
1314 | for_each_memory_region(mr, i915, id) { |
1315 | struct intel_memory_region *mixed[] = { mr, system }; |
1316 | struct intel_memory_region *single[] = { mr }; |
1317 | struct ttm_resource_manager *man = mr->region_private; |
1318 | struct resource saved_io; |
1319 | int err; |
1320 | |
1321 | if (mr->private) |
1322 | continue; |
1323 | |
		if (!resource_size(&mr->io))
1325 | continue; |
1326 | |
1327 | /* |
1328 | * For testing purposes let's force small BAR, if not already |
1329 | * present. |
1330 | */ |
1331 | saved_io = mr->io; |
		if (resource_size(&mr->io) == mr->total) {
			resource_size_t io_size = resource_size(&mr->io);
1334 | |
1335 | io_size = rounddown_pow_of_two(io_size >> 1); |
1336 | if (io_size < PAGE_SIZE) |
1337 | continue; |
1338 | |
1339 | mr->io = DEFINE_RES_MEM(mr->io.start, io_size); |
1340 | i915_ttm_buddy_man_force_visible_size(man, |
							      io_size >> PAGE_SHIFT);
1342 | } |
1343 | |
1344 | /* |
		 * Allocate in the mappable portion, should be no surprises here.
1346 | */ |
		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1348 | if (err) |
1349 | goto out_io_size; |
1350 | |
1351 | /* |
1352 | * Allocate in the non-mappable portion, but force migrating to |
1353 | * the mappable portion on fault (LMEM -> LMEM) |
1354 | */ |
		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1356 | IGT_MMAP_MIGRATE_TOPDOWN | |
1357 | IGT_MMAP_MIGRATE_FILL | |
1358 | IGT_MMAP_MIGRATE_EVICTABLE); |
1359 | if (err) |
1360 | goto out_io_size; |
1361 | |
1362 | /* |
1363 | * Allocate in the non-mappable portion, but force spilling into |
1364 | * system memory on fault (LMEM -> SMEM) |
1365 | */ |
		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1367 | IGT_MMAP_MIGRATE_TOPDOWN | |
1368 | IGT_MMAP_MIGRATE_FILL); |
1369 | if (err) |
1370 | goto out_io_size; |
1371 | |
1372 | /* |
1373 | * Allocate in the non-mappable portion, but since the mappable |
1374 | * portion is already full, and we can't spill to system memory, |
1375 | * then we should expect the fault to fail. |
1376 | */ |
		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1378 | IGT_MMAP_MIGRATE_TOPDOWN | |
1379 | IGT_MMAP_MIGRATE_FILL | |
1380 | IGT_MMAP_MIGRATE_UNFAULTABLE); |
1381 | if (err) |
1382 | goto out_io_size; |
1383 | |
1384 | /* |
1385 | * Allocate in the non-mappable portion, but force migrating to |
1386 | * the mappable portion on fault (LMEM -> LMEM). We then also |
1387 | * simulate a gpu error when moving the pages when faulting the |
1388 | * pages, which should result in wedging the gpu and returning |
1389 | * SIGBUS in the fault handler, since we can't fallback to |
1390 | * memcpy. |
1391 | */ |
		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1393 | IGT_MMAP_MIGRATE_TOPDOWN | |
1394 | IGT_MMAP_MIGRATE_FILL | |
1395 | IGT_MMAP_MIGRATE_EVICTABLE | |
1396 | IGT_MMAP_MIGRATE_FAIL_GPU | |
1397 | IGT_MMAP_MIGRATE_UNFAULTABLE); |
1398 | out_io_size: |
1399 | mr->io = saved_io; |
1400 | i915_ttm_buddy_man_force_visible_size(man, |
						      resource_size(&mr->io) >> PAGE_SHIFT);
1402 | if (err) |
1403 | return err; |
1404 | } |
1405 | |
1406 | return 0; |
1407 | } |
1408 | |
1409 | static const char *repr_mmap_type(enum i915_mmap_type type) |
1410 | { |
1411 | switch (type) { |
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
1418 | } |
1419 | } |
1420 | |
1421 | static bool can_access(struct drm_i915_gem_object *obj) |
1422 | { |
1423 | bool access; |
1424 | |
1425 | i915_gem_object_lock(obj, NULL); |
1426 | access = i915_gem_object_has_struct_page(obj) || |
1427 | i915_gem_object_has_iomem(obj); |
1428 | i915_gem_object_unlock(obj); |
1429 | |
1430 | return access; |
1431 | } |
1432 | |
1433 | static int __igt_mmap_access(struct drm_i915_private *i915, |
1434 | struct drm_i915_gem_object *obj, |
1435 | enum i915_mmap_type type) |
1436 | { |
1437 | unsigned long __user *ptr; |
1438 | unsigned long A, B; |
1439 | unsigned long x, y; |
1440 | unsigned long addr; |
1441 | int err; |
1442 | u64 offset; |
1443 | |
1444 | memset(&A, 0xAA, sizeof(A)); |
1445 | memset(&B, 0xBB, sizeof(B)); |
1446 | |
1447 | if (!can_mmap(obj, type) || !can_access(obj)) |
1448 | return 0; |
1449 | |
	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1455 | if (IS_ERR_VALUE(addr)) |
1456 | return addr; |
1457 | ptr = (unsigned long __user *)addr; |
1458 | |
1459 | err = __put_user(A, ptr); |
1460 | if (err) { |
		pr_err("%s(%s): failed to write into user mmap\n",
1462 | obj->mm.region->name, repr_mmap_type(type)); |
1463 | goto out_unmap; |
1464 | } |
1465 | |
	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
1471 | obj->mm.region->name, repr_mmap_type(type)); |
1472 | goto out_unmap; |
1473 | } |
1474 | |
	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
1478 | obj->mm.region->name, repr_mmap_type(type)); |
1479 | goto out_unmap; |
1480 | } |
1481 | |
	intel_gt_flush_ggtt_writes(to_gt(i915));
1483 | |
1484 | err = __get_user(y, ptr); |
1485 | if (err) { |
		pr_err("%s(%s): failed to read from user mmap\n",
1487 | obj->mm.region->name, repr_mmap_type(type)); |
1488 | goto out_unmap; |
1489 | } |
1490 | |
1491 | if (x != A || y != B) { |
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1493 | obj->mm.region->name, repr_mmap_type(type), |
1494 | x, y); |
1495 | err = -EINVAL; |
1496 | goto out_unmap; |
1497 | } |
1498 | |
1499 | out_unmap: |
1500 | vm_munmap(addr, obj->base.size); |
1501 | return err; |
1502 | } |
1503 | |
1504 | static int igt_mmap_access(void *arg) |
1505 | { |
1506 | struct drm_i915_private *i915 = arg; |
1507 | struct intel_memory_region *mr; |
1508 | enum intel_region_id id; |
1509 | |
1510 | for_each_memory_region(mr, i915, id) { |
1511 | struct drm_i915_gem_object *obj; |
1512 | int err; |
1513 | |
1514 | if (mr->private) |
1515 | continue; |
1516 | |
		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);
1523 | |
		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1533 | |
1534 | i915_gem_object_put(obj); |
1535 | if (err) |
1536 | return err; |
1537 | } |
1538 | |
1539 | return 0; |
1540 | } |
1541 | |
1542 | static int __igt_mmap_gpu(struct drm_i915_private *i915, |
1543 | struct drm_i915_gem_object *obj, |
1544 | enum i915_mmap_type type) |
1545 | { |
1546 | struct intel_engine_cs *engine; |
1547 | unsigned long addr; |
1548 | u32 __user *ux; |
1549 | u32 bbe; |
1550 | int err; |
1551 | u64 offset; |
1552 | |
1553 | /* |
1554 | * Verify that the mmap access into the backing store aligns with |
1555 | * that of the GPU, i.e. that mmap is indeed writing into the same |
1556 | * page as being read by the GPU. |
1557 | */ |
1558 | |
1559 | if (!can_mmap(obj, type)) |
1560 | return 0; |
1561 | |
1562 | err = wc_set(obj); |
1563 | if (err == -ENXIO) |
1564 | err = gtt_set(obj); |
1565 | if (err) |
1566 | return err; |
1567 | |
	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1573 | if (IS_ERR_VALUE(addr)) |
1574 | return addr; |
1575 | |
1576 | ux = u64_to_user_ptr((u64)addr); |
1577 | bbe = MI_BATCH_BUFFER_END; |
1578 | if (put_user(bbe, ux)) { |
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1580 | err = -EFAULT; |
1581 | goto out_unmap; |
1582 | } |
1583 | |
1584 | if (type == I915_MMAP_TYPE_GTT) |
		intel_gt_flush_ggtt_writes(to_gt(i915));
1586 | |
1587 | for_each_uabi_engine(engine, i915) { |
1588 | struct i915_request *rq; |
1589 | struct i915_vma *vma; |
1590 | struct i915_gem_ww_ctx ww; |
1591 | |
		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
1595 | goto out_unmap; |
1596 | } |
1597 | |
		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1603 | if (err) |
1604 | goto out_ww; |
1605 | |
		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
1609 | goto out_unpin; |
1610 | } |
1611 | |
		err = i915_vma_move_to_active(vma, rq, 0);
1613 | |
1614 | err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0); |
1615 | i915_request_get(rq); |
1616 | i915_request_add(rq); |
1617 | |
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);
1621 | |
			pr_err("%s(%s, %s): Failed to execute batch\n",
1623 | __func__, engine->name, obj->mm.region->name); |
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);
1626 | |
			intel_gt_set_wedged(engine->gt);
1628 | err = -EIO; |
1629 | } |
1630 | i915_request_put(rq); |
1631 | |
1632 | out_unpin: |
1633 | i915_vma_unpin(vma); |
1634 | out_ww: |
1635 | if (err == -EDEADLK) { |
			err = i915_gem_ww_ctx_backoff(&ww);
1637 | if (!err) |
1638 | goto retry; |
1639 | } |
		i915_gem_ww_ctx_fini(&ww);
1641 | if (err) |
1642 | goto out_unmap; |
1643 | } |
1644 | |
1645 | out_unmap: |
1646 | vm_munmap(addr, obj->base.size); |
1647 | return err; |
1648 | } |
1649 | |
1650 | static int igt_mmap_gpu(void *arg) |
1651 | { |
1652 | struct drm_i915_private *i915 = arg; |
1653 | struct intel_memory_region *mr; |
1654 | enum intel_region_id id; |
1655 | |
1656 | for_each_memory_region(mr, i915, id) { |
1657 | struct drm_i915_gem_object *obj; |
1658 | int err; |
1659 | |
1660 | if (mr->private) |
1661 | continue; |
1662 | |
		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);
1669 | |
		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1675 | |
1676 | i915_gem_object_put(obj); |
1677 | if (err) |
1678 | return err; |
1679 | } |
1680 | |
1681 | return 0; |
1682 | } |
1683 | |
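/*
 * PTE walkers used by the revoke test: every PTE of the user mapping must
 * be present after prefaulting and cleared again once the mmap has been
 * revoked.
 */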
1684 | static int check_present_pte(pte_t *pte, unsigned long addr, void *data) |
1685 | { |
	pte_t ptent = ptep_get(pte);

	if (!pte_present(ptent) || pte_none(ptent)) {
		pr_err("missing PTE:%lx\n",
1690 | (addr - (unsigned long)data) >> PAGE_SHIFT); |
1691 | return -EINVAL; |
1692 | } |
1693 | |
1694 | return 0; |
1695 | } |
1696 | |
1697 | static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) |
1698 | { |
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent) && !pte_none(ptent)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
1703 | (addr - (unsigned long)data) >> PAGE_SHIFT); |
1704 | return -EINVAL; |
1705 | } |
1706 | |
1707 | return 0; |
1708 | } |
1709 | |
1710 | static int check_present(unsigned long addr, unsigned long len) |
1711 | { |
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
1714 | } |
1715 | |
1716 | static int check_absent(unsigned long addr, unsigned long len) |
1717 | { |
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
1720 | } |
1721 | |
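/* Touch every page of the user mapping so its PTEs are populated before we inspect them. */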
1722 | static int prefault_range(u64 start, u64 len) |
1723 | { |
1724 | const char __user *addr, *end; |
1725 | char __maybe_unused c; |
1726 | int err; |
1727 | |
1728 | addr = u64_to_user_ptr(start); |
1729 | end = addr + len; |
1730 | |
1731 | for (; addr < end; addr += PAGE_SIZE) { |
1732 | err = __get_user(c, addr); |
1733 | if (err) |
1734 | return err; |
1735 | } |
1736 | |
1737 | return __get_user(c, end - 1); |
1738 | } |
1739 | |
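/*
 * Check that unbinding the object (and, for non-GTT mmaps, releasing its
 * pages) revokes the user's PTEs, so a stale mapping can no longer reach
 * the old backing store.
 */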
1740 | static int __igt_mmap_revoke(struct drm_i915_private *i915, |
1741 | struct drm_i915_gem_object *obj, |
1742 | enum i915_mmap_type type) |
1743 | { |
1744 | unsigned long addr; |
1745 | int err; |
1746 | u64 offset; |
1747 | |
1748 | if (!can_mmap(obj, type)) |
1749 | return 0; |
1750 | |
	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1756 | if (IS_ERR_VALUE(addr)) |
1757 | return addr; |
1758 | |
	err = prefault_range(addr, obj->base.size);
1760 | if (err) |
1761 | goto out_unmap; |
1762 | |
	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
1766 | goto out_unmap; |
1767 | } |
1768 | |
1769 | /* |
1770 | * After unbinding the object from the GGTT, its address may be reused |
1771 | * for other objects. Ergo we have to revoke the previous mmap PTE |
1772 | * access as it no longer points to the same object. |
1773 | */ |
1774 | i915_gem_object_lock(obj, NULL); |
1775 | err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); |
1776 | i915_gem_object_unlock(obj); |
1777 | if (err) { |
		pr_err("Failed to unbind object!\n");
1779 | goto out_unmap; |
1780 | } |
1781 | |
1782 | if (type != I915_MMAP_TYPE_GTT) { |
1783 | i915_gem_object_lock(obj, NULL); |
1784 | __i915_gem_object_put_pages(obj); |
1785 | i915_gem_object_unlock(obj); |
1786 | if (i915_gem_object_has_pages(obj)) { |
			pr_err("Failed to put-pages object!\n");
1788 | err = -EINVAL; |
1789 | goto out_unmap; |
1790 | } |
1791 | } |
1792 | |
	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
1796 | goto out_unmap; |
1797 | } |
1798 | |
1799 | out_unmap: |
1800 | vm_munmap(addr, obj->base.size); |
1801 | return err; |
1802 | } |
1803 | |
1804 | static int igt_mmap_revoke(void *arg) |
1805 | { |
1806 | struct drm_i915_private *i915 = arg; |
1807 | struct intel_memory_region *mr; |
1808 | enum intel_region_id id; |
1809 | |
1810 | for_each_memory_region(mr, i915, id) { |
1811 | struct drm_i915_gem_object *obj; |
1812 | int err; |
1813 | |
1814 | if (mr->private) |
1815 | continue; |
1816 | |
		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);
1823 | |
		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1829 | |
1830 | i915_gem_object_put(obj); |
1831 | if (err) |
1832 | return err; |
1833 | } |
1834 | |
1835 | return 0; |
1836 | } |
1837 | |
1838 | int i915_gem_mman_live_selftests(struct drm_i915_private *i915) |
1839 | { |
1840 | static const struct i915_subtest tests[] = { |
1841 | SUBTEST(igt_partial_tiling), |
1842 | SUBTEST(igt_smoke_tiling), |
1843 | SUBTEST(igt_mmap_offset_exhaustion), |
1844 | SUBTEST(igt_mmap), |
1845 | SUBTEST(igt_mmap_migrate), |
1846 | SUBTEST(igt_mmap_access), |
1847 | SUBTEST(igt_mmap_revoke), |
1848 | SUBTEST(igt_mmap_gpu), |
1849 | }; |
1850 | |
1851 | return i915_live_subtests(tests, i915); |
1852 | } |
1853 | |