// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2020 Intel Corporation
 */

6 | static struct intel_ring *mock_ring(unsigned long sz) |
7 | { |
8 | struct intel_ring *ring; |
9 | |
10 | ring = kzalloc(size: sizeof(*ring) + sz, GFP_KERNEL); |
11 | if (!ring) |
12 | return NULL; |
13 | |
14 | kref_init(kref: &ring->ref); |
15 | ring->size = sz; |
16 | ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz); |
17 | ring->effective_size = sz; |
18 | ring->vaddr = (void *)(ring + 1); |
19 | atomic_set(v: &ring->pin_count, i: 1); |
20 | |
21 | intel_ring_update_space(ring); |
22 | |
23 | return ring; |
24 | } |
25 | |
/* Release a ring created by mock_ring(); struct and buffer are one block. */
static void mock_ring_free(struct intel_ring *ring)
{
	kfree(ring);
}
30 | |
31 | static int check_ring_direction(struct intel_ring *ring, |
32 | u32 next, u32 prev, |
33 | int expected) |
34 | { |
35 | int result; |
36 | |
37 | result = intel_ring_direction(ring, next, prev); |
38 | if (result < 0) |
39 | result = -1; |
40 | else if (result > 0) |
41 | result = 1; |
42 | |
43 | if (result != expected) { |
44 | pr_err("intel_ring_direction(%u, %u):%d != %d\n" , |
45 | next, prev, result, expected); |
46 | return -EINVAL; |
47 | } |
48 | |
49 | return 0; |
50 | } |
51 | |
52 | static int check_ring_step(struct intel_ring *ring, u32 x, u32 step) |
53 | { |
54 | u32 prev = x, next = intel_ring_wrap(ring, pos: x + step); |
55 | int err = 0; |
56 | |
57 | err |= check_ring_direction(ring, next, prev: next, expected: 0); |
58 | err |= check_ring_direction(ring, next: prev, prev, expected: 0); |
59 | err |= check_ring_direction(ring, next, prev, expected: 1); |
60 | err |= check_ring_direction(ring, next: prev, prev: next, expected: -1); |
61 | |
62 | return err; |
63 | } |
64 | |
65 | static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step) |
66 | { |
67 | int err = 0; |
68 | |
69 | err |= check_ring_step(ring, x, step); |
70 | err |= check_ring_step(ring, x: intel_ring_wrap(ring, pos: x + 1), step); |
71 | err |= check_ring_step(ring, x: intel_ring_wrap(ring, pos: x - 1), step); |
72 | |
73 | return err; |
74 | } |
75 | |
76 | static int igt_ring_direction(void *dummy) |
77 | { |
78 | struct intel_ring *ring; |
79 | unsigned int half = 2048; |
80 | int step, err = 0; |
81 | |
82 | ring = mock_ring(sz: 2 * half); |
83 | if (!ring) |
84 | return -ENOMEM; |
85 | |
86 | GEM_BUG_ON(ring->size != 2 * half); |
87 | |
88 | /* Precision of wrap detection is limited to ring->size / 2 */ |
89 | for (step = 1; step < half; step <<= 1) { |
90 | err |= check_ring_offset(ring, x: 0, step); |
91 | err |= check_ring_offset(ring, x: half, step); |
92 | } |
93 | err |= check_ring_step(ring, x: 0, step: half - 64); |
94 | |
95 | /* And check unwrapped handling for good measure */ |
96 | err |= check_ring_offset(ring, x: 0, step: 2 * half + 64); |
97 | err |= check_ring_offset(ring, x: 3 * half, step: 1); |
98 | |
99 | mock_ring_free(ring); |
100 | return err; |
101 | } |
102 | |
/* Entry point for the mock (no-hardware) intel_ring selftests. */
int intel_ring_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ring_direction),
	};

	/* No device required for these tests, hence the NULL argument */
	return i915_subtests(tests, NULL);
}
111 | |