/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

struct drm_i915_private;
struct timer_list;

#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))
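
/*
 * Example: flag an unexpected enum value in the default branch of a switch
 * without open coding the warning. The switch below is purely illustrative.
 *
 *	switch (pipe) {
 *	case PIPE_A:
 *		...
 *		break;
 *	default:
 *		MISSING_CASE(pipe);
 *	}
 */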

void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...) \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)

int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
			      const char *func, int line);
#define i915_inject_probe_error(_i915, _err) \
	__i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);

#else

#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
#define i915_error_injected() false

#endif

#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)

#define i915_probe_error(i915, fmt, ...) \
	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
	__builtin_add_overflow_p((A), (B), (T)0)
#else
#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})
#endif

#define add_overflows(A, B) \
	add_overflows_t(typeof((A) + (B)), (A), (B))
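
/*
 * Example: check whether summing two user-supplied lengths would wrap a u32
 * before using the sum as an allocation size (names are illustrative only).
 *
 *	if (add_overflows_t(u32, header_len, payload_len))
 *		return -EINVAL;
 */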

#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ >= max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))

#define range_overflows_end(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_end_t(type, start, size, max) \
	range_overflows_end((type)(start), (type)(size), (type)(max))
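
/*
 * Example: reject a user-supplied (offset, length) pair that does not fit
 * inside an object of obj_size bytes; the names are illustrative only.
 *
 *	if (range_overflows_t(u64, args->offset, args->length, obj_size))
 *		return -EINVAL;
 */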

#define ptr_mask_bits(ptr, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))

#define ptr_unpack_bits(ptr, bits, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	*(bits) = __v & (BIT(n) - 1); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_pack_bits(ptr, bits, n) ({ \
	unsigned long __bits = (bits); \
	GEM_BUG_ON(__bits & -BIT(n)); \
	((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})

#define ptr_dec(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v - 1); \
})

#define ptr_inc(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v + 1); \
})

#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
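
/*
 * Example of the pack/unpack helpers (names are illustrative): stash a small
 * value in the low bits of a sufficiently aligned pointer and recover both
 * pieces later.
 *
 *	unsigned long flags;
 *
 *	packed = ptr_pack_bits(obj, 0x1, 2);	(value must fit in the low 2 bits)
 *	...
 *	obj = ptr_unpack_bits(packed, &flags, 2);
 */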

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})
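
/*
 * Example: take the current value of a slot and leave zero/NULL behind in a
 * single expression (note this is not atomic); the field name is illustrative.
 *
 *	struct i915_request *rq = fetch_and_zero(&engine->request);
 *	if (rq)
 *		...
 */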

static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
{
	return a - b;
}

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({ \
	void __user *__mptr = (void __user *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
			 !__same_type(*(ptr), void), \
			 "pointer type mismatch in container_of()"); \
	((type __user *)(__mptr - offsetof(type, member))); })
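
/*
 * Example: given a __user pointer to an embedded extension header, recover a
 * __user pointer to the containing struct without upsetting sparse's
 * address-space checking (the containing struct here is hypothetical).
 *
 *	struct hypothetical_ext {
 *		struct i915_user_extension base;
 *		__u32 value;
 *	};
 *
 *	struct i915_user_extension __user *ptr = ...;
 *	struct hypothetical_ext __user *ext =
 *		container_of_user(ptr, struct hypothetical_ext, base);
 */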

/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and
 * to ensure that userspace is prepared we enforce that space must
 * be zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
 */
#define check_user_mbz(U) ({ \
	typeof(*(U)) mbz__; \
	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
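
/*
 * Example: reject an ioctl argument whose reserved field was not cleared by
 * userspace (the argument struct and field names are illustrative).
 *
 *	int err;
 *
 *	err = check_user_mbz(&args->rsvd);
 *	if (err)
 *		return err;	(-EFAULT or -EINVAL)
 */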

#define u64_to_ptr(T, x) ({ \
	typecheck(u64, x); \
	(T *)(uintptr_t)(x); \
})

#define __mask_next_bit(mask) ({ \
	int __idx = ffs(mask) - 1; \
	mask &= ~BIT(__idx); \
	__idx; \
})
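
/*
 * Example: consume the set bits of a mask one index at a time. Note that the
 * macro modifies its argument, so operate on a local copy (names illustrative).
 *
 *	unsigned int tmp = engine_mask;
 *
 *	while (tmp) {
 *		int idx = __mask_next_bit(tmp);
 *		...
 *	}
 */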

static inline bool is_power_of_2_u64(u64 n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}

static inline void __list_del_many(struct list_head *head,
				   struct list_head *first)
{
	first->prev = head;
	WRITE_ONCE(head->next, first);
}

static inline int list_is_last_rcu(const struct list_head *list,
				   const struct list_head *head)
{
	return READ_ONCE(list->next) == head;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
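
/*
 * Example (illustrative names): a panel requires 500 ms between power-off and
 * the next power-on; record when the panel was switched off and pay off only
 * the remaining delay just before switching it back on.
 *
 *	panel->last_off = jiffies;				(event A)
 *	...
 *	wait_remaining_ms_from_jiffies(panel->last_off, 500);	(before event B)
 */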

/*
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
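
/*
 * Example (illustrative register and bit; any sleeping-context condition
 * works): sleep-poll a status register until a ready bit is set, giving up
 * after 10 ms.
 *
 *	if (wait_for(intel_de_read(i915, SOME_STATUS_REG) & READY_BIT, 10))
 *		drm_err(&i915->drm, "timed out waiting for ready\n");
 */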

/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
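
/*
 * Example (illustrative): in atomic context, busy-wait for an ack bit with a
 * hard 20 us bound; the timeout must be a compile-time constant.
 *
 *	if (wait_for_atomic_us(readl(regs + ACK_OFFSET) & ACK_BIT, 20))
 *		return -ETIMEDOUT;
 */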

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
	/*
	 * The system is "ok", just about surviving for the user, but
	 * CI results are now unreliable as the HW is very suspect.
	 * CI checks the taint state after every test and will reboot
	 * the machine if the kernel is tainted.
	 */
	add_taint(taint, LOCKDEP_STILL_OK);
}

void cancel_timer(struct timer_list *t);
void set_timer_ms(struct timer_list *t, unsigned long timeout);

static inline bool timer_active(const struct timer_list *t)
{
	return READ_ONCE(t->expires);
}

static inline bool timer_expired(const struct timer_list *t)
{
	return timer_active(t) && !timer_pending(t);
}

static inline bool i915_run_as_guest(void)
{
#if IS_ENABLED(CONFIG_X86)
	return !hypervisor_is_type(X86_HYPER_NATIVE);
#else
	/* Not supported yet */
	return false;
#endif
}

bool i915_vtd_active(struct drm_i915_private *i915);

#endif /* !__I915_UTILS_H */