/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KCSAN access checks and modifiers. These can be used to explicitly check
 * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
#define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
#define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
/* The following are special, and never due to compiler instrumentation. */
#define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
#define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */

/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be
 * used even in compilation units that selectively disable KCSAN, but which must
 * still use KCSAN to validate accesses to an address. Never use these in header
 * files!
 */
#ifdef CONFIG_KCSAN
/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
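
/*
 * For example, a compilation unit that disables KCSAN instrumentation can still
 * report selected accesses explicitly. A minimal sketch, assuming a hypothetical
 * variable my_shared_counter updated by uninstrumented code:
 *
 * .. code-block:: c
 *
 *	static unsigned long my_shared_counter;
 *
 *	void uninstrumented_update(void)
 *	{
 *		// Report the plain read-modify-write to KCSAN explicitly.
 *		__kcsan_check_access(&my_shared_counter, sizeof(my_shared_counter),
 *				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);
 *		my_shared_counter++;
 *	}
 */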

/*
 * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
 * memory orders to the LKMM memory orders and vice-versa!
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE

/**
 * __kcsan_mb - full memory barrier instrumentation
 */
void __kcsan_mb(void);

/**
 * __kcsan_wmb - write memory barrier instrumentation
 */
void __kcsan_wmb(void);

/**
 * __kcsan_rmb - read memory barrier instrumentation
 */
void __kcsan_rmb(void);

/**
 * __kcsan_release - release barrier instrumentation
 */
void __kcsan_release(void);

/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
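
/*
 * A minimal sketch of nesting-safe use, assuming a hypothetical diagnostic
 * helper whose plain reads are expected to race benignly:
 *
 * .. code-block:: c
 *
 *	void print_progress(void)
 *	{
 *		kcsan_disable_current();	// tolerate racy diagnostic reads below
 *		pr_info("progress: %lu\n", shared_progress);
 *		kcsan_enable_current();
 *	}
 */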

/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);
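
/*
 * A minimal sketch, assuming a hypothetical statistics counter whose races are
 * deemed intentional and approximate results are acceptable:
 *
 * .. code-block:: c
 *
 *	kcsan_nestable_atomic_begin();
 *	stats->hits++;	// plain RMW; concurrent updates are tolerated here
 *	kcsan_nestable_atomic_end();
 */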

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);

/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * operations.
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);
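
/*
 * A minimal sketch; only the next access is treated as atomic (the variable
 * name is illustrative):
 *
 * .. code-block:: c
 *
 *	kcsan_atomic_next(1);
 *	seq = shared_seq;	// this one plain read is considered atomic
 */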

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses in the current context; if non-zero,
 * only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);
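
/*
 * ASSERT_EXCLUSIVE_BITS() below combines this with kcsan_atomic_next(). A
 * direct use could look like the following sketch (FLAG_BITS is hypothetical):
 *
 * .. code-block:: c
 *
 *	kcsan_set_access_mask(FLAG_BITS);
 *	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *	kcsan_set_access_mask(0);
 */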

/* Scoped access information. */
struct kcsan_scoped_access {
	union {
		struct list_head list; /* scoped_accesses list */
		/*
		 * Not an entry in scoped_accesses list; stack depth from where
		 * the access was initialized.
		 */
		int stack_depth;
	};

	/* Access information. */
	const volatile void *ptr;
	size_t size;
	int type;
	/* Location where scoped access was set up. */
	unsigned long ip;
};
/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped \
	__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))

/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
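
/*
 * A minimal sketch of manually pairing the two calls (obj and
 * do_long_operation() are hypothetical):
 *
 * .. code-block:: c
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_SCOPED, &sa);
 *	do_long_operation(obj);		// obj->state is checked throughout
 *	kcsan_end_scoped_access(&sa);
 */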

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
					int type) { }

static inline void __kcsan_mb(void) { }
static inline void __kcsan_wmb(void) { }
static inline void __kcsan_rmb(void) { }
static inline void __kcsan_release(void) { }
static inline void kcsan_disable_current(void) { }
static inline void kcsan_enable_current(void) { }
static inline void kcsan_enable_current_nowarn(void) { }
static inline void kcsan_nestable_atomic_begin(void) { }
static inline void kcsan_nestable_atomic_end(void) { }
static inline void kcsan_flat_atomic_begin(void) { }
static inline void kcsan_flat_atomic_end(void) { }
static inline void kcsan_atomic_next(int n) { }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */

#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else /* __SANITIZE_THREAD__ */
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void) { }
static inline void __kcsan_disable_current(void) { }
#endif /* __SANITIZE_THREAD__ */
#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
/*
 * Normal barrier instrumentation is not done via explicit calls, but by mapping
 * to a repurposed __atomic_signal_fence(), which normally does not generate any
 * real instructions, but is still intercepted by -fsanitize=thread. This means,
 * like any other compile-time instrumentation, barrier instrumentation can be
 * disabled with the __no_kcsan function attribute.
 *
 * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 *
 * These are all macros, as in <asm/barrier.h>, since some architectures use
 * them in non-static inline functions.
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name) \
	do { \
		barrier(); \
		__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
		barrier(); \
	} while (0)
#define kcsan_mb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
#define kcsan_wmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
#define kcsan_rmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
#define kcsan_release() __KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
#define kcsan_mb __kcsan_mb
#define kcsan_wmb __kcsan_wmb
#define kcsan_rmb __kcsan_rmb
#define kcsan_release __kcsan_release
#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
#define kcsan_mb() do { } while (0)
#define kcsan_wmb() do { } while (0)
#define kcsan_rmb() do { } while (0)
#define kcsan_release() do { } while (0)
#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
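
/*
 * A barrier wrapper is expected to pair the instrumentation with the real
 * barrier; an illustrative sketch, where __mb() stands in for an architecture's
 * underlying barrier primitive:
 *
 * .. code-block:: c
 *
 *	#define mb()	do { kcsan_mb(); __mb(); } while (0)
 */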

/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size) \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * __kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read_write(ptr, size) \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size) \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read_write(ptr, size) \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
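
/*
 * A minimal sketch of a header-safe instrumentation helper (the helper name is
 * hypothetical):
 *
 * .. code-block:: c
 *
 *	static __always_inline void my_instrument_read(const volatile void *v, size_t size)
 *	{
 *		kcsan_check_read(v, size);
 *	}
 */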

/*
 * Check for atomic accesses: if atomic accesses are not ignored, these simply
 * alias to kcsan_check_access(); otherwise they become no-ops.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...) do { } while (0)
#define kcsan_check_atomic_write(...) do { } while (0)
#define kcsan_check_atomic_read_write(...) do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size) \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size) \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#define kcsan_check_atomic_read_write(ptr, size) \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif
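
/*
 * A minimal sketch of checking an access as atomic before deferring to an arch
 * implementation (the wrapper name is hypothetical):
 *
 * .. code-block:: c
 *
 *	static __always_inline int my_atomic_read(const atomic_t *v)
 *	{
 *		kcsan_check_atomic_read(v, sizeof(*v));
 *		return arch_atomic_read(v);
 *	}
 */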

/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *		WRITE_ONCE(shared_foo, ...);
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void reader(void) {
 *		// update_foo_lock does not need to be held!
 *		... = READ_ONCE(shared_foo);
 *	}
 *
 * Note: Where a clear scope exists in which no concurrent writes are expected,
 * ASSERT_EXCLUSIVE_WRITER_SCOPED() performs more thorough checking.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var) \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)

/*
 * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
	struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
		__kcsan_cleanup_scoped; \
	struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
		__maybe_unused = kcsan_begin_scoped_access( \
			&(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
			&__kcsan_scoped_name(id, _))

/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		{
 *			ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *			WRITE_ONCE(shared_foo, 42);
 *			...
 *			// shared_foo should still be 42 here!
 *		}
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void buggy(void) {
 *		if (READ_ONCE(shared_foo) == 42)
 *			WRITE_ONCE(shared_foo, 1); // bug!
 *	}
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, exclusive access may be expected after determining that no other
 * users of an object remain, while the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 *
 * Note:
 *
 * 1. Where a clear scope exists in which no concurrent accesses are expected,
 *    ASSERT_EXCLUSIVE_ACCESS_SCOPED() performs more thorough checking.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var) \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)

/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
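
/*
 * A minimal sketch, assuming the caller holds the last reference to obj for the
 * whole teardown path (names are illustrative):
 *
 * .. code-block:: c
 *
 *	void teardown(struct obj *obj)
 *	{
 *		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
 *		// Neither readers nor writers may access *obj until we return.
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 */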

/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables, where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, we can write the above simply
 * as:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example is where certain bits of @var may only be modified when
 * holding the appropriate lock, but other bits may still be modified
 * concurrently. Writers could then use the assertion as follows:
 *
 * .. code-block:: c
 *
 *	spin_lock(&foo_lock);
 *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *	old_flags = flags;
 *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *	spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask) \
	do { \
		kcsan_set_access_mask(mask); \
		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT); \
		kcsan_set_access_mask(0); \
		kcsan_atomic_next(1); \
	} while (0)

#endif /* _LINUX_KCSAN_CHECKS_H */