/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head	entry;
	struct lock_class	*class;
	struct lock_class	*links_to;
	const struct lock_trace	*trace;
	u16			distance;
	/* bitmap of different dependencies from head to this */
	u8			dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8			only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list	*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int		irq_context :  2,
				depth       :  6,
				base        : 24;
	/* 4 byte hole */
	struct hlist_node	entry;
	u64			chain_key;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
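
/*
 * Usage sketch (illustrative, not from this file): bracket a region that
 * would otherwise trigger known false positives. Off/on calls nest via the
 * counter above, so each lockdep_off() needs a matching lockdep_on():
 *
 *	lockdep_off();
 *	do_tricky_locking();	// hypothetical helper
 *	lockdep_on();
 */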

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);
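
/*
 * Lifecycle sketch for dynamically allocated keys ("struct my_obj" and its
 * members are hypothetical). A dynamic key must be registered before first
 * use and unregistered before the memory holding it is freed:
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	lockdep_register_key(&obj->key);
 *	spin_lock_init(&obj->lock);
 *	lockdep_set_class(&obj->lock, &obj->key);
 *	...
 *	lockdep_unregister_key(&obj->key);
 *	kfree(obj);
 */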

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
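
/*
 * Re-keying sketch (names hypothetical): a lock that would otherwise share
 * a class with unrelated locks initialized by the same helper gets its own
 * key, and optionally a more useful report name:
 *
 *	static struct lock_class_key bdev_lock_key;
 *
 *	mutex_init(&bdev->mutex);
 *	lockdep_set_class_and_name(&bdev->mutex, &bdev_lock_key, "bdev_mutex");
 */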

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
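
/*
 * Sketch: verify a lock still carries the class it was given, e.g. in a
 * debug check ("bdev_lock_key" continues the hypothetical example above):
 *
 *	WARN_ON(!lockdep_match_class(&bdev->mutex, &bdev_lock_key));
 */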

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
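
/*
 * For instance (illustrative), a full-validation write acquisition versus a
 * recursion-allowed read acquisition, matching the wrapper macros further
 * down in this header:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_); // write
 *	lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, _RET_IP_); // recursive read
 */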

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)
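
/*
 * Typical use (sketch; "my_dev" is hypothetical): encode a function's
 * locking contract so violations warn when lockdep is enabled:
 *
 *	static void update_stats(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->stats++;
 *	}
 */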

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l, c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l, c)	lock_unpin_lock(&(l)->dep_map, (c))
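
/*
 * Pinning sketch ("rq" is a hypothetical lock holder): the cookie returned
 * at pin time must be handed back, which catches code that drops and
 * re-takes the lock behind the pinner's back:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */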

/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
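
/*
 * Sketch of the pattern described above (the map name is illustrative):
 *
 *	DEFINE_WAIT_OVERRIDE_MAP(my_map, LD_WAIT_SLEEP);
 *
 *	lock_map_acquire_try(&my_map);
 *	...
 *	lock_map_release(&my_map);
 */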

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()			do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map __maybe_unused _name = {}

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
#else
#define lock_set_cmp_fn(lock, ...)	do { } while (0)
#endif

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
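
/*
 * For example (names hypothetical): a file-scope map for annotating a code
 * region that behaves like a lock:
 *
 *	static struct lock_class_key my_key;
 *	static struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_key);
 */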

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
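
/*
 * Sketch of how a locking primitive might use these ("__my_trylock" and
 * "__my_lock_slowpath" are hypothetical fast/slow paths). The trylock runs
 * first; only on failure is the contention event recorded:
 *
 *	LOCK_CONTENDED(lock, __my_trylock, __my_lock_slowpath);
 *
 * LOCK_CONTENDED_RETURN() is the same pattern for slow paths that can fail,
 * e.g. interruptible variants returning -EINTR.
 */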

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map, ip)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING	1
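
/*
 * Example (illustrative): taking two locks of the same class in a fixed
 * parent-then-child order, telling the validator the second acquisition is
 * one nesting level deeper rather than a self-deadlock:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */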

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)		lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)		lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)	lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)	lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)		lock_release(l, _THIS_IP_)
#define lock_map_sync(l)		lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
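
/*
 * Sketch: annotate a pseudo-lock dependency with a bare lockdep_map, e.g.
 * "running a work item" vs. "flushing it", so inversions against real locks
 * are reported (the workqueue code uses this pattern; "map" is illustrative):
 *
 *	lock_map_acquire(&map);
 *	lock_map_release(&map);
 */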

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
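
/*
 * Usage sketch: a path that only occasionally takes a lock can still expose
 * the would-be dependency on every call ("inode" is illustrative):
 *
 *	might_lock(&inode->i_rwsem);
 */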

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_no_hardirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
					   !this_cpu_read(hardirqs_enabled))); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled &&				\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock)			do { } while (0)
# define might_lock_read(lock)			do { } while (0)
# define might_lock_nested(lock, subclass)	do { } while (0)

# define lockdep_assert_irqs_enabled()		do { } while (0)
# define lockdep_assert_irqs_disabled()		do { } while (0)
# define lockdep_assert_in_irq()		do { } while (0)
# define lockdep_assert_no_hardirq()		do { } while (0)

# define lockdep_assert_preemption_enabled()	do { } while (0)
# define lockdep_assert_preemption_disabled()	do { } while (0)
# define lockdep_assert_in_softirq()		do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */