/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
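
/*
 * Usage sketch (hedged; modelled on the workqueue pattern): take a private
 * copy of an object's map so that validation can continue even if the
 * object itself is freed while we wait on it:
 *
 *	struct lockdep_map map;
 *
 *	lockdep_copy_map(&map, &work->lockdep_map);
 *	lock_map_acquire(&map);
 *	wait_for_completion(work);
 *	lock_map_release(&map);
 */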

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:1;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
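
/*
 * Usage sketch: lockdep_off()/lockdep_on() must be balanced, and should only
 * bracket code that is known to trip the validator falsely:
 *
 *	lockdep_off();
 *	code_that_confuses_the_validator();
 *	lockdep_on();
 */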

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
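
/*
 * Usage sketch (hypothetical names): embedding a map in a custom
 * synchronization object. The key must have static storage duration, or be
 * registered via lockdep_register_key():
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	lockdep_init_map(&obj->dep_map, "my_lock", &my_lock_key, 0);
 */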

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
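
/*
 * Usage sketch (hypothetical key name): give one user of a common lock
 * initializer its own class when its locking rules genuinely differ:
 *
 *	static struct lock_class_key bdev_lock_key;
 *
 *	spin_lock_init(&bdev->lock);
 *	lockdep_set_class(&bdev->lock, &bdev_lock_key);
 */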

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
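
/*
 * Usage sketch: check that a lock ended up in the class we expect:
 *
 *	WARN_ON(!lockdep_match_class(&obj->lock, &my_lock_key));
 */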

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
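
/*
 * Usage sketch: the common RCU pattern of "protected by either RCU or this
 * lock", where lockdep_is_held() supplies the lockdep half of the check:
 *
 *	p = rcu_dereference_protected(obj->ptr,
 *				      lockdep_is_held(&obj->lock));
 */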

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)
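
/*
 * Usage sketch: make a function's locking precondition explicit and
 * machine-checked instead of comment-only:
 *
 *	static void update_stats(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->nr_updates++;
 *	}
 */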

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
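
/*
 * Usage sketch: pinning catches code that drops and re-takes a lock behind
 * the holder's back, e.g. around a callback that must not release it:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(lock);
 *	callback(lock);			// must not unlock 'lock'
 *	lockdep_unpin_lock(lock, cookie);
 */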

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined; the caller should
 * wrap the call in #ifdef CONFIG_LOCKDEP themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
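
/*
 * Usage sketch (hypothetical wrapper names): a sleeping-lock implementation
 * wraps its trylock fast path and its slow path so that contention and
 * acquisition events are fed to lock statistics:
 *
 *	void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		LOCK_CONTENDED(lock, my_mutex_trylock, __my_mutex_lock_slowpath);
 *	}
 */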

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
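
/*
 * Usage sketch: taking two locks of the same class in a well-defined order,
 * where the second acquisition must be marked as intentional nesting:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 */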

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
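
/*
 * Usage sketch (hypothetical names): annotate a "pseudo-lock" around running
 * callbacks, much like the workqueue code does, so that waiting on a callback
 * while holding a lock the callback takes is flagged as a deadlock:
 *
 *	static struct lock_class_key my_cb_key;
 *	static struct lockdep_map my_cb_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_cb", &my_cb_key);
 *
 *	lock_map_acquire(&my_cb_map);
 *	run_callback();
 *	lock_map_release(&my_cb_map);
 */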

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
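
/*
 * Usage sketch: record the dependency unconditionally in a function that
 * only takes the lock on a rare path, so that path is validated even when
 * it does not run:
 *
 *	void my_op(struct foo *foo, bool slow)
 *	{
 *		might_lock(&foo->lock);
 *		if (slow)
 *			mutex_lock(&foo->lock);
 *		...
 *	}
 */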

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)
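
/*
 * Usage sketch: assert a context precondition instead of documenting it in
 * a comment only:
 *
 *	static void queue_locked(struct foo *foo)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		...
 *	}
 */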

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */

