/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE) \
        LOCK_USED_IN_##__STATE, \
        LOCK_USED_IN_##__STATE##_READ, \
        LOCK_ENABLED_##__STATE, \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USED_READ,
        LOCK_USAGE_STATES,
};
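
/*
 * Illustrative sketch: with the stock two-state lockdep_states.h
 * (LOCKDEP_STATE(HARDIRQ), LOCKDEP_STATE(SOFTIRQ)), the enum above
 * expands to:
 *
 *      LOCK_USED_IN_HARDIRQ = 0, LOCK_USED_IN_HARDIRQ_READ = 1,
 *      LOCK_ENABLED_HARDIRQ = 2, LOCK_ENABLED_HARDIRQ_READ = 3,
 *      LOCK_USED_IN_SOFTIRQ = 4, LOCK_USED_IN_SOFTIRQ_READ = 5,
 *      LOCK_ENABLED_SOFTIRQ = 6, LOCK_ENABLED_SOFTIRQ_READ = 7,
 *      LOCK_USED = 8, LOCK_USED_READ = 9, LOCK_USAGE_STATES = 10
 */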

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK 2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
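
/*
 * Example decomposition (sketch, using the expansion above):
 * LOCK_ENABLED_HARDIRQ_READ (== 3) is state HARDIRQ (all bits above
 * LOCK_USAGE_DIR_MASK zero) with LOCK_USAGE_DIR_MASK set (ENABLED
 * rather than USED_IN) and LOCK_USAGE_READ_MASK set (the _READ variant).
 */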

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE) \
        __LOCKF(USED_IN_##__STATE) \
        __LOCKF(USED_IN_##__STATE##_READ) \
        __LOCKF(ENABLED_##__STATE) \
        __LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        __LOCKF(USED)
        __LOCKF(USED_READ)
};
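
/*
 * Minimal sketch of the resulting masks, assuming the expansion above:
 * LOCKF_USED_IN_HARDIRQ == 0x001, LOCKF_USED_IN_HARDIRQ_READ == 0x002,
 * LOCKF_ENABLED_HARDIRQ == 0x004, ..., LOCKF_USED == 0x100,
 * LOCKF_USED_READ == 0x200.
 */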

#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE
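
/*
 * The #include trick above ORs the per-state masks together; with the
 * stock state list each constant therefore expands to, e.g.:
 *
 *      LOCKF_ENABLED_IRQ == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0
 */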

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could exceed this limit and cause system
 * boot-up problems. So, reduce the static allocations for lockdep-related
 * structures so that everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES 16384UL
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_STACK_TRACE_ENTRIES 262144UL
#define STACK_TRACE_HASH_SIZE 8192
#else
#define MAX_LOCKDEP_ENTRIES (1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
#define MAX_STACK_TRACE_ENTRIES (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif
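
/*
 * Example sizing (sketch, with hypothetical config values): with
 * CONFIG_LOCKDEP_BITS == 15 and CONFIG_LOCKDEP_CHAINS_BITS == 16,
 * MAX_LOCKDEP_ENTRIES == 32768 and MAX_LOCKDEP_CHAINS == 65536.
 */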

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1)

#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
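
/*
 * The *5 sizes the chain_hlocks[] pool for an average of five held
 * locks per recorded chain (a sizing heuristic, not a per-chain limit).
 */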

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
                            char usage[LOCK_USAGE_CHARS]);
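
/*
 * Sketch of the resulting usage string: two characters per IRQ state
 * (the state itself and its _READ variant) plus a trailing NUL, each
 * character being one of '.', '-', '+' or '?', as printed e.g. in the
 * /proc/lockdep class listing.
 */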

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
                                  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
        return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
        return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU as they are often accessed in the fast path and
 * we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
        unsigned long chain_lookup_hits;
        unsigned int chain_lookup_misses;
        unsigned long hardirqs_on_events;
        unsigned long hardirqs_off_events;
        unsigned long redundant_hardirqs_on;
        unsigned long redundant_hardirqs_off;
        unsigned long softirqs_on_events;
        unsigned long softirqs_off_events;
        unsigned long redundant_softirqs_on;
        unsigned long redundant_softirqs_off;
        int nr_unused_locks;
        unsigned int nr_redundant_checks;
        unsigned int nr_redundant;
        unsigned int nr_cyclic_checks;
        unsigned int nr_find_usage_forwards_checks;
        unsigned int nr_find_usage_backwards_checks;

        /*
         * Per lock class locking operation stat counts
         */
        unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr) \
        this_cpu_inc(lockdep_stats.ptr)

#define debug_atomic_inc(ptr) do { \
        WARN_ON_ONCE(!irqs_disabled()); \
        __this_cpu_inc(lockdep_stats.ptr); \
} while (0)

#define debug_atomic_dec(ptr) do { \
        WARN_ON_ONCE(!irqs_disabled()); \
        __this_cpu_dec(lockdep_stats.ptr); \
} while (0)
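
/*
 * Callers of debug_atomic_inc()/debug_atomic_dec() are expected to run
 * with IRQs disabled (hence the cheaper non-atomic __this_cpu_*() ops
 * and the WARN_ON_ONCE() above). Illustrative use:
 *
 *      debug_atomic_inc(chain_lookup_misses);
 */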

#define debug_atomic_read(ptr) ({ \
        struct lockdep_stats *__cpu_lockdep_stats; \
        unsigned long long __total = 0; \
        int __cpu; \
        for_each_possible_cpu(__cpu) { \
                __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \
                __total += __cpu_lockdep_stats->ptr; \
        } \
        __total; \
})
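
/*
 * debug_atomic_read() sums one counter over all possible CPUs. It is
 * meant for the statistics read side and tolerates concurrent updates,
 * so the total may be slightly stale. Illustrative use:
 *
 *      unsigned long long hits = debug_atomic_read(chain_lookup_hits);
 */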

static inline void debug_class_ops_inc(struct lock_class *class)
{
        int idx;

        idx = class - lock_classes;
        __debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
        int idx, cpu;
        unsigned long ops = 0;

        idx = class - lock_classes;
        for_each_possible_cpu(cpu)
                ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
        return ops;
}

#else
# define __debug_atomic_inc(ptr) do { } while (0)
# define debug_atomic_inc(ptr) do { } while (0)
# define debug_atomic_dec(ptr) do { } while (0)
# define debug_atomic_read(ptr) 0
# define debug_class_ops_inc(ptr) do { } while (0)
#endif