/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES		8UL

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};
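
/*
 * While a lock is held, only locks with an equal or lower wait type may
 * be acquired. For example, taking a spinlock_t (LD_WAIT_CONFIG) while
 * holding a raw_spinlock_t (LD_WAIT_SPIN) is reported with
 * CONFIG_PROVE_RAW_LOCK_NESTING=y, because spinlock_t turns into a
 * sleeping lock on PREEMPT_RT:
 *
 *	raw_spin_lock(&raw_lock);
 *	spin_lock(&lock);	<- invalid, may sleep on PREEMPT_RT
 */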

enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
	LD_LOCK_MAX,
};

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally we report on USED and USED_READ.
 */
#define XXX_LOCK_USAGE_STATES	2
#define LOCK_TRACE_STATES	(XXX_LOCK_USAGE_STATES*4 + 2)
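
/*
 * Worked out: lockdep_states.h currently lists HARDIRQ and SOFTIRQ, so
 * per class: 2 states * 4 usage bits each (USED_IN, USED_IN_READ,
 * ENABLED, ENABLED_READ) + USED + USED_READ = 10 stack traces.
 */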

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with the single
 * depth subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
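
/*
 * Illustration (sketch of the rq->lock case; the exact helpers differ
 * by kernel version): the second runqueue lock is taken with the single
 * depth subclass, so both lookups hit the class_cache[] kept in
 * lockdep_map instead of the global class hash:
 *
 *	raw_spin_lock(&rq1->lock);
 *	raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 */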

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
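
/*
 * Example (a minimal sketch; obj and its lock are illustrative): a
 * dynamically allocated key must be registered before first use and
 * unregistered before its memory is freed:
 *
 *	struct lock_class_key *key = kzalloc(sizeof(*key), GFP_KERNEL);
 *
 *	lockdep_register_key(key);
 *	lockdep_set_class(&obj->lock, key);
 *	...
 *	lockdep_unregister_key(key);
 *	kfree(key);
 */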

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;

#define LOCKSTAT_POINTS		4

struct lockdep_map;
typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
			   const struct lockdep_map *b);
typedef void (*lock_print_fn)(const struct lockdep_map *map);
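
/*
 * A cmp_fn defines an acquisition order within a single lock class, so
 * lockdep can validate same-class nesting instead of reporting it as a
 * deadlock. A minimal sketch (illustrative name, address order):
 *
 *	static int my_lock_cmp_fn(const struct lockdep_map *a,
 *				  const struct lockdep_map *b)
 *	{
 *		return a < b ? -1 : 1;
 *	}
 *
 * A negative return means the first lock may be held while acquiring
 * the second; the paired print_fn describes both locks when the order
 * is violated.
 */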

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	lock_cmp_fn			cmp_fn;
	lock_print_fn			print_fn;

	unsigned int			subclass;
	/*
	 * Generation counter, used when doing certain classes of graph
	 * walking, to ensure that we check one node only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];

	const char			*name;
	/*
	 * Number of lock classes that share this name, used to
	 * disambiguate them in reports:
	 */
	int				name_version;

	u8				wait_type_inner;
	u8				wait_type_outer;
	u8				lock_type;
	/* u8				hole; */

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};
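
/*
 * Sketch of how one wait/hold sample is folded into a lock_time; the
 * real accumulation lives in kernel/locking/lockdep.c:
 *
 *	static void lock_time_inc(struct lock_time *lt, u64 time)
 *	{
 *		if (time > lt->max)
 *			lt->max = time;
 *		if (time < lt->min || !lt->nr)
 *			lt->min = time;
 *		lt->total += time;
 *		lt->nr++;
 *	}
 */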

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	u8				wait_type_outer;	/* can be taken in this context */
	u8				wait_type_inner;	/* represents this context */
	u8				lock_type;
	/* u8				hole; */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
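
/*
 * Example (a minimal sketch; my_lock and my_key are illustrative): lock
 * primitives embed a lockdep_map next to the real lock word and
 * initialize it once with a name and key:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	lockdep_init_map(&lock->dep_map, "my_lock", &my_key, 0);
 */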

struct pin_cookie { unsigned int val; };
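
/*
 * A pin_cookie is handed out by lockdep_pin_lock() and must be passed
 * back to lockdep_unpin_lock(); it asserts that the lock stays held
 * across a region that might otherwise drop and retake it (sketch,
 * modelled on the scheduler's rq->lock pinning):
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */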

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int sync:1;
	unsigned int references:11;					/* 32 bits */
	unsigned int pin_count;
};

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */