// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non-RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wakeup is
 *   missed.
 *
 * - Non-RT spin/rwlocks disable preemption and, depending on the variant,
 *   interrupts as well. Disabling preemption has the side effect of
 *   disabling migration and preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
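
/*
 * Illustrative usage sketch, assuming a hypothetical demo_lock (not part of
 * this file): callers keep using the regular spinlock API. On PREEMPT_RT
 * the critical section is preemptible, but migration stays disabled and an
 * RCU read side critical section is held across it:
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *
 *	spin_lock(&demo_lock);
 *	...
 *	spin_unlock(&demo_lock);
 *
 * spin_lock() may block on contention here; the task state is preserved
 * as described above.
 */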
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS						\
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()						\
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)

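/*
 * Illustrative example, assuming a hypothetical some_lock (not from this
 * file): the RCU offset above makes the resched check accept legitimate
 * nesting such as
 *
 *	rcu_read_lock();
 *	spin_lock(&some_lock);
 *	...
 *	spin_unlock(&some_lock);
 *	rcu_read_unlock();
 *
 * which is valid on RT because the substituted "spinlock" may block.
 */
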
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	lockdep_assert(!current->pi_blocked_on);

	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}
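
/*
 * All lock/trylock/unlock fast paths in this file follow the same pattern:
 * a single cmpxchg on the rtmutex owner field, with NULL meaning "unlocked"
 * and the current task pointer meaning "locked by current". Only when that
 * cmpxchg fails is the corresponding slow path in rtmutex.c taken.
 */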

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	rtlock_might_resched();
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);

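/*
 * On success the trylock performs the same bookkeeping as the lock path:
 * a lockdep acquire with trylock set, an RCU read side critical section
 * and a migration disable. On failure no state is touched.
 */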
static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

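/*
 * Bottom halves are disabled first to mirror the non-RT calling
 * convention: on success the caller holds the lock with BHs disabled,
 * on failure BHs are re-enabled before returning.
 */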
int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()
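
/*
 * The two macros above hook the state preservation described at the top of
 * this file into the rwbase template: the task state is stashed before
 * blocking on the rtlock and restored once the lock has been acquired.
 */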

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
	rtlock_slowlock_locked(rtm);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_release(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}

#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_pre_schedule()

#define rwbase_schedule()				\
	schedule_rtlock()

#define rwbase_post_schedule()

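/*
 * rwlock_t waiters cannot be interrupted by signals, so the signal check is
 * stubbed to 0, and blocking maps directly onto schedule_rtlock(). With
 * these hooks in place the shared reader/writer core below is instantiated
 * for the rwlock case.
 */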
#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

void __sched rt_read_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

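/*
 * Illustrative example, assuming a hypothetical my_rwlock (not part of this
 * file): the regular rwlock API is unchanged on RT, and both readers and
 * writers may block on contention:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	read_lock(&my_rwlock);
 *	...
 *	read_unlock(&my_rwlock);
 */
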
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

void __sched rt_read_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif