// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) Unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock()#3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair, but writers, which should be avoided in
 * RT tasks (think mmap_sem), are subject to the rtmutex priority/DL
 * inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one by one reader boosting/handover mechanism is a
 * major surgery for a very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
 * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
 * and _release() (or stronger).
 *
 * Common code shared between RT rw_semaphore and rwlock
 */
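
/*
 * For orientation, a sketch of the state encoding of rwb->readers,
 * interpreted as a signed int and assuming the usual bias definitions
 * from rwbase_rt.h (READER_BIAS = 1U << 31, WRITER_BIAS = 1U << 30):
 *
 *	readers == READER_BIAS		unlocked, reader fast path open
 *	READER_BIAS < readers < 0	read locked, fast path still open
 *	0 <= readers < WRITER_BIAS	writer removed the bias; the value
 *					is the number of readers still in
 *					the critical section
 *	readers == WRITER_BIAS		write locked
 */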

static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment the reader count, if rwb->readers < 0, i.e. READER_BIAS
	 * is set.
	 */
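	/*
	 * atomic_try_cmpxchg_acquire() updates @r with the current value
	 * on failure, so the loop retries against the freshly observed
	 * count and gives up once it turns non-negative, i.e. a writer is
	 * pending or has the lock.
	 */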
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}
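
/*
 * A minimal, purely illustrative userspace analogue of the fast path
 * above, assuming C11 atomics; demo_read_trylock() is a made-up name,
 * not kernel API:
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static bool demo_read_trylock(atomic_int *readers)
 *	{
 *		int r = atomic_load_explicit(readers, memory_order_relaxed);
 *
 *		while (r < 0) {
 *			// On failure, r is reloaded with the current value.
 *			if (atomic_compare_exchange_weak_explicit(readers,
 *					&r, r + 1, memory_order_acquire,
 *					memory_order_relaxed))
 *				return true;
 *		}
 *		return false;
 *	}
 *
 * The acquire on success pairs with the release in the write unlock path,
 * matching the "Fast-path orderings" note above.
 */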

static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	int ret;

	rwbase_pre_schedule();
	raw_spin_lock_irq(&rtm->wait_lock);

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock
	 * held, so this can't result in the following race:
	 *
	 * Reader1              Reader2              Writer
	 *                      down_read()
	 *                                           down_write()
	 *                                           rtmutex_lock(m)
	 *                                           wait()
	 * down_read()
	 * unlock(m->wait_lock)
	 *                      up_read()
	 *                      wake(Writer)
	 *                                           lock(m->wait_lock)
	 *                                           sem->writelocked=true
	 *                                           unlock(m->wait_lock)
	 *
	 *                                           up_write()
	 *                                           sem->writelocked=false
	 *                                           rtmutex_unlock(m)
	 *                      down_read()
	 *                                           down_write()
	 *                                           rtmutex_lock(m)
	 *                                           wait()
	 * rtmutex_lock(m)
	 *
	 * That would put Reader1 behind the writer waiting on
	 * Reader2 to call up_read(), which might be unbounded.
	 */

	trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

	/*
	 * For rwlocks this returns 0 unconditionally, so the below
	 * !ret conditionals are optimized out.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state);

	/*
	 * On success the rtmutex is held, so there can't be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again.
	 *
	 * rtmutex->wait_lock has to be unlocked in any case, of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);
	raw_spin_unlock_irq(&rtm->wait_lock);
	if (!ret)
		rwbase_rtmutex_unlock(rtm);

	trace_contention_end(rwb, ret);
	rwbase_post_schedule();
	return ret;
}

static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	lockdep_assert(!current->pi_blocked_on);

	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}

static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;
	DEFINE_RT_WAKE_Q(wqh);

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case that can happen is a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		rt_mutex_wake_q_add_task(&wqh, owner, state);

	/* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 *
	 * dec_and_test() is fully ordered, provides RELEASE.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}

static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	/*
	 * _release() is needed in case a reader is in the fast path, pairing
	 * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
	 */
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}
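
/*
 * Bias arithmetic for the two callers below, again assuming the usual
 * READER_BIAS/WRITER_BIAS values from rwbase_rt.h:
 *
 *	write_unlock:	readers += READER_BIAS - WRITER_BIAS
 *			-> READER_BIAS, i.e. fully unlocked
 *	downgrade:	readers += READER_BIAS - (WRITER_BIAS - 1)
 *			-> READER_BIAS + 1, i.e. unlocked with the current
 *			   task accounted as the one remaining reader
 */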

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
	/* Can do without CAS because we're serialized by wait_lock. */
	lockdep_assert_held(&rwb->rtmutex.wait_lock);

	/*
	 * _acquire is needed in case the reader is in the fast path, pairing
	 * with rwbase_read_unlock(), provides ACQUIRE.
	 */
	if (!atomic_read_acquire(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		return 1;
	}

	return 0;
}

static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into slow path */
	atomic_sub(READER_BIAS, &rwb->readers);
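	/*
	 * From here on rwb->readers is the plain count of readers still
	 * inside the critical section (>= 0), so the reader fast path
	 * (readers < 0) fails until __rwbase_write_unlock() restores the
	 * bias.
	 */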

	rwbase_pre_schedule();

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb))
		goto out_unlock;

	rwbase_set_and_save_current_state(state);
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
	for (;;) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			rwbase_restore_current_state();
			__rwbase_write_unlock(rwb, 0, flags);
			rwbase_post_schedule();
			trace_contention_end(rwb, -EINTR);
			return -EINTR;
		}

		if (__rwbase_write_trylock(rwb))
			break;

		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		rwbase_schedule();
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);

		set_current_state(state);
	}
	rwbase_restore_current_state();
	trace_contention_end(rwb, 0);

out_unlock:
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_post_schedule();
	return 0;
}

static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb)) {
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}