// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  struct lockdep_map *nest_lock,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
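
/*
 * Illustrative sketch, not part of this file: the typical use of the
 * _nested() variant is taking two locks of the same lockdep class in a
 * well-defined order. struct node and lock_pair() are hypothetical.
 *
 *	struct node {
 *		struct rt_mutex lock;
 *	};
 *
 *	static void lock_pair(struct node *parent, struct node *child)
 *	{
 *		rt_mutex_lock_nested(&parent->lock, 0);
 *		rt_mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	}
 */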

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns:
 * 0 on success
 * -EINTR when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
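
/*
 * Illustrative caller pattern, not part of this file. example_lock and
 * do_work() are hypothetical; the point is that -EINTR must be handed
 * back up when a signal interrupts the wait.
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	static int do_work(void)
 *	{
 *		int ret = rt_mutex_lock_interruptible(&example_lock);
 *
 *		if (ret)
 *			return ret;
 *		...
 *		rt_mutex_unlock(&example_lock);
 *		return 0;
 *	}
 */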

/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns:
 * 0 on success
 * -EINTR when interrupted by a fatal signal
 */
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 * 1 on success
 * 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
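
/*
 * Illustrative caller pattern, not part of this file: opportunistic
 * acquisition from a context that must not sleep. The lock and the
 * fallback path are hypothetical.
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		do_protected_work();
 *		rt_mutex_unlock(&example_lock);
 *	} else {
 *		defer_work();
 *	}
 */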

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant that, since futex variants
 * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock: The rt_mutex to be unlocked
 * @wqh: The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup. preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock: The rt_mutex to be initialized
 * @name: The lock name used for debugging
 * @key: The lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
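
/*
 * Callers normally go through the rt_mutex_init() wrapper from
 * <linux/rtmutex.h>, which supplies the lockdep name and a static
 * lock_class_key. A minimal sketch; struct my_object is hypothetical:
 *
 *	struct my_object {
 *		struct rt_mutex lock;
 *	};
 *
 *	static void my_object_setup(struct my_object *obj)
 *	{
 *		rt_mutex_init(&obj->lock);
 *	}
 */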

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_clear_owner(lock);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock: the rt_mutex to take
 * @waiter: the pre-initialized rt_mutex_waiter
 * @task: the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock: the rt_mutex to take
 * @waiter: the pre-initialized rt_mutex_waiter
 * @task: the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock: the rt_mutex we were woken on
 * @to: the timeout, NULL if none. The hrtimer should already have
 *      been started.
 * @waiter: the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, true);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock: the rt_mutex we were woken on
 * @waiter: the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, false);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
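
/*
 * Illustrative sequence, not part of this file: how a PI-futex style
 * caller is expected to combine the proxy-lock calls above. Waiter
 * setup, timeout handling and locking details are condensed; see
 * kernel/futex/ for the real usage.
 *
 *	rt_mutex_init_waiter(&waiter);
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	if (ret == 0) {
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	(we became owner after all)
 *	}
 */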

/*
 * Recheck the pi chain, in case our priority changed.
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
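
/*
 * Illustrative caller pattern, not part of this file: mutex_lock_io()
 * brackets the lock wait with io_schedule_prepare()/io_schedule_finish(),
 * so time blocked on the mutex is accounted as I/O wait. It fits where
 * blocking on the mutex effectively stands in for blocking on a device.
 * device_lock and submit_and_wait() are hypothetical.
 *
 *	mutex_lock_io(&device_lock);
 *	submit_and_wait(&req);
 *	mutex_unlock(&device_lock);
 */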

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);

#endif /* CONFIG_PREEMPT_RT */