// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#include <linux/hung_task.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
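
/*
 * Illustrative usage (a sketch, not part of this file): callers normally
 * reach __mutex_init() through the mutex_init() wrapper macro, or define
 * a mutex statically with DEFINE_MUTEX(). The names below are
 * hypothetical:
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static void my_func(void)
 *	{
 *		mutex_lock(&my_lock);
 *		// ... critical section, my_lock held ...
 *		mutex_unlock(&my_lock);
 *	}
 */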

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
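
/*
 * A sketch of the owner-word encoding that __owner_task() and
 * __owner_flags() split apart: task_struct pointers are sufficiently
 * aligned that the low bits of lock->owner are free to carry state (see
 * the MUTEX_FLAG_* definitions in mutex.h for the authoritative values):
 *
 *	bit 0: MUTEX_FLAG_WAITERS - non-empty wait list; unlock must
 *	                            issue a wakeup
 *	bit 1: MUTEX_FLAG_HANDOFF - unlock needs to hand the lock to the
 *	                            top waiter
 *	bit 2: MUTEX_FLAG_PICKUP  - handoff has been done, waiting for
 *	                            pickup
 */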

/* Do not use the return value as a pointer directly. */
unsigned long mutex_get_owner(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	return (unsigned long)__owner_task(owner);
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	MUTEX_WARN_ON(lock->magic != lock);

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
#endif
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
	hung_task_clear_blocker();
#endif
}
/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF,
 * preserves WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}
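
/*
 * A sketch of the handoff protocol implemented by __mutex_handoff() and
 * __mutex_trylock_common() above (illustrative, not extra locking logic):
 *
 *	waiter:   trylock fails, sets MUTEX_FLAG_HANDOFF, goes to sleep
 *	unlocker: sees HANDOFF set, does not clear the owner field, and
 *	          instead stores <top waiter | PICKUP | WAITERS>
 *	waiter:   wakes up, __mutex_trylock_common() finds PICKUP with
 *	          itself as the designated owner, clears the flag and
 *	          owns the lock
 */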

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. We have
		 * already disabled preemption, which is equivalent to an
		 * RCU read-side critical section in the optimistic
		 * spinning code, so the task_struct won't go away during
		 * the spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code, so
	 * the task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
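
/*
 * Illustrative wait/wound usage (a sketch following the pattern in
 * Documentation/locking/ww-mutex-design.rst; the names are hypothetical).
 * On -EDEADLK the caller backs off: it drops everything it holds in this
 * context, sleeps on the contended lock with ww_mutex_lock_slow(), and
 * retries. All locks must be released before ww_acquire_fini():
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&obj_a->lock, &ctx);
 *	// ...
 *	ret = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		// ... retry taking obj_a->lock ...
 *	}
 *	// ... use the objects ...
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_acquire_fini(&ctx);
 */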

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	unsigned long flags;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);

		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
 * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
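
/*
 * Illustrative use (a sketch; the names are hypothetical):
 * opportunistically take one more lock inside a scan without risking
 * deadlock, simply skipping the object when the trylock fails:
 *
 *	if (ww_mutex_trylock(&obj->lock, &ctx)) {
 *		// ... use obj ...
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */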

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
		     struct lockdep_map *nest)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_killable);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;
	unsigned long flags;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
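
/*
 * Typical call pattern (a sketch; the names are hypothetical). The error
 * is propagated so the interrupted syscall can be restarted or fail
 * cleanly; mutex_lock_killable() below is used the same way when only
 * fatal signals should abort the wait:
 *
 *	ret = mutex_lock_interruptible(&dev->lock);
 *	if (ret)
 *		return ret;	// -EINTR
 *	// ... critical section ...
 *	mutex_unlock(&dev->lock);
 */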

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
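
/*
 * Illustrative use (a sketch; the names are hypothetical): take
 * mutex_lock_io() when the lock holder is expected to be blocked on I/O,
 * so that time spent waiting for the mutex is charged to iowait rather
 * than idle:
 *
 *	mutex_lock_io(&dev->io_lock);
 *	// ... submit and wait for I/O under the lock ...
 *	mutex_unlock(&dev->io_lock);
 */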

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	MUTEX_WARN_ON(lock->magic != lock);
	return __mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock);
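
/*
 * Typical call pattern (a sketch; the names are hypothetical). Note the
 * spin_trylock()-style return convention called out above: nonzero means
 * the lock was taken:
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		// ... fast path, lock held ...
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		// ... contended: defer, retry, or fall back ...
 *	}
 */
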
#else
int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);
	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(_mutex_trylock_nest_lock);
#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return: 1 if we decremented @cnt to 0 and hold @lock, 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
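
/*
 * Typical call pattern (a sketch; the names are hypothetical): drop a
 * reference and, only on the final put, tear the object down with the
 * lock held:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) {
 *		// last reference: obj->lock is held here
 *		list_del(&obj->node);
 *		mutex_unlock(&obj->lock);
 *		kfree(obj);
 *	}
 */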