#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

use crate::cell::UnsafeCell;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::locks as sys;

/// A reentrant mutual exclusion lock.
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// This is used by `stdout().lock()` and friends.
///
/// ## Implementation details
///
/// The `owner` field tracks which thread has locked the mutex.
///
/// We use `current_thread_unique_ptr()` as the thread identifier,
/// which is just the address of a thread-local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so it needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so it needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it's not equal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
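///
/// A minimal usage sketch of the reentrant behavior (marked `ignore`, since this
/// is a crate-internal type and not exercised as a public doctest):
///
/// ```ignore
/// let m = ReentrantMutex::new(0);
/// let first = m.lock();
/// // Same thread: this does not block, it only increments `lock_count`.
/// let second = m.lock();
/// assert_eq!(*second, 0);
/// drop(second); // lock_count: 2 -> 1, the mutex stays held
/// drop(first); // lock_count: 1 -> 0, the mutex is released
/// ```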
pub struct ReentrantMutex<T> {
    mutex: sys::Mutex,
    owner: AtomicUsize,
    lock_count: UnsafeCell<u32>,
    data: T,
}

unsafe impl<T: Send> Send for ReentrantMutex<T> {}
unsafe impl<T: Send> Sync for ReentrantMutex<T> {}

impl<T> UnwindSafe for ReentrantMutex<T> {}
impl<T> RefUnwindSafe for ReentrantMutex<T> {}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
///
/// # Mutability
///
/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
/// because implementation of the trait would violate Rust’s reference aliasing
/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
/// guarded data.
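///
/// A short sketch of that pattern (`ignore`d, as this is a crate-internal type):
///
/// ```ignore
/// use std::cell::RefCell;
///
/// let m = ReentrantMutex::new(RefCell::new(0));
/// let guard = m.lock();
/// // Mutate through the `RefCell`, not through the guard itself.
/// *guard.borrow_mut() += 1;
/// assert_eq!(*guard.borrow(), 1);
/// ```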
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
    lock: &'a ReentrantMutex<T>,
}

impl<T> !Send for ReentrantMutexGuard<'_, T> {}

impl<T> ReentrantMutex<T> {
    /// Creates a new reentrant mutex in an unlocked state.
    pub const fn new(t: T) -> ReentrantMutex<T> {
        ReentrantMutex {
            mutex: sys::Mutex::new(),
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
    }

    /// Acquires the mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the caller until the mutex becomes available.
    /// Upon returning, the thread is the only thread with the mutex held. If the
    /// thread calling this method already holds the lock, the call succeeds
    /// without blocking.
    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
            } else {
                self.mutex.lock();
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantMutexGuard { lock: self }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
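    ///
    /// A minimal sketch of the two outcomes (`ignore`d, crate-internal API):
    ///
    /// ```ignore
    /// let m = ReentrantMutex::new(());
    /// match m.try_lock() {
    ///     Some(_guard) => { /* lock acquired; released when `_guard` is dropped */ }
    ///     None => { /* the mutex is currently held by another thread */ }
    /// }
    /// ```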
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
                Some(ReentrantMutexGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantMutexGuard { lock: self })
            } else {
                None
            }
        }
    }

    unsafe fn increment_lock_count(&self) {
        *self.lock_count.get() = (*self.lock_count.get())
            .checked_add(1)
            .expect("lock count overflow in reentrant mutex");
    }
}

impl<T> Deref for ReentrantMutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.lock.data
    }
}

impl<T> Drop for ReentrantMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.unlock();
            }
        }
    }
}

/// Get an address that is unique per running thread.
///
/// This can be used as a non-null `usize`-sized ID.
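///
/// A quick illustration of the intended property (not a doctest; the helper is
/// private to this module):
///
/// ```ignore
/// let id = current_thread_unique_ptr();
/// // Stable within a single thread...
/// assert_eq!(id, current_thread_unique_ptr());
/// // ...and distinct across threads, since each live thread has its own `X`.
/// std::thread::spawn(move || assert_ne!(id, current_thread_unique_ptr()))
///     .join()
///     .unwrap();
/// ```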
pub fn current_thread_unique_ptr() -> usize {
    // Use a non-drop type to make sure it's still available during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| <*const _>::addr(x))
}