#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

use crate::cell::UnsafeCell;
use crate::fmt;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::sync as sys;

/// A re-entrant mutual exclusion lock
///
/// This lock will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// # Examples
///
/// Allow recursively calling a function needing synchronization from within
/// a callback (this is how [`StdoutLock`](crate::io::StdoutLock) is currently
/// implemented):
///
/// ```
/// #![feature(reentrant_lock)]
///
/// use std::cell::RefCell;
/// use std::sync::ReentrantLock;
///
/// pub struct Log {
///     data: RefCell<String>,
/// }
///
/// impl Log {
///     pub fn append(&self, msg: &str) {
///         self.data.borrow_mut().push_str(msg);
///     }
/// }
///
/// static LOG: ReentrantLock<Log> = ReentrantLock::new(Log { data: RefCell::new(String::new()) });
///
/// pub fn with_log<R>(f: impl FnOnce(&Log) -> R) -> R {
///     let log = LOG.lock();
///     f(&*log)
/// }
///
/// with_log(|log| {
///     log.append("Hello");
///     with_log(|log| log.append(" there!"));
/// });
/// ```
///
// # Implementation details
//
// The 'owner' field tracks which thread has locked the mutex.
//
// We use current_thread_unique_ptr() as the thread identifier,
// which is just the address of a thread local variable.
//
// If `owner` is set to the identifier of the current thread,
// we assume the mutex is already locked and instead of locking it again,
// we increment `lock_count`.
//
// When unlocking, we decrement `lock_count`, and only unlock the mutex when
// it reaches zero.
//
// `lock_count` is protected by the mutex and only accessed by the thread that has
// locked the mutex, so needs no synchronization.
//
// `owner` can be checked by other threads that want to see if they already
// hold the lock, so needs to be atomic. If it compares equal, we're on the
// same thread that holds the mutex and memory access can use relaxed ordering
// since we're not dealing with multiple threads. If it's not equal,
// synchronization is left to the mutex, making relaxed memory ordering for
// the `owner` field fine in all cases.
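//
// An illustrative sketch of this bookkeeping (not a doctest, just a trace of
// the steps described above; the guard type is `!Send`, so both guards are
// dropped on the thread that locked):
//
//     let lock = ReentrantLock::new(());
//     let a = lock.lock(); // mutex locked, owner = this thread, lock_count = 1
//     let b = lock.lock(); // owner matches, so only lock_count is bumped to 2
//     drop(b);             // lock_count drops back to 1, mutex stays locked
//     drop(a);             // lock_count reaches 0, owner cleared, mutex unlocked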
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLock<T: ?Sized> {
    mutex: sys::Mutex,
    owner: AtomicUsize,
    lock_count: UnsafeCell<u32>,
    data: T,
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Sync for ReentrantLock<T> {}

// Because of the `UnsafeCell`, these traits are not implemented automatically
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: UnwindSafe + ?Sized> UnwindSafe for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for ReentrantLock<T> {}

/// An RAII implementation of a "scoped lock" of a re-entrant lock. When this
/// structure is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] implementation.
///
/// This structure is created by the [`lock`](ReentrantLock::lock) method on
/// [`ReentrantLock`].
///
/// # Mutability
///
/// Unlike [`MutexGuard`](super::MutexGuard), `ReentrantLockGuard` does not
/// implement [`DerefMut`](crate::ops::DerefMut), because implementation of
/// the trait would violate Rust’s reference aliasing rules. Use interior
/// mutability (usually [`RefCell`](crate::cell::RefCell)) in order to mutate
/// the guarded data.
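///
/// # Examples
///
/// A minimal sketch of mutating guarded data through a
/// [`RefCell`](crate::cell::RefCell) (the guard only hands out `&T`, so the
/// cell supplies the mutability):
///
/// ```
/// #![feature(reentrant_lock)]
/// use std::cell::RefCell;
/// use std::sync::ReentrantLock;
///
/// let lock = ReentrantLock::new(RefCell::new(0));
/// let guard = lock.lock();
/// *guard.borrow_mut() += 1;
/// assert_eq!(*guard.borrow(), 1);
/// ```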
#[must_use = "if unused the ReentrantLock will immediately unlock"]
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLockGuard<'a, T: ?Sized + 'a> {
    lock: &'a ReentrantLock<T>,
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> !Send for ReentrantLockGuard<'_, T> {}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T> ReentrantLock<T> {
    /// Creates a new re-entrant lock in an unlocked state ready for use.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// ```
    pub const fn new(t: T) -> ReentrantLock<T> {
        ReentrantLock {
            mutex: sys::Mutex::new(),
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
    }

    /// Consumes this lock, returning the underlying data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    ///
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// assert_eq!(lock.into_inner(), 0);
    /// ```
    pub fn into_inner(self) -> T {
        self.data
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> ReentrantLock<T> {
    /// Acquires the lock, blocking the current thread until it is able to do
    /// so.
    ///
    /// This function will block the caller until it is available to acquire
    /// the lock. Upon returning, the thread is the only thread with the lock
    /// held. When the thread calling this method already holds the lock, the
    /// call succeeds without blocking.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::cell::Cell;
    /// use std::sync::{Arc, ReentrantLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(ReentrantLock::new(Cell::new(0)));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// thread::spawn(move || {
    ///     c_lock.lock().set(10);
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(lock.lock().get(), 10);
    /// ```
    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
            } else {
                self.mutex.lock();
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantLockGuard { lock: self }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantLock` mutably, no actual locking
    /// needs to take place -- the mutable borrow statically guarantees no locks
    /// exist.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let mut lock = ReentrantLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
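    ///
    /// A minimal sketch of the intended behavior (marked `ignore` because this
    /// method is currently crate-private and so cannot be exercised from a
    /// doctest):
    ///
    /// ```ignore
    /// #![feature(reentrant_lock)]
    /// use std::cell::Cell;
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(Cell::new(0));
    /// // The lock is free, so this does not block and returns a guard.
    /// if let Some(guard) = lock.try_lock() {
    ///     guard.set(1);
    /// }
    /// assert_eq!(lock.lock().get(), 1);
    /// ```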
    pub(crate) fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count()?;
                Some(ReentrantLockGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantLockGuard { lock: self })
            } else {
                None
            }
        }
    }

    unsafe fn increment_lock_count(&self) -> Option<()> {
        *self.lock_count.get() = (*self.lock_count.get()).checked_add(1)?;
        Some(())
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("ReentrantLock");
        match self.try_lock() {
            Some(v) => d.field("data", &&*v),
            None => d.field("data", &format_args!("<locked>")),
        };
        d.finish_non_exhaustive()
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: Default> Default for ReentrantLock<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T> From<T> for ReentrantLock<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Deref for ReentrantLockGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.lock.data
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLockGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Display + ?Sized> fmt::Display for ReentrantLockGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.unlock();
            }
        }
    }
}

/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
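///
/// A minimal sketch of the property this provides (marked `ignore` because the
/// function is crate-private): two concurrently running threads observe
/// different addresses for their own copies of the thread-local `X`.
///
/// ```ignore
/// let main_id = current_thread_unique_ptr();
/// std::thread::spawn(move || {
///     assert_ne!(current_thread_unique_ptr(), main_id);
/// }).join().unwrap();
/// ```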
pub(crate) fn current_thread_unique_ptr() -> usize {
    // Use a non-drop type to make sure it's still available during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| <*const _>::addr(x))
}