// Copyright 2018 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::{
    mutex::{RawMutex, RawMutexFair, RawMutexTimed},
    GuardNoSend,
};
use core::{
    cell::{Cell, UnsafeCell},
    fmt,
    marker::PhantomData,
    mem,
    num::NonZeroUsize,
    ops::Deref,
    sync::atomic::{AtomicUsize, Ordering},
};

#[cfg(feature = "arc_lock")]
use alloc::sync::Arc;
#[cfg(feature = "arc_lock")]
use core::mem::ManuallyDrop;
#[cfg(feature = "arc_lock")]
use core::ptr;

#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;

#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However the ID of a thread that has exited can be
/// re-used since that thread is no longer active.
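///
/// # Example
///
/// A minimal sketch of an implementation based on the address of a
/// thread-local variable, assuming `std` is available (the `StdThreadId`
/// name is illustrative, not part of this crate):
///
/// ```ignore
/// use core::num::NonZeroUsize;
///
/// struct StdThreadId;
///
/// unsafe impl lock_api::GetThreadId for StdThreadId {
///     const INIT: Self = StdThreadId;
///
///     fn nonzero_thread_id(&self) -> NonZeroUsize {
///         // The thread-local must have a non-zero size so that each live
///         // thread gets a distinct, non-null address for it.
///         thread_local!(static KEY: u8 = 0);
///         KEY.with(|x| {
///             NonZeroUsize::new(x as *const u8 as usize)
///                 .expect("thread-local address is never null")
///         })
///     }
/// }
/// ```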
pub unsafe trait GetThreadId {
    /// Initial value.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}

/// A raw mutex type that wraps another raw mutex to provide reentrancy.
///
/// Although this has the same methods as the [`RawMutex`] trait, it does
/// not implement it, and should not be used in the same way, since this
/// mutex can successfully acquire a lock multiple times in the same thread.
/// Only use this when you know you want a raw mutex that can be locked
/// reentrantly; you probably want [`ReentrantMutex`] instead.
///
/// [`RawMutex`]: trait.RawMutex.html
/// [`ReentrantMutex`]: struct.ReentrantMutex.html
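///
/// # Example
///
/// A sketch of the lock-counting behaviour, assuming concrete `R`/`G`
/// implementations such as the `RawMutex` and `RawThreadId` types provided
/// by the `parking_lot` crate:
///
/// ```ignore
/// use lock_api::RawReentrantMutex;
/// use parking_lot::{RawMutex, RawThreadId};
///
/// static LOCK: RawReentrantMutex<RawMutex, RawThreadId> = RawReentrantMutex::INIT;
///
/// fn demo() {
///     LOCK.lock();
///     LOCK.lock(); // Same thread: only the lock count is incremented.
///     unsafe {
///         LOCK.unlock(); // Still held; the count drops back to 1.
///         LOCK.unlock(); // Now the inner mutex is actually released.
///     }
/// }
/// ```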
pub struct RawReentrantMutex<R, G> {
    owner: AtomicUsize,
    lock_count: Cell<usize>,
    mutex: R,
    get_thread_id: G,
}

unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}

impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Initial value for an unlocked mutex.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = RawReentrantMutex {
        owner: AtomicUsize::new(0),
        lock_count: Cell::new(0),
        mutex: R::INIT,
        get_thread_id: G::INIT,
    };

    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        if self.owner.load(Ordering::Relaxed) == id {
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires this mutex, blocking if it's held by another thread.
    #[inline]
    pub fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire this mutex without blocking. Returns `true`
    /// if the lock was successfully acquired and `false` otherwise.
    #[inline]
    pub fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Unlocks this mutex. The inner mutex may not be unlocked if
    /// this mutex was acquired previously in the current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.mutex.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        self.owner.load(Ordering::Relaxed) == id
    }
}

impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
    /// may not be unlocked if this mutex was acquired previously in the
    /// current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`; however, it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn bump(&self) {
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            self.owner.store(0, Ordering::Relaxed);
            self.lock_count.set(0);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
            self.lock_count.set(1);
        }
    }
}

impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Attempts to acquire this lock until a timeout is reached.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
        self.lock_internal(|| self.mutex.try_lock_until(timeout))
    }

    /// Attempts to acquire this lock until a timeout is reached.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
        self.lock_internal(|| self.mutex.try_lock_for(timeout))
    }
}

/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
///   deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
///   Use a `RefCell` if you need this.
///
/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
/// primitive.
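///
/// # Example
///
/// A sketch of reentrant locking combined with `RefCell` for mutation,
/// assuming concrete `R`/`G` parameters (e.g. via the `ReentrantMutex` type
/// alias exported by the `parking_lot` crate):
///
/// ```ignore
/// use std::cell::RefCell;
/// use parking_lot::ReentrantMutex;
///
/// let m = ReentrantMutex::new(RefCell::new(0));
/// let first = m.lock();
/// let second = m.lock(); // Same thread: does not deadlock.
/// *second.borrow_mut() += 1;
/// drop(second);
/// assert_eq!(*first.borrow(), 1);
/// ```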
pub struct ReentrantMutex<R, G, T: ?Sized> {
    raw: RawReentrantMutex<R, G>,
    data: UnsafeCell<T>,
}

unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}

impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[cfg(has_const_fn_trait_bound)]
    #[inline]
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[cfg(not(has_const_fn_trait_bound))]
    #[inline]
    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Consumes this mutex, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}

impl<R, G, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    ///
    /// This allows creating a reentrant mutex in a constant context on stable
    /// Rust.
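    ///
    /// # Example
    ///
    /// A sketch of a `static` reentrant mutex, assuming concrete `R`/`G`
    /// implementations such as the ones provided by `parking_lot`:
    ///
    /// ```ignore
    /// use lock_api::{GetThreadId, RawMutex as _, ReentrantMutex};
    /// use parking_lot::{RawMutex, RawThreadId};
    ///
    /// static VALUE: ReentrantMutex<RawMutex, RawThreadId, i32> =
    ///     ReentrantMutex::const_new(RawMutex::INIT, RawThreadId::INIT, 0);
    /// ```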
    #[inline]
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: raw_mutex,
                get_thread_id,
            },
        }
    }
}

impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Creates a new `ReentrantMutexGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds the lock.
    ///
    /// Calling this function when a guard has already been produced is undefined behaviour unless
    /// the guard was forgotten with `mem::forget`.
    #[inline]
    pub unsafe fn make_guard_unchecked(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        ReentrantMutexGuard {
            remutex: self,
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex, blocking the current thread until it is able
    /// to do so.
    ///
    /// If the mutex is held by another thread then this function will block the
    /// local thread until it is available to acquire the mutex. If the mutex is
    /// already held by the current thread then this function will increment the
    /// lock reference count and return immediately. Upon returning,
    /// the thread is the only thread with the mutex held. An RAII guard is
    /// returned to allow scoped unlock of the lock. When the guard goes out of
    /// scope, the mutex will be unlocked.
    #[inline]
    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        self.raw.lock();
        // SAFETY: The lock is held, as required.
        unsafe { self.make_guard_unchecked() }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
    /// guard is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_guard_unchecked() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.raw.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        self.raw.is_owned_by_current_thread()
    }

    /// Forcibly unlocks the mutex.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
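    ///
    /// # Example
    ///
    /// A sketch of the `mem::forget` pattern described above (`mutex` is
    /// assumed to be a `ReentrantMutex` visible at both points):
    ///
    /// ```ignore
    /// let guard = mutex.lock();
    /// core::mem::forget(guard); // Keep the lock held past the guard's scope.
    /// // ... later, on the same thread ...
    /// unsafe { mutex.force_unlock() };
    /// ```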
    #[inline]
    pub unsafe fn force_unlock(&self) {
        self.raw.unlock();
    }

    /// Returns the underlying raw mutex object.
    ///
    /// Note that you will most likely need to import the `RawMutex` trait from
    /// `lock_api` to be able to call functions on the raw mutex.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking a mutex while
    /// still holding a reference to a `ReentrantMutexGuard`.
    #[inline]
    pub unsafe fn raw(&self) -> &R {
        &self.raw.mutex
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
    /// when dealing with FFI.
    ///
    /// # Safety
    ///
    /// You must ensure that there are no data races when dereferencing the
    /// returned pointer, for example if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using
    /// `mem::forget`.
    #[inline]
    pub fn data_ptr(&self) -> *mut T {
        self.data.get()
    }

    /// Creates a new `ArcReentrantMutexGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds the lock.
    ///
    /// Calling this function when a guard has already been produced is undefined behaviour unless
    /// the guard was forgotten with `mem::forget`.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub unsafe fn make_arc_guard_unchecked(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
        ArcReentrantMutexGuard {
            remutex: self.clone(),
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex through an `Arc`.
    ///
    /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an
    /// `Arc` and the resulting mutex guard has no lifetime requirements.
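    ///
    /// # Example
    ///
    /// A sketch assuming concrete `R`/`G` parameters:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    ///
    /// let mutex = Arc::new(ReentrantMutex::new(0));
    /// let guard = mutex.lock_arc();
    /// drop(mutex); // The guard holds its own `Arc`, so this is fine.
    /// assert_eq!(*guard, 0);
    /// ```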
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
        self.raw.lock();
        // SAFETY: locking guarantee is upheld
        unsafe { self.make_arc_guard_unchecked() }
    }

    /// Attempts to acquire a reentrant mutex through an `Arc`.
    ///
    /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside
    /// of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_guard_unchecked() })
        } else {
            None
        }
    }
}

impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}

impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc_for(
        self: &Arc<Self>,
        timeout: R::Duration,
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_guard_unchecked() })
        } else {
            None
        }
    }
}

impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
    #[inline]
    fn default() -> ReentrantMutex<R, G, T> {
        ReentrantMutex::new(Default::default())
    }
}

impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
    #[inline]
    fn from(t: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex::new(t)
    }
}

impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.try_lock() {
            Some(guard) => f
                .debug_struct("ReentrantMutex")
                .field("data", &&*guard)
                .finish(),
            None => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }

                f.debug_struct("ReentrantMutex")
                    .field("data", &LockedPlaceholder)
                    .finish()
            }
        }
    }
}

// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.lock().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
    }
}

/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    remutex: &'a ReentrantMutex<R, G, T>,
    marker: PhantomData<(&'a T, GuardNoSend)>,
}

unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
    /// Returns a reference to the original `ReentrantMutex` object.
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
        s.remutex
    }

    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
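    ///
    /// # Example
    ///
    /// A sketch assuming concrete `R`/`G` parameters:
    ///
    /// ```ignore
    /// let mutex = ReentrantMutex::new((1, 2));
    /// let guard = mutex.lock();
    /// let first = ReentrantMutexGuard::map(guard, |pair| &pair.0);
    /// assert_eq!(*first, 1);
    /// ```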
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.remutex.raw;
        let data = f(unsafe { &*s.remutex.data.get() });
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
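    ///
    /// # Example
    ///
    /// A sketch (`mutex` is assumed to be a `ReentrantMutex`):
    ///
    /// ```ignore
    /// let mut guard = mutex.lock();
    /// ReentrantMutexGuard::unlocked(&mut guard, || {
    ///     // The mutex is released while this closure runs...
    /// });
    /// // ...and has been re-acquired by the time `unlocked` returns.
    /// ```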
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        defer!(s.remutex.raw.lock());
        f()
    }
}

impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`; however, it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for ReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.remutex.data.get() }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for ReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for ReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for ReentrantMutexGuard<'a, R, G, T>
{
}

/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`.
///
/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the
/// `ReentrantMutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has a
/// `'static` lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> {
    remutex: Arc<ReentrantMutex<R, G, T>>,
    marker: PhantomData<GuardNoSend>,
}

#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`.
    pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> {
        &s.remutex
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        defer!(s.remutex.raw.lock());
        f()
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }

        // SAFETY: ensure that the Arc's refcount is decremented
        let mut s = ManuallyDrop::new(s);
        unsafe { ptr::drop_in_place(&mut s.remutex) };
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.remutex.data.get() }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}

/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    raw: &'a RawReentrantMutex<R, G>,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock
        unsafe {
            s.raw.unlock_fair();
        }
        mem::forget(s);
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock.
        unsafe {
            self.raw.unlock();
        }
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}