1// Copyright 2018 Amanieu d'Antras
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::{
9 mutex::{RawMutex, RawMutexFair, RawMutexTimed},
10 GuardNoSend,
11};
12use core::{
13 cell::{Cell, UnsafeCell},
14 fmt,
15 marker::PhantomData,
16 mem,
17 num::NonZeroUsize,
18 ops::Deref,
19 sync::atomic::{AtomicUsize, Ordering},
20};
21
22#[cfg(feature = "arc_lock")]
23use alloc::sync::Arc;
24#[cfg(feature = "arc_lock")]
25use core::mem::ManuallyDrop;
26#[cfg(feature = "arc_lock")]
27use core::ptr;
28
29#[cfg(feature = "owning_ref")]
30use owning_ref::StableAddress;
31
32#[cfg(feature = "serde")]
33use serde::{Deserialize, Deserializer, Serialize, Serializer};
34
35/// Helper trait which returns a non-zero thread ID.
36///
37/// The simplest way to implement this trait is to return the address of a
38/// thread-local variable.
39///
40/// # Safety
41///
42/// Implementations of this trait must ensure that no two active threads share
43/// the same thread ID. However the ID of a thread that has exited can be
44/// re-used since that thread is no longer active.
pub unsafe trait GetThreadId {
    /// Initial value.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    ///
    /// The returned value must be stable for the lifetime of the thread and
    /// distinct from the ID of every other currently-active thread.
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}
56
57/// A raw mutex type that wraps another raw mutex to provide reentrancy.
58///
59/// Although this has the same methods as the [`RawMutex`] trait, it does
60/// not implement it, and should not be used in the same way, since this
61/// mutex can successfully acquire a lock multiple times in the same thread.
62/// Only use this when you know you want a raw mutex that can be locked
63/// reentrantly; you probably want [`ReentrantMutex`] instead.
64///
65/// [`RawMutex`]: trait.RawMutex.html
66/// [`ReentrantMutex`]: struct.ReentrantMutex.html
pub struct RawReentrantMutex<R, G> {
    // ID of the thread that currently holds `mutex`, or 0 when unlocked.
    // Written only when acquiring/releasing the inner mutex, so relaxed
    // atomic ordering is sufficient throughout this module.
    owner: AtomicUsize,
    // Number of times the owning thread has acquired this mutex. Only
    // accessed by the thread that holds the lock.
    lock_count: Cell<usize>,
    // The underlying, non-reentrant mutex.
    mutex: R,
    // Helper used to obtain the current thread's non-zero ID.
    get_thread_id: G,
}
73
// Send/Sync follow the underlying mutex and thread-ID helper: the interior
// `AtomicUsize`/`Cell` state is only mutated by the thread that holds (or is
// in the process of acquiring/releasing) the inner mutex.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
76
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Initial value for an unlocked mutex.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = RawReentrantMutex {
        owner: AtomicUsize::new(0),
        lock_count: Cell::new(0),
        mutex: R::INIT,
        get_thread_id: G::INIT,
    };

    // Common acquisition path shared by `lock`, `try_lock`, `try_lock_for`
    // and `try_lock_until`: `try_lock` is the strategy used to acquire the
    // inner mutex when we don't already own it. Returns whether the lock was
    // acquired.
    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        if self.owner.load(Ordering::Relaxed) == id {
            // Re-entrant acquisition: we already hold the inner mutex, so
            // just bump the recursion count.
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            // We now hold the inner mutex; record ourselves as the owner
            // before anyone can observe the count.
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires this mutex, blocking if it's held by another thread.
    #[inline]
    pub fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire this mutex without blocking. Returns `true`
    /// if the lock was successfully acquired and `false` otherwise.
    #[inline]
    pub fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Unlocks this mutex. The inner mutex may not be unlocked if
    /// this mutex was acquired previously in the current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Outermost unlock: clear the owner *before* releasing the inner
            // mutex so another thread that acquires it never sees a stale ID.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.mutex.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        self.owner.load(Ordering::Relaxed) == id
    }
}
153
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
    /// may not be unlocked if this mutex was acquired previously in the
    /// current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Outermost unlock: clear the owner before releasing the inner
            // mutex, mirroring `unlock` but with a fair handoff.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn bump(&self) {
        // Only yield at the outermost recursion level; a nested holder cannot
        // release the inner mutex.
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            // Clear the owner while the inner mutex may be held by another
            // thread during `bump`, then restore it once we have reacquired.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
        }
    }
}
191
impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Attempts to acquire this lock until a timeout instant is reached.
    /// Returns `true` if the lock was acquired.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
        self.lock_internal(|| self.mutex.try_lock_until(timeout))
    }

    /// Attempts to acquire this lock within a timeout duration.
    /// Returns `true` if the lock was acquired.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
        self.lock_internal(|| self.mutex.try_lock_for(timeout))
    }
}
205
206/// A mutex which can be recursively locked by a single thread.
207///
208/// This type is identical to `Mutex` except for the following points:
209///
210/// - Locking multiple times from the same thread will work correctly instead of
211/// deadlocking.
212/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
213/// Use a `RefCell` if you need this.
214///
215/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
216/// primitive.
pub struct ReentrantMutex<R, G, T: ?Sized> {
    // The reentrant locking state; `data` below is only accessed while it is
    // held.
    raw: RawReentrantMutex<R, G>,
    // The protected data. `UnsafeCell` because it is accessed through shared
    // references while the lock is held.
    data: UnsafeCell<T>,
}
221
// Sync only requires `T: Send` (not `T: Sync`): the data is only reachable
// through guards, and guards confine access to the thread that holds the
// lock (`ReentrantMutexGuard` is `!Send` via `GuardNoSend`).
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}
230
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex in an unlocked state ready for use.
    // `const fn` with trait bounds requires a newer compiler; the build
    // script selects this variant or the non-const twin below.
    #[cfg(has_const_fn_trait_bound)]
    #[inline]
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Creates a new reentrant mutex in an unlocked state ready for use.
    // Non-const fallback for compilers without `const fn` trait bounds; the
    // body must be kept identical to the const variant above.
    #[cfg(not(has_const_fn_trait_bound))]
    #[inline]
    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Consumes this mutex, returning the underlying data.
    ///
    /// Taking `self` by value guarantees no lock is held, so no locking is
    /// needed.
    #[inline]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}
268
impl<R, G, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    ///
    /// This allows creating a reentrant mutex in a constant context on stable
    /// Rust.
    // No trait bounds on this impl, so this is a true `const fn` even on
    // compilers without `const fn` trait-bound support.
    #[inline]
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: raw_mutex,
                get_thread_id,
            },
        }
    }
}
288
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Creates a guard without locking; all locking entry points funnel
    /// through this.
    ///
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        ReentrantMutexGuard {
            remutex: &self,
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex, blocking the current thread until it is able
    /// to do so.
    ///
    /// If the mutex is held by another thread then this function will block the
    /// local thread until it is available to acquire the mutex. If the mutex is
    /// already held by the current thread then this function will increment the
    /// lock reference count and return immediately. Upon returning,
    /// the thread is the only thread with the mutex held. An RAII guard is
    /// returned to allow scoped unlock of the lock. When the guard goes out of
    /// scope, the mutex will be unlocked.
    #[inline]
    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        self.raw.lock();
        // SAFETY: The lock is held, as required.
        unsafe { self.guard() }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
    /// guard is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.guard() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        // SAFETY: `&mut self` guarantees exclusive access, so no guard can
        // be aliasing the data.
        unsafe { &mut *self.data.get() }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.raw.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        self.raw.is_owned_by_current_thread()
    }

    /// Forcibly unlocks the mutex.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock(&self) {
        self.raw.unlock();
    }

    /// Returns the underlying raw mutex object.
    ///
    /// Note that you will most likely need to import the `RawMutex` trait from
    /// `lock_api` to be able to call functions on the raw mutex.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking a mutex while
    /// still holding a reference to a `ReentrantMutexGuard`.
    #[inline]
    pub unsafe fn raw(&self) -> &R {
        &self.raw.mutex
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
    /// when dealing with FFI.
    ///
    /// # Safety
    ///
    /// You must ensure that there are no data races when dereferencing the
    /// returned pointer, for example if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using
    /// `mem::forget`.
    #[inline]
    pub fn data_ptr(&self) -> *mut T {
        self.data.get()
    }

    /// Creates an `Arc`-based guard without locking; `Arc` locking entry
    /// points funnel through this.
    ///
    /// # Safety
    ///
    /// The lock must be held before calling this method.
    #[cfg(feature = "arc_lock")]
    #[inline]
    unsafe fn guard_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
        ArcReentrantMutexGuard {
            remutex: self.clone(),
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex through an `Arc`.
    ///
    /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an
    /// `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
        self.raw.lock();
        // SAFETY: locking guarantee is upheld
        unsafe { self.guard_arc() }
    }

    /// Attempts to acquire a reentrant mutex through an `Arc`.
    ///
    /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside
    /// of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.guard_arc() })
        } else {
            None
        }
    }
}
442
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}
460
impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc_for(
        self: &Arc<Self>,
        timeout: R::Duration,
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.guard_arc() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.guard_arc() })
        } else {
            None
        }
    }
}
528
529impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
530 #[inline]
531 fn default() -> ReentrantMutex<R, G, T> {
532 ReentrantMutex::new(val:Default::default())
533 }
534}
535
536impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
537 #[inline]
538 fn from(t: T) -> ReentrantMutex<R, G, T> {
539 ReentrantMutex::new(val:t)
540 }
541}
542
543impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
544 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
545 match self.try_lock() {
546 Some(guard: ReentrantMutexGuard<'_, R, …, …>) => f&mut DebugStruct<'_, '_>
547 .debug_struct("ReentrantMutex")
548 .field(name:"data", &&*guard)
549 .finish(),
550 None => {
551 struct LockedPlaceholder;
552 impl fmt::Debug for LockedPlaceholder {
553 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
554 f.write_str(data:"<locked>")
555 }
556 }
557
558 f&mut DebugStruct<'_, '_>.debug_struct("ReentrantMutex")
559 .field(name:"data", &LockedPlaceholder)
560 .finish()
561 }
562 }
563 }
564}
565
566// Copied and modified from serde
567#[cfg(feature = "serde")]
568impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
569where
570 R: RawMutex,
571 G: GetThreadId,
572 T: Serialize + ?Sized,
573{
574 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
575 where
576 S: Serializer,
577 {
578 self.lock().serialize(serializer)
579 }
580}
581
582#[cfg(feature = "serde")]
583impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
584where
585 R: RawMutex,
586 G: GetThreadId,
587 T: Deserialize<'de> + ?Sized,
588{
589 fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
590 where
591 D: Deserializer<'de>,
592 {
593 Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
594 }
595}
596
597/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
598/// is dropped (falls out of scope), the lock will be unlocked.
599///
600/// The data protected by the mutex can be accessed through this guard via its
601/// `Deref` implementation.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The mutex this guard will unlock on drop.
    remutex: &'a ReentrantMutex<R, G, T>,
    // `GuardNoSend` makes the guard `!Send`: the lock is owned by a specific
    // thread ID, so it must be released on the thread that acquired it.
    marker: PhantomData<(&'a T, GuardNoSend)>,
}
608
// Sharing a guard across threads only exposes `&T`, so `T: Sync` suffices.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}
613
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
    /// Returns a reference to the original `ReentrantMutex` object.
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
        s.remutex
    }

    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.remutex.raw;
        let data = f(unsafe { &*s.remutex.data.get() });
        // Forget `s` so its Drop impl does not unlock: ownership of the lock
        // transfers to the mapped guard, which unlocks on drop instead.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
            Some(data) => data,
            // Closure declined: hand the intact (still-locked) guard back.
            None => return Err(s),
        };
        // As in `map`: suppress Drop so the lock passes to the mapped guard.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Relock on scope exit even if `f` panics, keeping the guard's
        // "always holds the lock" invariant intact.
        defer!(s.remutex.raw.lock());
        f()
    }
}
690
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Suppress Drop: the lock has already been released fairly above, so
        // running Drop would unlock a second time.
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Relock on scope exit even if `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
747
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for ReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the lock, so shared access to the data is
        // valid for the guard's lifetime. Note: only `&T`, never `&mut T`.
        unsafe { &*self.remutex.data.get() }
    }
}
757
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    /// Releases one level of the reentrant lock when the guard goes out of
    /// scope.
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}
769
// Debug-formats as the protected data itself (transparent).
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for ReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
777
// Display-formats as the protected data itself (transparent).
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for ReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
785
786#[cfg(feature = "owning_ref")]
787unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
788 for ReentrantMutexGuard<'a, R, G, T>
789{
790}
791
792/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`.
793///
794/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the
795/// `Mutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has an `'static`
796/// lifetime.
797#[cfg(feature = "arc_lock")]
798#[clippy::has_significant_drop]
799#[must_use = "if unused the ReentrantMutex will immediately unlock"]
800pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> {
801 remutex: Arc<ReentrantMutex<R, G, T>>,
802 marker: PhantomData<GuardNoSend>,
803}
804
805#[cfg(feature = "arc_lock")]
806impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
807 /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`.
808 pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> {
809 &s.remutex
810 }
811
812 /// Temporarily unlocks the mutex to execute the given function.
813 ///
814 /// This is safe because `&mut` guarantees that there exist no other
815 /// references to the data protected by the mutex.
816 #[inline]
817 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
818 where
819 F: FnOnce() -> U,
820 {
821 // Safety: A ReentrantMutexGuard always holds the lock.
822 unsafe {
823 s.remutex.raw.unlock();
824 }
825 defer!(s.remutex.raw.lock());
826 f()
827 }
828}
829
830#[cfg(feature = "arc_lock")]
831impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
832 /// Unlocks the mutex using a fair unlock protocol.
833 ///
834 /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`].
835 #[inline]
836 pub fn unlock_fair(s: Self) {
837 // Safety: A ReentrantMutexGuard always holds the lock
838 unsafe {
839 s.remutex.raw.unlock_fair();
840 }
841
842 // SAFETY: ensure that the Arc's refcount is decremented
843 let mut s = ManuallyDrop::new(s);
844 unsafe { ptr::drop_in_place(&mut s.remutex) };
845 }
846
847 /// Temporarily unlocks the mutex to execute the given function.
848 ///
849 /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`].
850 #[inline]
851 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
852 where
853 F: FnOnce() -> U,
854 {
855 // Safety: A ReentrantMutexGuard always holds the lock
856 unsafe {
857 s.remutex.raw.unlock_fair();
858 }
859 defer!(s.remutex.raw.lock());
860 f()
861 }
862
863 /// Temporarily yields the mutex to a waiting thread if there is one.
864 ///
865 /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`].
866 #[inline]
867 pub fn bump(s: &mut Self) {
868 // Safety: A ReentrantMutexGuard always holds the lock
869 unsafe {
870 s.remutex.raw.bump();
871 }
872 }
873}
874
875#[cfg(feature = "arc_lock")]
876impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> {
877 type Target = T;
878 #[inline]
879 fn deref(&self) -> &T {
880 unsafe { &*self.remutex.data.get() }
881 }
882}
883
884#[cfg(feature = "arc_lock")]
885impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> {
886 #[inline]
887 fn drop(&mut self) {
888 // Safety: A ReentrantMutexGuard always holds the lock.
889 unsafe {
890 self.remutex.raw.unlock();
891 }
892 }
893}
894
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The raw lock this guard releases on drop; the `ReentrantMutex` wrapper
    // is no longer reachable from here, which is what rules out re-locking.
    raw: &'a RawReentrantMutex<R, G>,
    // Pointer to the mapped component of the protected data. Kept as a raw
    // pointer (with the lifetime carried by `marker`).
    data: *const T,
    marker: PhantomData<&'a T>,
}
909
// Sharing a mapped guard across threads only exposes `&T`, so `T: Sync`
// suffices.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
914
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        // Suppress Drop: ownership of the lock transfers to the new mapped
        // guard, which unlocks on drop instead.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            // Closure declined: hand the intact (still-locked) guard back.
            None => return Err(s),
        };
        // As in `map`: suppress Drop so the lock passes to the new guard.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}
971
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock
        unsafe {
            s.raw.unlock_fair();
        }
        // Suppress Drop: the lock has already been released fairly above.
        mem::forget(s);
    }
}
996
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: `data` was derived from the locked data while the lock was
        // held, and the guard still holds the lock.
        unsafe { &*self.data }
    }
}
1006
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Releases one level of the reentrant lock when the mapped guard goes
    /// out of scope.
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock.
        unsafe {
            self.raw.unlock();
        }
    }
}
1018
// Debug-formats as the mapped data itself (transparent).
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
1026
// Display-formats as the mapped data itself (transparent).
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
1034
1035#[cfg(feature = "owning_ref")]
1036unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
1037 for MappedReentrantMutexGuard<'a, R, G, T>
1038{
1039}
1040