// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_fair_mutex::RawFairMutex;
/// A mutual exclusion primitive that is always fair, useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// The regular mutex provided by `parking_lot` uses eventual fairness
/// (after some time it will switch to the fair algorithm), but eventual
/// fairness does not provide the same guarantees that an always-fair method
/// would. Fair mutexes are generally slower, but sometimes needed.
///
/// In a fair mutex the waiters form a queue, and the lock is always granted to
/// the next requester in the queue, in first-in first-out order. This ensures
/// that one thread cannot starve others by quickly re-acquiring the lock after
/// releasing it.
///
/// A fair mutex may not be appropriate if threads have different priorities,
/// since strict queueing can lead to priority inversion.
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `Mutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
///
/// # Examples
///
/// ```
/// use parking_lot::FairMutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(FairMutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..N {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // The lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;

/// Creates a new fair mutex in an unlocked state ready for use.
///
/// This allows creating a fair mutex in a constant context on stable Rust.
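///
/// # Example
///
/// A minimal sketch of initializing a `static` fair mutex (this assumes
/// `const_fair_mutex` is re-exported at the crate root, like `FairMutex` is):
///
/// ```
/// use parking_lot::{const_fair_mutex, FairMutex};
///
/// // Initialized at compile time; no lazy initialization or drop glue needed.
/// static COUNTER: FairMutex<u32> = const_fair_mutex(0);
///
/// *COUNTER.lock() += 1;
/// assert_eq!(*COUNTER.lock(), 1);
/// ```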
pub const fn const_fair_mutex<T>(val: T) -> FairMutex<T> {
    FairMutex::const_new(<RawFairMutex as lock_api::RawMutex>::INIT, val)
}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
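///
/// # Example
///
/// A small sketch of typical guard usage; dropping the guard releases the lock:
///
/// ```
/// use parking_lot::FairMutex;
///
/// let mutex = FairMutex::new(5);
/// {
///     // `DerefMut` gives mutable access to the protected data.
///     let mut guard = mutex.lock();
///     *guard += 1;
/// } // The guard is dropped here, unlocking the mutex.
/// assert_eq!(*mutex.lock(), 6);
/// ```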
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;

/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
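///
/// # Example
///
/// A minimal sketch of mapping a guard to a subfield of the protected data
/// (this assumes `FairMutexGuard` is re-exported at the crate root):
///
/// ```
/// use parking_lot::{FairMutex, FairMutexGuard};
///
/// let mutex = FairMutex::new((0u32, String::from("hello")));
///
/// // Narrow the guard so it only exposes the `String` field.
/// let guard = mutex.lock();
/// let mut mapped = FairMutexGuard::map(guard, |pair| &mut pair.1);
/// mapped.push_str(" world");
/// drop(mapped);
///
/// assert_eq!(mutex.lock().1, "hello world");
/// ```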
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;

#[cfg(test)]
mod tests {
    use crate::FairMutex;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);
    #[test]
    fn smoke() {
        let m = FairMutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(FairMutex::new(0));

        fn inc(m: &FairMutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = FairMutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = FairMutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = FairMutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = FairMutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = Arc::new(FairMutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<FairMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = FairMutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = FairMutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = FairMutex::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: FairMutex<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
        assert_eq!(contents, *(deserialized.lock()));
    }
}