1 | // Copyright 2016 Amanieu d'Antras |
2 | // |
3 | // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or |
4 | // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or |
5 | // http://opensource.org/licenses/MIT>, at your option. This file may not be |
6 | // copied, modified, or distributed except according to those terms. |
7 | |
8 | use crate::raw_fair_mutex::RawFairMutex; |
9 | use lock_api; |
10 | |
/// A mutual exclusion primitive that is always fair, useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// The regular mutex provided by `parking_lot` uses eventual fairness
/// (after some time it will default to the fair algorithm), but eventual
/// fairness does not provide the same guarantees an always fair method would.
/// Fair mutexes are generally slower, but sometimes needed.
///
/// In a fair mutex the waiters form a queue, and the lock is always granted to
/// the next requester in the queue, in first-in first-out order. This ensures
/// that one thread cannot starve others by quickly re-acquiring the lock after
/// releasing it.
///
/// A fair mutex may be less useful when threads have different priorities: a
/// high-priority thread still has to wait behind lower-priority threads that
/// are ahead of it in the queue (a form of priority inversion).
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `Mutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
///
/// # Examples
///
/// ```
/// use parking_lot::FairMutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(FairMutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..N {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;
79 | |
80 | /// Creates a new fair mutex in an unlocked state ready for use. |
81 | /// |
82 | /// This allows creating a fair mutex in a constant context on stable Rust. |
83 | pub const fn const_fair_mutex<T>(val: T) -> FairMutex<T> { |
84 | FairMutex::const_new(<RawFairMutex as lock_api::RawMutex>::INIT, val) |
85 | } |
86 | |
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;
93 | |
/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;
102 | |
#[cfg(test)]
mod tests {
    use crate::FairMutex;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    // Simple non-`Copy` payload used to exercise move-out APIs.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        // The mutex can be locked and unlocked repeatedly.
        let mutex = FairMutex::new(());
        drop(mutex.lock());
        drop(mutex.lock());
    }

    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc(m: &FairMutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let shared = Arc::new(FairMutex::new(0));
        let (tx, rx) = channel();

        // Spawn two incrementing threads per iteration, 2 * K in total.
        for _ in 0..K {
            for _ in 0..2 {
                let tx = tx.clone();
                let shared = Arc::clone(&shared);
                thread::spawn(move || {
                    inc(&shared);
                    tx.send(()).unwrap();
                });
            }
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*shared.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let mutex = FairMutex::new(());
        *mutex.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let mutex = FairMutex::new(NonCopy(10));
        assert_eq!(mutex.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }

        let drops = Arc::new(AtomicUsize::new(0));
        let mutex = FairMutex::new(Foo(Arc::clone(&drops)));
        assert_eq!(drops.load(Ordering::SeqCst), 0);
        {
            // Moving the value out must not drop it...
            let _inner = mutex.into_inner();
            assert_eq!(drops.load(Ordering::SeqCst), 0);
        }
        // ...but leaving this scope drops it exactly once.
        assert_eq!(drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut mutex = FairMutex::new(NonCopy(10));
        *mutex.get_mut() = NonCopy(20);
        assert_eq!(mutex.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_nested() {
        // A mutex stored inside another mutex must still be lockable and
        // its data accessible.
        let inner = Arc::new(FairMutex::new(1));
        let outer = Arc::new(FairMutex::new(inner));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let outer_guard = outer.lock();
            let inner_guard = outer_guard.lock();
            assert_eq!(*inner_guard, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        // The mutex stays usable after a panicking thread locked it during
        // unwinding (no poisoning).
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = Arc::clone(&arc);
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<FairMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let guard = arc.lock();
        assert_eq!(*guard, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        // The mutex works with unsized (slice) payloads behind a reference.
        let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]);
        {
            let slice = &mut *mutex.lock();
            slice[0] = 4;
            slice[2] = 5;
        }
        let expected: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), expected);
    }

    #[test]
    fn test_mutexguard_sync() {
        // Compile-time check: the guard is `Sync`.
        fn sync<T: Sync>(_: T) {}

        let mutex = FairMutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = FairMutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _guard = mutex.lock();
        // While locked, Debug must not try to read the data.
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        // Serialization round-trips the protected data.
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = FairMutex::new(contents.clone());

        let bytes = serialize(&mutex).unwrap();
        let roundtrip: FairMutex<Vec<u8>> = deserialize(&bytes).unwrap();

        assert_eq!(*(mutex.lock()), *(roundtrip.lock()));
        assert_eq!(contents, *(roundtrip.lock()));
    }
}
276 | |