1 | // Copyright 2016 Amanieu d'Antras |
2 | // |
3 | // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or |
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 | // http://opensource.org/licenses/MIT>, at your option. This file may not be |
6 | // copied, modified, or distributed except according to those terms. |
7 | |
8 | use crate::raw_rwlock::RawRwLock; |
9 | use lock_api; |
10 | |
11 | /// A reader-writer lock |
12 | /// |
13 | /// This type of lock allows a number of readers or at most one writer at any |
14 | /// point in time. The write portion of this lock typically allows modification |
15 | /// of the underlying data (exclusive access) and the read portion of this lock |
16 | /// typically allows for read-only access (shared access). |
17 | /// |
/// This lock uses a task-fair locking policy which avoids both reader and
/// writer starvation. This means that readers trying to acquire the lock will
/// block, even if the lock is currently unlocked, when there are writers
/// waiting to acquire it. Because of this, attempts to recursively acquire a
/// read lock within a single thread may result in a deadlock.
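///
/// Where recursive read locks are genuinely needed, `read_recursive` can be
/// used instead, since it does not block behind waiting writers. A minimal
/// sketch:
///
/// ```
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
/// let first = lock.read();
/// // `read_recursive` succeeds even while a writer is queued, at the
/// // cost of potentially starving that writer.
/// let second = lock.read_recursive();
/// assert_eq!(*first, *second);
/// ```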
23 | /// |
24 | /// The type parameter `T` represents the data that this lock protects. It is |
25 | /// required that `T` satisfies `Send` to be shared across threads and `Sync` to |
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
29 | /// |
30 | /// # Fairness |
31 | /// |
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same lock in succession, which can starve
/// other threads waiting to acquire the rwlock. While this improves throughput
/// because it doesn't force a context switch when a thread tries to re-acquire
/// a rwlock it has just released, it comes at the cost of fairness.
37 | /// |
38 | /// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350) |
39 | /// to ensure that the lock will be fair on average without sacrificing |
40 | /// throughput. This is done by forcing a fair unlock on average every 0.5ms, |
41 | /// which will force the lock to go to the next thread waiting for the rwlock. |
42 | /// |
43 | /// Additionally, any critical section longer than 1ms will always use a fair |
44 | /// unlock, which has a negligible impact on throughput considering the length |
45 | /// of the critical section. |
46 | /// |
/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
/// or `RwLockWriteGuard::unlock_fair` when unlocking the rwlock instead of
/// simply dropping the guard.
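///
/// For example, a minimal sketch of an explicit fair unlock:
///
/// ```
/// use parking_lot::{RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new(0);
/// let guard = lock.read();
/// // Hand the lock directly to the next waiting thread instead of
/// // letting this thread race to re-acquire it.
/// RwLockReadGuard::unlock_fair(guard);
/// ```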
50 | /// |
51 | /// # Differences from the standard library `RwLock` |
52 | /// |
/// - Supports atomically downgrading a write lock into a read lock (shown in
///   the second example below).
54 | /// - Task-fair locking policy instead of an unspecified platform default. |
55 | /// - No poisoning, the lock is released normally on panic. |
56 | /// - Only requires 1 word of space, whereas the standard library boxes the |
57 | /// `RwLock` due to platform limitations. |
58 | /// - Can be statically constructed. |
59 | /// - Does not require any drop glue when dropped. |
60 | /// - Inline fast path for the uncontended case. |
61 | /// - Efficient handling of micro-contention using adaptive spinning. |
62 | /// - Allows raw locking & unlocking without a guard. |
63 | /// - Supports eventual fairness so that the rwlock is fair on average. |
64 | /// - Optionally allows making the rwlock fair by calling |
65 | /// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`. |
66 | /// |
67 | /// # Examples |
68 | /// |
69 | /// ``` |
70 | /// use parking_lot::RwLock; |
71 | /// |
72 | /// let lock = RwLock::new(5); |
73 | /// |
74 | /// // many reader locks can be held at once |
75 | /// { |
76 | /// let r1 = lock.read(); |
77 | /// let r2 = lock.read(); |
78 | /// assert_eq!(*r1, 5); |
79 | /// assert_eq!(*r2, 5); |
80 | /// } // read locks are dropped at this point |
81 | /// |
82 | /// // only one write lock may be held, however |
83 | /// { |
84 | /// let mut w = lock.write(); |
85 | /// *w += 1; |
86 | /// assert_eq!(*w, 6); |
87 | /// } // write lock is dropped here |
88 | /// ``` |
89 | pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>; |
90 | |
91 | /// Creates a new instance of an `RwLock<T>` which is unlocked. |
92 | /// |
93 | /// This allows creating a `RwLock<T>` in a constant context on stable Rust. |
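///
/// # Examples
///
/// A minimal sketch of a statically-initialized lock:
///
/// ```
/// use parking_lot::{const_rwlock, RwLock};
///
/// // Ready to use at program start, with no lazy initialization.
/// static COUNT: RwLock<usize> = const_rwlock(0);
///
/// *COUNT.write() += 1;
/// assert_eq!(*COUNT.read(), 1);
/// ```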
94 | pub const fn const_rwlock<T>(val: T) -> RwLock<T> { |
95 | RwLock::const_new(<RawRwLock as lock_api::RawRwLock>::INIT, val) |
96 | } |
97 | |
98 | /// RAII structure used to release the shared read access of a lock when |
99 | /// dropped. |
100 | pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; |
101 | |
102 | /// RAII structure used to release the exclusive write access of a lock when |
103 | /// dropped. |
104 | pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>; |
105 | |
106 | /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a |
107 | /// subfield of the protected data. |
108 | /// |
109 | /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the |
110 | /// former doesn't support temporarily unlocking and re-locking, since that |
111 | /// could introduce soundness issues if the locked object is modified by another |
112 | /// thread. |
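///
/// A minimal sketch of obtaining one via `RwLockReadGuard::map`:
///
/// ```
/// use parking_lot::{RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new((1, String::from("two")));
/// let guard = lock.read();
/// // Narrow the guard so it exposes only the first tuple field.
/// let first = RwLockReadGuard::map(guard, |t| &t.0);
/// assert_eq!(*first, 1);
/// ```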
113 | pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>; |
114 | |
115 | /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a |
116 | /// subfield of the protected data. |
117 | /// |
118 | /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the |
119 | /// former doesn't support temporarily unlocking and re-locking, since that |
120 | /// could introduce soundness issues if the locked object is modified by another |
121 | /// thread. |
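///
/// A minimal sketch of obtaining one via `RwLockWriteGuard::map`; mutation
/// through the mapped guard still works:
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new(vec![1, 2, 3]);
/// let guard = lock.write();
/// // Narrow the guard to the first element and mutate through it.
/// let mut first = RwLockWriteGuard::map(guard, |v| &mut v[0]);
/// *first = 10;
/// drop(first);
/// assert_eq!(lock.read()[0], 10);
/// ```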
122 | pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>; |
123 | |
124 | /// RAII structure used to release the upgradable read access of a lock when |
125 | /// dropped. |
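///
/// A minimal sketch of the check-then-modify pattern this enables:
///
/// ```
/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
///
/// let lock = RwLock::new(0);
/// let upgradable = lock.upgradable_read();
/// if *upgradable == 0 {
///     // Upgrade atomically: no other writer can modify the value
///     // between the check above and the write below.
///     let mut w = RwLockUpgradableReadGuard::upgrade(upgradable);
///     *w = 1;
/// }
/// assert_eq!(*lock.read(), 1);
/// ```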
126 | pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>; |
127 | |
#[cfg(test)]
129 | mod tests { |
130 | use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; |
131 | use rand::Rng; |
132 | use std::sync::atomic::{AtomicUsize, Ordering}; |
133 | use std::sync::mpsc::channel; |
134 | use std::sync::Arc; |
135 | use std::thread; |
136 | use std::time::Duration; |
137 | |
    #[cfg(feature = "serde")]
139 | use bincode::{deserialize, serialize}; |
140 | |
141 | #[derive(Eq, PartialEq, Debug)] |
142 | struct NonCopy(i32); |
143 | |
144 | #[test] |
145 | fn smoke() { |
146 | let l = RwLock::new(()); |
147 | drop(l.read()); |
148 | drop(l.write()); |
149 | drop(l.upgradable_read()); |
150 | drop((l.read(), l.read())); |
151 | drop((l.read(), l.upgradable_read())); |
152 | drop(l.write()); |
153 | } |
154 | |
155 | #[test] |
156 | fn frob() { |
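        // Stress test: N threads each perform M randomly chosen read or write
        // lock operations. The channel only signals completion: `rx.recv()`
        // returns an error once every sender has been dropped.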
157 | const N: u32 = 10; |
158 | const M: u32 = 1000; |
159 | |
160 | let r = Arc::new(RwLock::new(())); |
161 | |
162 | let (tx, rx) = channel::<()>(); |
163 | for _ in 0..N { |
164 | let tx = tx.clone(); |
165 | let r = r.clone(); |
166 | thread::spawn(move || { |
167 | let mut rng = rand::thread_rng(); |
168 | for _ in 0..M { |
169 | if rng.gen_bool(1.0 / N as f64) { |
170 | drop(r.write()); |
171 | } else { |
172 | drop(r.read()); |
173 | } |
174 | } |
175 | drop(tx); |
176 | }); |
177 | } |
178 | drop(tx); |
179 | let _ = rx.recv(); |
180 | } |
181 | |
182 | #[test] |
183 | fn test_rw_arc_no_poison_wr() { |
184 | let arc = Arc::new(RwLock::new(1)); |
185 | let arc2 = arc.clone(); |
186 | let _: Result<(), _> = thread::spawn(move || { |
187 | let _lock = arc2.write(); |
188 | panic!(); |
189 | }) |
190 | .join(); |
191 | let lock = arc.read(); |
192 | assert_eq!(*lock, 1); |
193 | } |
194 | |
195 | #[test] |
196 | fn test_rw_arc_no_poison_ww() { |
197 | let arc = Arc::new(RwLock::new(1)); |
198 | let arc2 = arc.clone(); |
199 | let _: Result<(), _> = thread::spawn(move || { |
200 | let _lock = arc2.write(); |
201 | panic!(); |
202 | }) |
203 | .join(); |
204 | let lock = arc.write(); |
205 | assert_eq!(*lock, 1); |
206 | } |
207 | |
208 | #[test] |
209 | fn test_rw_arc_no_poison_rr() { |
210 | let arc = Arc::new(RwLock::new(1)); |
211 | let arc2 = arc.clone(); |
212 | let _: Result<(), _> = thread::spawn(move || { |
213 | let _lock = arc2.read(); |
214 | panic!(); |
215 | }) |
216 | .join(); |
217 | let lock = arc.read(); |
218 | assert_eq!(*lock, 1); |
219 | } |
220 | |
221 | #[test] |
222 | fn test_rw_arc_no_poison_rw() { |
223 | let arc = Arc::new(RwLock::new(1)); |
224 | let arc2 = arc.clone(); |
225 | let _: Result<(), _> = thread::spawn(move || { |
226 | let _lock = arc2.read(); |
227 | panic!() |
228 | }) |
229 | .join(); |
230 | let lock = arc.write(); |
231 | assert_eq!(*lock, 1); |
232 | } |
233 | |
234 | #[test] |
235 | fn test_ruw_arc() { |
236 | let arc = Arc::new(RwLock::new(0)); |
237 | let arc2 = arc.clone(); |
238 | let (tx, rx) = channel(); |
239 | |
240 | thread::spawn(move || { |
241 | for _ in 0..10 { |
242 | let mut lock = arc2.write(); |
243 | let tmp = *lock; |
244 | *lock = -1; |
245 | thread::yield_now(); |
246 | *lock = tmp + 1; |
247 | } |
248 | tx.send(()).unwrap(); |
249 | }); |
250 | |
251 | let mut children = Vec::new(); |
252 | |
253 | // Upgradable readers try to catch the writer in the act and also |
254 | // try to touch the value |
255 | for _ in 0..5 { |
256 | let arc3 = arc.clone(); |
257 | children.push(thread::spawn(move || { |
258 | let lock = arc3.upgradable_read(); |
259 | let tmp = *lock; |
260 | assert!(tmp >= 0); |
261 | thread::yield_now(); |
262 | let mut lock = RwLockUpgradableReadGuard::upgrade(lock); |
263 | assert_eq!(tmp, *lock); |
264 | *lock = -1; |
265 | thread::yield_now(); |
266 | *lock = tmp + 1; |
267 | })); |
268 | } |
269 | |
270 | // Readers try to catch the writers in the act |
271 | for _ in 0..5 { |
272 | let arc4 = arc.clone(); |
273 | children.push(thread::spawn(move || { |
274 | let lock = arc4.read(); |
275 | assert!(*lock >= 0); |
276 | })); |
277 | } |
278 | |
279 | // Wait for children to pass their asserts |
280 | for r in children { |
281 | assert!(r.join().is_ok()); |
282 | } |
283 | |
284 | // Wait for writer to finish |
285 | rx.recv().unwrap(); |
286 | let lock = arc.read(); |
287 | assert_eq!(*lock, 15); |
288 | } |
289 | |
290 | #[test] |
291 | fn test_rw_arc() { |
292 | let arc = Arc::new(RwLock::new(0)); |
293 | let arc2 = arc.clone(); |
294 | let (tx, rx) = channel(); |
295 | |
296 | thread::spawn(move || { |
297 | let mut lock = arc2.write(); |
298 | for _ in 0..10 { |
299 | let tmp = *lock; |
300 | *lock = -1; |
301 | thread::yield_now(); |
302 | *lock = tmp + 1; |
303 | } |
304 | tx.send(()).unwrap(); |
305 | }); |
306 | |
307 | // Readers try to catch the writer in the act |
308 | let mut children = Vec::new(); |
309 | for _ in 0..5 { |
310 | let arc3 = arc.clone(); |
311 | children.push(thread::spawn(move || { |
312 | let lock = arc3.read(); |
313 | assert!(*lock >= 0); |
314 | })); |
315 | } |
316 | |
317 | // Wait for children to pass their asserts |
318 | for r in children { |
319 | assert!(r.join().is_ok()); |
320 | } |
321 | |
322 | // Wait for writer to finish |
323 | rx.recv().unwrap(); |
324 | let lock = arc.read(); |
325 | assert_eq!(*lock, 10); |
326 | } |
327 | |
328 | #[test] |
329 | fn test_rw_arc_access_in_unwind() { |
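        // Verify that the lock can still be acquired by a destructor that
        // runs while its thread is unwinding from a panic.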
330 | let arc = Arc::new(RwLock::new(1)); |
331 | let arc2 = arc.clone(); |
332 | let _ = thread::spawn(move || { |
333 | struct Unwinder { |
334 | i: Arc<RwLock<isize>>, |
335 | } |
336 | impl Drop for Unwinder { |
337 | fn drop(&mut self) { |
338 | let mut lock = self.i.write(); |
339 | *lock += 1; |
340 | } |
341 | } |
342 | let _u = Unwinder { i: arc2 }; |
343 | panic!(); |
344 | }) |
345 | .join(); |
346 | let lock = arc.read(); |
347 | assert_eq!(*lock, 2); |
348 | } |
349 | |
350 | #[test] |
351 | fn test_rwlock_unsized() { |
352 | let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); |
353 | { |
354 | let b = &mut *rw.write(); |
355 | b[0] = 4; |
356 | b[2] = 5; |
357 | } |
358 | let comp: &[i32] = &[4, 2, 5]; |
359 | assert_eq!(&*rw.read(), comp); |
360 | } |
361 | |
362 | #[test] |
363 | fn test_rwlock_try_read() { |
364 | let lock = RwLock::new(0isize); |
365 | { |
366 | let read_guard = lock.read(); |
367 | |
368 | let read_result = lock.try_read(); |
369 | assert!( |
370 | read_result.is_some(), |
371 | "try_read should succeed while read_guard is in scope" |
372 | ); |
373 | |
374 | drop(read_guard); |
375 | } |
376 | { |
377 | let upgrade_guard = lock.upgradable_read(); |
378 | |
379 | let read_result = lock.try_read(); |
380 | assert!( |
381 | read_result.is_some(), |
382 | "try_read should succeed while upgrade_guard is in scope" |
383 | ); |
384 | |
385 | drop(upgrade_guard); |
386 | } |
387 | { |
388 | let write_guard = lock.write(); |
389 | |
390 | let read_result = lock.try_read(); |
391 | assert!( |
392 | read_result.is_none(), |
393 | "try_read should fail while write_guard is in scope" |
394 | ); |
395 | |
396 | drop(write_guard); |
397 | } |
398 | } |
399 | |
400 | #[test] |
401 | fn test_rwlock_try_write() { |
402 | let lock = RwLock::new(0isize); |
403 | { |
404 | let read_guard = lock.read(); |
405 | |
406 | let write_result = lock.try_write(); |
407 | assert!( |
408 | write_result.is_none(), |
409 | "try_write should fail while read_guard is in scope" |
410 | ); |
411 | assert!(lock.is_locked()); |
412 | assert!(!lock.is_locked_exclusive()); |
413 | |
414 | drop(read_guard); |
415 | } |
416 | { |
417 | let upgrade_guard = lock.upgradable_read(); |
418 | |
419 | let write_result = lock.try_write(); |
420 | assert!( |
421 | write_result.is_none(), |
422 | "try_write should fail while upgrade_guard is in scope" |
423 | ); |
424 | assert!(lock.is_locked()); |
425 | assert!(!lock.is_locked_exclusive()); |
426 | |
427 | drop(upgrade_guard); |
428 | } |
429 | { |
430 | let write_guard = lock.write(); |
431 | |
432 | let write_result = lock.try_write(); |
433 | assert!( |
434 | write_result.is_none(), |
435 | "try_write should fail while write_guard is in scope" |
436 | ); |
437 | assert!(lock.is_locked()); |
438 | assert!(lock.is_locked_exclusive()); |
439 | |
440 | drop(write_guard); |
441 | } |
442 | } |
443 | |
444 | #[test] |
445 | fn test_rwlock_try_upgrade() { |
446 | let lock = RwLock::new(0isize); |
447 | { |
448 | let read_guard = lock.read(); |
449 | |
450 | let upgrade_result = lock.try_upgradable_read(); |
451 | assert!( |
452 | upgrade_result.is_some(), |
453 | "try_upgradable_read should succeed while read_guard is in scope" |
454 | ); |
455 | |
456 | drop(read_guard); |
457 | } |
458 | { |
459 | let upgrade_guard = lock.upgradable_read(); |
460 | |
461 | let upgrade_result = lock.try_upgradable_read(); |
462 | assert!( |
463 | upgrade_result.is_none(), |
464 | "try_upgradable_read should fail while upgrade_guard is in scope" |
465 | ); |
466 | |
467 | drop(upgrade_guard); |
468 | } |
469 | { |
470 | let write_guard = lock.write(); |
471 | |
472 | let upgrade_result = lock.try_upgradable_read(); |
473 | assert!( |
474 | upgrade_result.is_none(), |
475 | "try_upgradable should fail while write_guard is in scope" |
476 | ); |
477 | |
478 | drop(write_guard); |
479 | } |
480 | } |
481 | |
482 | #[test] |
483 | fn test_into_inner() { |
484 | let m = RwLock::new(NonCopy(10)); |
485 | assert_eq!(m.into_inner(), NonCopy(10)); |
486 | } |
487 | |
488 | #[test] |
489 | fn test_into_inner_drop() { |
490 | struct Foo(Arc<AtomicUsize>); |
491 | impl Drop for Foo { |
492 | fn drop(&mut self) { |
493 | self.0.fetch_add(1, Ordering::SeqCst); |
494 | } |
495 | } |
496 | let num_drops = Arc::new(AtomicUsize::new(0)); |
497 | let m = RwLock::new(Foo(num_drops.clone())); |
498 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); |
499 | { |
500 | let _inner = m.into_inner(); |
501 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); |
502 | } |
503 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); |
504 | } |
505 | |
506 | #[test] |
507 | fn test_get_mut() { |
508 | let mut m = RwLock::new(NonCopy(10)); |
509 | *m.get_mut() = NonCopy(20); |
510 | assert_eq!(m.into_inner(), NonCopy(20)); |
511 | } |
512 | |
513 | #[test] |
514 | fn test_rwlockguard_sync() { |
515 | fn sync<T: Sync>(_: T) {} |
516 | |
517 | let rwlock = RwLock::new(()); |
518 | sync(rwlock.read()); |
519 | sync(rwlock.write()); |
520 | } |
521 | |
522 | #[test] |
523 | fn test_rwlock_downgrade() { |
524 | let x = Arc::new(RwLock::new(0)); |
525 | let mut handles = Vec::new(); |
526 | for _ in 0..8 { |
527 | let x = x.clone(); |
528 | handles.push(thread::spawn(move || { |
529 | for _ in 0..100 { |
530 | let mut writer = x.write(); |
531 | *writer += 1; |
532 | let cur_val = *writer; |
533 | let reader = RwLockWriteGuard::downgrade(writer); |
534 | assert_eq!(cur_val, *reader); |
535 | } |
536 | })); |
537 | } |
538 | for handle in handles { |
539 | handle.join().unwrap() |
540 | } |
541 | assert_eq!(*x.read(), 800); |
542 | } |
543 | |
544 | #[test] |
545 | fn test_rwlock_recursive() { |
546 | let arc = Arc::new(RwLock::new(1)); |
547 | let arc2 = arc.clone(); |
548 | let lock1 = arc.read(); |
549 | let t = thread::spawn(move || { |
550 | let _lock = arc2.write(); |
551 | }); |
552 | |
        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
554 | thread::sleep(Duration::from_millis(100)); |
555 | } else { |
556 | // FIXME: https://github.com/fortanix/rust-sgx/issues/31 |
557 | for _ in 0..100 { |
558 | thread::yield_now(); |
559 | } |
560 | } |
561 | |
562 | // A normal read would block here since there is a pending writer |
563 | let lock2 = arc.read_recursive(); |
564 | |
565 | // Unblock the thread and join it. |
566 | drop(lock1); |
567 | drop(lock2); |
568 | t.join().unwrap(); |
569 | } |
570 | |
571 | #[test] |
572 | fn test_rwlock_debug() { |
573 | let x = RwLock::new(vec![0u8, 10]); |
574 | |
575 | assert_eq!(format!("{:?}" , x), "RwLock { data: [0, 10] }" ); |
576 | let _lock = x.write(); |
577 | assert_eq!(format!("{:?}" , x), "RwLock { data: <locked> }" ); |
578 | } |
579 | |
580 | #[test] |
581 | fn test_clone() { |
582 | let rwlock = RwLock::new(Arc::new(1)); |
583 | let a = rwlock.read_recursive(); |
584 | let b = a.clone(); |
585 | assert_eq!(Arc::strong_count(&b), 2); |
586 | } |
587 | |
    #[cfg(feature = "serde")]
589 | #[test] |
590 | fn test_serde() { |
591 | let contents: Vec<u8> = vec![0, 1, 2]; |
        let rwlock = RwLock::new(contents.clone());

        let serialized = serialize(&rwlock).unwrap();
        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(rwlock.read()), *(deserialized.read()));
598 | assert_eq!(contents, *(deserialized.read())); |
599 | } |
600 | |
601 | #[test] |
602 | fn test_issue_203() { |
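        // Regression test: a lock used inside a thread-local's destructor
        // must keep working while the thread's TLS is being torn down.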
603 | struct Bar(RwLock<()>); |
604 | |
605 | impl Drop for Bar { |
606 | fn drop(&mut self) { |
607 | let _n = self.0.write(); |
608 | } |
609 | } |
610 | |
611 | thread_local! { |
612 | static B: Bar = Bar(RwLock::new(())); |
613 | } |
614 | |
615 | thread::spawn(|| { |
616 | B.with(|_| ()); |
617 | |
618 | let a = RwLock::new(()); |
619 | let _a = a.read(); |
620 | }) |
621 | .join() |
622 | .unwrap(); |
623 | } |
624 | |
625 | #[test] |
626 | fn test_rw_write_is_locked() { |
627 | let lock = RwLock::new(0isize); |
628 | { |
629 | let _read_guard = lock.read(); |
630 | |
631 | assert!(lock.is_locked()); |
632 | assert!(!lock.is_locked_exclusive()); |
633 | } |
634 | |
635 | { |
636 | let _write_guard = lock.write(); |
637 | |
638 | assert!(lock.is_locked()); |
639 | assert!(lock.is_locked_exclusive()); |
640 | } |
641 | } |
642 | } |
643 | |