1 | //! A lock that provides data access to either one writer or many readers. |
2 | |
3 | use crate::{ |
4 | atomic::{AtomicUsize, Ordering}, |
5 | RelaxStrategy, Spin, |
6 | }; |
7 | use core::{ |
8 | cell::UnsafeCell, |
9 | fmt, |
10 | marker::PhantomData, |
11 | mem, |
12 | mem::ManuallyDrop, |
13 | ops::{Deref, DerefMut}, |
14 | }; |
15 | |
16 | /// A lock that provides data access to either one writer or many readers. |
17 | /// |
18 | /// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses |
/// spinning for synchronisation instead. Unlike its namesake, this lock does not
20 | /// track lock poisoning. |
21 | /// |
22 | /// This type of lock allows a number of readers or at most one writer at any |
23 | /// point in time. The write portion of this lock typically allows modification |
24 | /// of the underlying data (exclusive access) and the read portion of this lock |
25 | /// typically allows for read-only access (shared access). |
26 | /// |
27 | /// The type parameter `T` represents the data that this lock protects. It is |
28 | /// required that `T` satisfies `Send` to be shared across tasks and `Sync` to |
29 | /// allow concurrent access through readers. The RAII guards returned from the |
30 | /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) |
/// to allow access to the contents of the lock.
32 | /// |
33 | /// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a |
/// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) or
/// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
36 | /// Writable or upgradeable guards can be downgraded through their respective `downgrade` |
37 | /// functions. |
38 | /// |
39 | /// Based on Facebook's |
40 | /// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h). |
41 | /// This implementation is unfair to writers - if the lock always has readers, then no writers will |
42 | /// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no |
43 | /// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken |
/// when there are existing readers. However, if the lock is highly contended and writes are
/// crucial, then this implementation may be a poor choice.
46 | /// |
47 | /// # Examples |
48 | /// |
49 | /// ``` |
50 | /// use spin; |
51 | /// |
52 | /// let lock = spin::RwLock::new(5); |
53 | /// |
54 | /// // many reader locks can be held at once |
55 | /// { |
56 | /// let r1 = lock.read(); |
57 | /// let r2 = lock.read(); |
58 | /// assert_eq!(*r1, 5); |
59 | /// assert_eq!(*r2, 5); |
60 | /// } // read locks are dropped at this point |
61 | /// |
62 | /// // only one write lock may be held, however |
63 | /// { |
64 | /// let mut w = lock.write(); |
65 | /// *w += 1; |
66 | /// assert_eq!(*w, 6); |
67 | /// } // write lock is dropped here |
68 | /// ``` |
69 | pub struct RwLock<T: ?Sized, R = Spin> { |
70 | phantom: PhantomData<R>, |
71 | lock: AtomicUsize, |
72 | data: UnsafeCell<T>, |
73 | } |
74 | |
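// Lock word layout, as encoded by the constants below: bit 0 is the writer flag,
// bit 1 is the upgradeable-guard flag, and the remaining upper bits hold the reader
// count (each reader adds `READER` to the lock word).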
75 | const READER: usize = 1 << 2; |
76 | const UPGRADED: usize = 1 << 1; |
77 | const WRITER: usize = 1; |
78 | |
79 | /// A guard that provides immutable data access. |
80 | /// |
81 | /// When the guard falls out of scope it will decrement the read count, |
82 | /// potentially releasing the lock. |
83 | pub struct RwLockReadGuard<'a, T: 'a + ?Sized> { |
84 | lock: &'a AtomicUsize, |
85 | data: *const T, |
86 | } |
87 | |
88 | /// A guard that provides mutable data access. |
89 | /// |
90 | /// When the guard falls out of scope it will release the lock. |
91 | pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> { |
92 | phantom: PhantomData<R>, |
93 | inner: &'a RwLock<T, R>, |
94 | data: *mut T, |
95 | } |
96 | |
97 | /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. |
98 | /// |
99 | /// No writers or other upgradeable guards can exist while this is in scope. New reader |
100 | /// creation is prevented (to alleviate writer starvation) but there may be existing readers |
101 | /// when the lock is acquired. |
102 | /// |
103 | /// When the guard falls out of scope it will release the lock. |
104 | pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> { |
105 | phantom: PhantomData<R>, |
106 | inner: &'a RwLock<T, R>, |
107 | data: *const T, |
108 | } |
109 | |
110 | // Same unsafe impls as `std::sync::RwLock` |
111 | unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {} |
112 | unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {} |
113 | |
114 | unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockWriteGuard<'_, T, R> {} |
115 | unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockWriteGuard<'_, T, R> {} |
116 | |
117 | unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {} |
118 | unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {} |
119 | |
120 | unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockUpgradableGuard<'_, T, R> {} |
121 | unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockUpgradableGuard<'_, T, R> {} |
122 | |
123 | impl<T, R> RwLock<T, R> { |
124 | /// Creates a new spinlock wrapping the supplied data. |
125 | /// |
126 | /// May be used statically: |
127 | /// |
128 | /// ``` |
129 | /// use spin; |
130 | /// |
131 | /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(()); |
132 | /// |
133 | /// fn demo() { |
134 | /// let lock = RW_LOCK.read(); |
135 | /// // do something with lock |
136 | /// drop(lock); |
137 | /// } |
138 | /// ``` |
#[inline]
140 | pub const fn new(data: T) -> Self { |
141 | RwLock { |
142 | phantom: PhantomData, |
143 | lock: AtomicUsize::new(0), |
144 | data: UnsafeCell::new(data), |
145 | } |
146 | } |
147 | |
148 | /// Consumes this `RwLock`, returning the underlying data. |
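///
/// A quick example:
///
/// ```
/// let lock = spin::RwLock::new(42);
/// assert_eq!(lock.into_inner(), 42);
/// ```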
#[inline]
150 | pub fn into_inner(self) -> T { |
151 | // We know statically that there are no outstanding references to |
152 | // `self` so there's no need to lock. |
153 | let RwLock { data, .. } = self; |
154 | data.into_inner() |
155 | } |
/// Returns a mutable pointer to the underlying data.
157 | /// |
158 | /// This is mostly meant to be used for applications which require manual unlocking, but where |
159 | /// storing both the lock and the pointer to the inner data gets inefficient. |
160 | /// |
161 | /// While this is safe, writing to the data is undefined behavior unless the current thread has |
162 | /// acquired a write lock, and reading requires either a read or write lock. |
163 | /// |
164 | /// # Example |
165 | /// ``` |
166 | /// let lock = spin::RwLock::new(42); |
167 | /// |
168 | /// unsafe { |
169 | /// core::mem::forget(lock.write()); |
170 | /// |
171 | /// assert_eq!(lock.as_mut_ptr().read(), 42); |
172 | /// lock.as_mut_ptr().write(58); |
173 | /// |
174 | /// lock.force_write_unlock(); |
175 | /// } |
176 | /// |
177 | /// assert_eq!(*lock.read(), 58); |
178 | /// |
179 | /// ``` |
#[inline(always)]
181 | pub fn as_mut_ptr(&self) -> *mut T { |
182 | self.data.get() |
183 | } |
184 | } |
185 | |
186 | impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> { |
187 | /// Locks this rwlock with shared read access, blocking the current thread |
188 | /// until it can be acquired. |
189 | /// |
190 | /// The calling thread will be blocked until there are no more writers which |
191 | /// hold the lock. There may be other readers currently inside the lock when |
192 | /// this method returns. This method does not provide any guarantees with |
193 | /// respect to the ordering of whether contentious readers or writers will |
194 | /// acquire the lock first. |
195 | /// |
196 | /// Returns an RAII guard which will release this thread's shared access |
197 | /// once it is dropped. |
198 | /// |
199 | /// ``` |
200 | /// let mylock = spin::RwLock::new(0); |
201 | /// { |
202 | /// let mut data = mylock.read(); |
203 | /// // The lock is now locked and the data can be read |
/// println!("{}", *data);
205 | /// // The lock is dropped |
206 | /// } |
207 | /// ``` |
#[inline]
209 | pub fn read(&self) -> RwLockReadGuard<T> { |
210 | loop { |
211 | match self.try_read() { |
212 | Some(guard) => return guard, |
213 | None => R::relax(), |
214 | } |
215 | } |
216 | } |
217 | |
218 | /// Lock this rwlock with exclusive write access, blocking the current |
219 | /// thread until it can be acquired. |
220 | /// |
221 | /// This function will not return while other writers or other readers |
222 | /// currently have access to the lock. |
223 | /// |
224 | /// Returns an RAII guard which will drop the write access of this rwlock |
225 | /// when dropped. |
226 | /// |
227 | /// ``` |
228 | /// let mylock = spin::RwLock::new(0); |
229 | /// { |
230 | /// let mut data = mylock.write(); |
231 | /// // The lock is now locked and the data can be written |
232 | /// *data += 1; |
233 | /// // The lock is dropped |
234 | /// } |
235 | /// ``` |
#[inline]
237 | pub fn write(&self) -> RwLockWriteGuard<T, R> { |
238 | loop { |
239 | match self.try_write_internal(false) { |
240 | Some(guard) => return guard, |
241 | None => R::relax(), |
242 | } |
243 | } |
244 | } |
245 | |
246 | /// Obtain a readable lock guard that can later be upgraded to a writable lock guard. |
247 | /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method. |
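///
/// A minimal example (mirroring the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) docs):
///
/// ```
/// let mylock = spin::RwLock::new(0);
///
/// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
/// let writable = upgradeable.upgrade();
/// assert_eq!(*writable, 0);
/// ```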
#[inline]
249 | pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T, R> { |
250 | loop { |
251 | match self.try_upgradeable_read() { |
252 | Some(guard) => return guard, |
253 | None => R::relax(), |
254 | } |
255 | } |
256 | } |
257 | } |
258 | |
259 | impl<T: ?Sized, R> RwLock<T, R> { |
260 | // Acquire a read lock, returning the new lock value. |
261 | fn acquire_reader(&self) -> usize { |
262 | // An arbitrary cap that allows us to catch overflows long before they happen |
263 | const MAX_READERS: usize = core::usize::MAX / READER / 2; |
264 | |
265 | let value = self.lock.fetch_add(READER, Ordering::Acquire); |
266 | |
267 | if value > MAX_READERS * READER { |
268 | self.lock.fetch_sub(READER, Ordering::Relaxed); |
panic!("Too many lock readers, cannot safely proceed");
270 | } else { |
271 | value |
272 | } |
273 | } |
274 | |
275 | /// Attempt to acquire this lock with shared read access. |
276 | /// |
277 | /// This function will never block and will return immediately if `read` |
278 | /// would otherwise succeed. Returns `Some` of an RAII guard which will |
279 | /// release the shared access of this thread when dropped, or `None` if the |
280 | /// access could not be granted. This method does not provide any |
281 | /// guarantees with respect to the ordering of whether contentious readers |
282 | /// or writers will acquire the lock first. |
283 | /// |
284 | /// ``` |
285 | /// let mylock = spin::RwLock::new(0); |
286 | /// { |
287 | /// match mylock.try_read() { |
288 | /// Some(data) => { |
289 | /// // The lock is now locked and the data can be read |
/// println!("{}", *data);
291 | /// // The lock is dropped |
292 | /// }, |
293 | /// None => (), // no cigar |
294 | /// }; |
295 | /// } |
296 | /// ``` |
#[inline]
298 | pub fn try_read(&self) -> Option<RwLockReadGuard<T>> { |
299 | let value = self.acquire_reader(); |
300 | |
301 | // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held. |
302 | // This helps reduce writer starvation. |
303 | if value & (WRITER | UPGRADED) != 0 { |
304 | // Lock is taken, undo. |
305 | self.lock.fetch_sub(READER, Ordering::Release); |
306 | None |
307 | } else { |
308 | Some(RwLockReadGuard { |
309 | lock: &self.lock, |
310 | data: unsafe { &*self.data.get() }, |
311 | }) |
312 | } |
313 | } |
314 | |
315 | /// Return the number of readers that currently hold the lock (including upgradable readers). |
316 | /// |
317 | /// # Safety |
318 | /// |
319 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' |
320 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. |
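///
/// A quick illustration:
///
/// ```
/// let lock = spin::RwLock::new(());
///
/// let r1 = lock.read();
/// let r2 = lock.read();
/// assert_eq!(lock.reader_count(), 2);
///
/// drop((r1, r2));
/// assert_eq!(lock.reader_count(), 0);
/// ```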
321 | pub fn reader_count(&self) -> usize { |
322 | let state = self.lock.load(Ordering::Relaxed); |
323 | state / READER + (state & UPGRADED) / UPGRADED |
324 | } |
325 | |
326 | /// Return the number of writers that currently hold the lock. |
327 | /// |
328 | /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`. |
329 | /// |
330 | /// # Safety |
331 | /// |
332 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' |
333 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. |
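///
/// A quick illustration:
///
/// ```
/// let lock = spin::RwLock::new(());
/// assert_eq!(lock.writer_count(), 0);
///
/// let w = lock.write();
/// assert_eq!(lock.writer_count(), 1);
///
/// drop(w);
/// assert_eq!(lock.writer_count(), 0);
/// ```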
334 | pub fn writer_count(&self) -> usize { |
335 | (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER |
336 | } |
337 | |
338 | /// Force decrement the reader count. |
339 | /// |
340 | /// # Safety |
341 | /// |
342 | /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s |
343 | /// live, or if called more times than `read` has been called, but can be |
344 | /// useful in FFI contexts where the caller doesn't know how to deal with |
345 | /// RAII. The underlying atomic operation uses `Ordering::Release`. |
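///
/// A sketch of the intended manual-unlock usage (mirroring the crate's own tests):
///
/// ```
/// let lock = spin::RwLock::new(());
///
/// core::mem::forget(lock.read());
/// assert!(lock.try_write().is_none());
///
/// // Release the leaked read guard by hand.
/// unsafe { lock.force_read_decrement(); }
/// assert!(lock.try_write().is_some());
/// ```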
#[inline]
347 | pub unsafe fn force_read_decrement(&self) { |
348 | debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0); |
349 | self.lock.fetch_sub(READER, Ordering::Release); |
350 | } |
351 | |
352 | /// Force unlock exclusive write access. |
353 | /// |
354 | /// # Safety |
355 | /// |
356 | /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s |
357 | /// live, or if called when there are current readers, but can be useful in |
358 | /// FFI contexts where the caller doesn't know how to deal with RAII. The |
359 | /// underlying atomic operation uses `Ordering::Release`. |
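///
/// A sketch of the intended manual-unlock usage (mirroring the crate's own tests):
///
/// ```
/// let lock = spin::RwLock::new(());
///
/// core::mem::forget(lock.write());
/// assert!(lock.try_read().is_none());
///
/// // Release the leaked write guard by hand.
/// unsafe { lock.force_write_unlock(); }
/// assert!(lock.try_read().is_some());
/// ```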
#[inline]
361 | pub unsafe fn force_write_unlock(&self) { |
362 | debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0); |
363 | self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release); |
364 | } |
365 | |
#[inline(always)]
367 | fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T, R>> { |
368 | if compare_exchange( |
369 | &self.lock, |
370 | 0, |
371 | WRITER, |
372 | Ordering::Acquire, |
373 | Ordering::Relaxed, |
374 | strong, |
375 | ) |
376 | .is_ok() |
377 | { |
378 | Some(RwLockWriteGuard { |
379 | phantom: PhantomData, |
380 | inner: self, |
381 | data: unsafe { &mut *self.data.get() }, |
382 | }) |
383 | } else { |
384 | None |
385 | } |
386 | } |
387 | |
388 | /// Attempt to lock this rwlock with exclusive write access. |
389 | /// |
390 | /// This function does not ever block, and it will return `None` if a call |
391 | /// to `write` would otherwise block. If successful, an RAII guard is |
392 | /// returned. |
393 | /// |
394 | /// ``` |
395 | /// let mylock = spin::RwLock::new(0); |
396 | /// { |
397 | /// match mylock.try_write() { |
398 | /// Some(mut data) => { |
399 | /// // The lock is now locked and the data can be written |
400 | /// *data += 1; |
401 | /// // The lock is implicitly dropped |
402 | /// }, |
403 | /// None => (), // no cigar |
404 | /// }; |
405 | /// } |
406 | /// ``` |
#[inline]
408 | pub fn try_write(&self) -> Option<RwLockWriteGuard<T, R>> { |
409 | self.try_write_internal(true) |
410 | } |
411 | |
412 | /// Tries to obtain an upgradeable lock guard. |
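///
/// Returns `None` if a writer or another upgradeable guard currently holds the lock:
///
/// ```
/// let mylock = spin::RwLock::new(0);
///
/// let upgradeable = mylock.try_upgradeable_read().unwrap();
/// assert!(mylock.try_upgradeable_read().is_none()); // only one may be held at a time
/// assert_eq!(*upgradeable, 0);
/// ```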
#[inline]
414 | pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T, R>> { |
415 | if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 { |
416 | Some(RwLockUpgradableGuard { |
417 | phantom: PhantomData, |
418 | inner: self, |
419 | data: unsafe { &*self.data.get() }, |
420 | }) |
421 | } else { |
422 | // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock. |
423 | // When they unlock, they will clear the bit. |
424 | None |
425 | } |
426 | } |
427 | |
428 | /// Returns a mutable reference to the underlying data. |
429 | /// |
430 | /// Since this call borrows the `RwLock` mutably, no actual locking needs to |
431 | /// take place -- the mutable borrow statically guarantees no locks exist. |
432 | /// |
433 | /// # Examples |
434 | /// |
435 | /// ``` |
436 | /// let mut lock = spin::RwLock::new(0); |
437 | /// *lock.get_mut() = 10; |
438 | /// assert_eq!(*lock.read(), 10); |
439 | /// ``` |
440 | pub fn get_mut(&mut self) -> &mut T { |
441 | // We know statically that there are no other references to `self`, so |
442 | // there's no need to lock the inner lock. |
443 | unsafe { &mut *self.data.get() } |
444 | } |
445 | } |
446 | |
447 | impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> { |
448 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
449 | match self.try_read() { |
Some(guard) => write!(f, "RwLock {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, " }}")),
None => write!(f, "RwLock {{ <locked> }}"),
454 | } |
455 | } |
456 | } |
457 | |
458 | impl<T: ?Sized + Default, R> Default for RwLock<T, R> { |
459 | fn default() -> Self { |
Self::new(Default::default())
461 | } |
462 | } |
463 | |
464 | impl<T, R> From<T> for RwLock<T, R> { |
465 | fn from(data: T) -> Self { |
466 | Self::new(data) |
467 | } |
468 | } |
469 | |
470 | impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> { |
471 | /// Leak the lock guard, yielding a reference to the underlying data. |
472 | /// |
/// Note that this function leaves the original lock read-locked forever: other readers
/// may still acquire it, but writers never will.
474 | /// |
475 | /// ``` |
476 | /// let mylock = spin::RwLock::new(0); |
477 | /// |
478 | /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read()); |
479 | /// |
480 | /// assert_eq!(*data, 0); |
481 | /// ``` |
#[inline]
483 | pub fn leak(this: Self) -> &'rwlock T { |
let this = ManuallyDrop::new(this);
485 | // Safety: We know statically that only we are referencing data |
486 | unsafe { &*this.data } |
487 | } |
488 | } |
489 | |
490 | impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> { |
491 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
492 | fmt::Debug::fmt(&**self, f) |
493 | } |
494 | } |
495 | |
496 | impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> { |
497 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
498 | fmt::Display::fmt(&**self, f) |
499 | } |
500 | } |
501 | |
502 | impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> { |
503 | /// Upgrades an upgradeable lock guard to a writable lock guard. |
504 | /// |
505 | /// ``` |
506 | /// let mylock = spin::RwLock::new(0); |
507 | /// |
508 | /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable |
509 | /// let writable = upgradeable.upgrade(); |
510 | /// ``` |
#[inline]
512 | pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> { |
513 | loop { |
self = match self.try_upgrade_internal(false) {
Ok(guard) => return guard,
Err(e) => e,
517 | }; |
518 | |
519 | R::relax(); |
520 | } |
521 | } |
522 | } |
523 | |
524 | impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> { |
#[inline(always)]
526 | fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> { |
527 | if compare_exchange( |
528 | &self.inner.lock, |
529 | UPGRADED, |
530 | WRITER, |
531 | Ordering::Acquire, |
532 | Ordering::Relaxed, |
533 | strong, |
534 | ) |
535 | .is_ok() |
536 | { |
537 | let inner = self.inner; |
538 | |
539 | // Forget the old guard so its destructor doesn't run (before mutably aliasing data below) |
540 | mem::forget(self); |
541 | |
542 | // Upgrade successful |
543 | Ok(RwLockWriteGuard { |
544 | phantom: PhantomData, |
545 | inner, |
546 | data: unsafe { &mut *inner.data.get() }, |
547 | }) |
548 | } else { |
549 | Err(self) |
550 | } |
551 | } |
552 | |
553 | /// Tries to upgrade an upgradeable lock guard to a writable lock guard. |
554 | /// |
555 | /// ``` |
556 | /// let mylock = spin::RwLock::new(0); |
557 | /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable |
558 | /// |
559 | /// match upgradeable.try_upgrade() { |
560 | /// Ok(writable) => /* upgrade successful - use writable lock guard */ (), |
561 | /// Err(upgradeable) => /* upgrade unsuccessful */ (), |
562 | /// }; |
563 | /// ``` |
#[inline]
565 | pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> { |
566 | self.try_upgrade_internal(true) |
567 | } |
568 | |
#[inline]
570 | /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. |
571 | /// |
572 | /// ``` |
573 | /// let mylock = spin::RwLock::new(1); |
574 | /// |
575 | /// let upgradeable = mylock.upgradeable_read(); |
576 | /// assert!(mylock.try_read().is_none()); |
577 | /// assert_eq!(*upgradeable, 1); |
578 | /// |
579 | /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin |
580 | /// assert!(mylock.try_read().is_some()); |
581 | /// assert_eq!(*readable, 1); |
582 | /// ``` |
583 | pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> { |
584 | // Reserve the read guard for ourselves |
585 | self.inner.acquire_reader(); |
586 | |
587 | let inner = self.inner; |
588 | |
589 | // Dropping self removes the UPGRADED bit |
590 | mem::drop(self); |
591 | |
592 | RwLockReadGuard { |
593 | lock: &inner.lock, |
594 | data: unsafe { &*inner.data.get() }, |
595 | } |
596 | } |
597 | |
598 | /// Leak the lock guard, yielding a reference to the underlying data. |
599 | /// |
600 | /// Note that this function will permanently lock the original lock. |
601 | /// |
602 | /// ``` |
603 | /// let mylock = spin::RwLock::new(0); |
604 | /// |
605 | /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read()); |
606 | /// |
607 | /// assert_eq!(*data, 0); |
608 | /// ``` |
#[inline]
610 | pub fn leak(this: Self) -> &'rwlock T { |
611 | let this = ManuallyDrop::new(this); |
612 | // Safety: We know statically that only we are referencing data |
613 | unsafe { &*this.data } |
614 | } |
615 | } |
616 | |
617 | impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> { |
618 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
619 | fmt::Debug::fmt(&**self, f) |
620 | } |
621 | } |
622 | |
623 | impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> { |
624 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
625 | fmt::Display::fmt(&**self, f) |
626 | } |
627 | } |
628 | |
629 | impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { |
630 | /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. |
631 | /// |
632 | /// ``` |
633 | /// let mylock = spin::RwLock::new(0); |
634 | /// |
635 | /// let mut writable = mylock.write(); |
636 | /// *writable = 1; |
637 | /// |
638 | /// let readable = writable.downgrade(); // This is guaranteed not to spin |
639 | /// # let readable_2 = mylock.try_read().unwrap(); |
640 | /// assert_eq!(*readable, 1); |
641 | /// ``` |
#[inline]
643 | pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> { |
644 | // Reserve the read guard for ourselves |
645 | self.inner.acquire_reader(); |
646 | |
647 | let inner = self.inner; |
648 | |
// Dropping self clears the WRITER bit (and any pending UPGRADED bit)
650 | mem::drop(self); |
651 | |
652 | RwLockReadGuard { |
653 | lock: &inner.lock, |
654 | data: unsafe { &*inner.data.get() }, |
655 | } |
656 | } |
657 | |
658 | /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin. |
659 | /// |
660 | /// ``` |
661 | /// let mylock = spin::RwLock::new(0); |
662 | /// |
663 | /// let mut writable = mylock.write(); |
664 | /// *writable = 1; |
665 | /// |
666 | /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin |
667 | /// assert_eq!(*readable, 1); |
668 | /// ``` |
#[inline]
670 | pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> { |
671 | debug_assert_eq!( |
672 | self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED), |
673 | WRITER |
674 | ); |
675 | |
676 | // Reserve the read guard for ourselves |
677 | self.inner.lock.store(UPGRADED, Ordering::Release); |
678 | |
679 | let inner = self.inner; |
680 | |
// Forget self so its destructor doesn't run and clear the lock state we just set
682 | mem::forget(self); |
683 | |
684 | RwLockUpgradableGuard { |
685 | phantom: PhantomData, |
686 | inner, |
687 | data: unsafe { &*inner.data.get() }, |
688 | } |
689 | } |
690 | |
691 | /// Leak the lock guard, yielding a mutable reference to the underlying data. |
692 | /// |
693 | /// Note that this function will permanently lock the original lock. |
694 | /// |
695 | /// ``` |
696 | /// let mylock = spin::RwLock::new(0); |
697 | /// |
698 | /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write()); |
699 | /// |
700 | /// *data = 1; |
701 | /// assert_eq!(*data, 1); |
702 | /// ``` |
#[inline]
704 | pub fn leak(this: Self) -> &'rwlock mut T { |
705 | let mut this = ManuallyDrop::new(this); |
706 | // Safety: We know statically that only we are referencing data |
707 | unsafe { &mut *this.data } |
708 | } |
709 | } |
710 | |
711 | impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> { |
712 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
713 | fmt::Debug::fmt(&**self, f) |
714 | } |
715 | } |
716 | |
717 | impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> { |
718 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
719 | fmt::Display::fmt(&**self, f) |
720 | } |
721 | } |
722 | |
723 | impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> { |
724 | type Target = T; |
725 | |
726 | fn deref(&self) -> &T { |
727 | // Safety: We know statically that only we are referencing data |
728 | unsafe { &*self.data } |
729 | } |
730 | } |
731 | |
732 | impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> { |
733 | type Target = T; |
734 | |
735 | fn deref(&self) -> &T { |
736 | // Safety: We know statically that only we are referencing data |
737 | unsafe { &*self.data } |
738 | } |
739 | } |
740 | |
741 | impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> { |
742 | type Target = T; |
743 | |
744 | fn deref(&self) -> &T { |
745 | // Safety: We know statically that only we are referencing data |
746 | unsafe { &*self.data } |
747 | } |
748 | } |
749 | |
750 | impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> { |
751 | fn deref_mut(&mut self) -> &mut T { |
752 | // Safety: We know statically that only we are referencing data |
753 | unsafe { &mut *self.data } |
754 | } |
755 | } |
756 | |
757 | impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> { |
758 | fn drop(&mut self) { |
759 | debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0); |
self.lock.fetch_sub(READER, Ordering::Release);
761 | } |
762 | } |
763 | |
764 | impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> { |
765 | fn drop(&mut self) { |
766 | debug_assert_eq!( |
767 | self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED), |
768 | UPGRADED |
769 | ); |
self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
771 | } |
772 | } |
773 | |
774 | impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> { |
775 | fn drop(&mut self) { |
776 | debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER); |
777 | |
778 | // Writer is responsible for clearing both WRITER and UPGRADED bits. |
779 | // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held. |
780 | self.inner |
781 | .lock |
.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
783 | } |
784 | } |
785 | |
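// Dispatches to either a strong or weak compare-exchange on `atomic`. The weak form
// may fail spuriously, which is acceptable inside the `write`/`upgrade` spin loops,
// while the strong form backs the one-shot `try_write`/`try_upgrade` methods.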
#[inline(always)]
787 | fn compare_exchange( |
788 | atomic: &AtomicUsize, |
789 | current: usize, |
790 | new: usize, |
791 | success: Ordering, |
792 | failure: Ordering, |
793 | strong: bool, |
794 | ) -> Result<usize, usize> { |
795 | if strong { |
796 | atomic.compare_exchange(current, new, success, failure) |
797 | } else { |
798 | atomic.compare_exchange_weak(current, new, success, failure) |
799 | } |
800 | } |
801 | |
#[cfg(feature = "lock_api")]
803 | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> { |
804 | type GuardMarker = lock_api_crate::GuardSend; |
805 | |
806 | const INIT: Self = Self::new(()); |
807 | |
#[inline(always)]
809 | fn lock_exclusive(&self) { |
810 | // Prevent guard destructor running |
811 | core::mem::forget(self.write()); |
812 | } |
813 | |
#[inline(always)]
815 | fn try_lock_exclusive(&self) -> bool { |
816 | // Prevent guard destructor running |
817 | self.try_write().map(|g| core::mem::forget(g)).is_some() |
818 | } |
819 | |
#[inline(always)]
821 | unsafe fn unlock_exclusive(&self) { |
822 | drop(RwLockWriteGuard { |
823 | inner: self, |
824 | data: &mut (), |
825 | phantom: PhantomData, |
826 | }); |
827 | } |
828 | |
#[inline(always)]
830 | fn lock_shared(&self) { |
831 | // Prevent guard destructor running |
832 | core::mem::forget(self.read()); |
833 | } |
834 | |
#[inline(always)]
836 | fn try_lock_shared(&self) -> bool { |
837 | // Prevent guard destructor running |
838 | self.try_read().map(|g| core::mem::forget(g)).is_some() |
839 | } |
840 | |
#[inline(always)]
842 | unsafe fn unlock_shared(&self) { |
843 | drop(RwLockReadGuard { |
844 | lock: &self.lock, |
845 | data: &(), |
846 | }); |
847 | } |
848 | |
#[inline(always)]
850 | fn is_locked(&self) -> bool { |
851 | self.lock.load(Ordering::Relaxed) != 0 |
852 | } |
853 | } |
854 | |
#[cfg(feature = "lock_api")]
856 | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> { |
#[inline(always)]
858 | fn lock_upgradable(&self) { |
859 | // Prevent guard destructor running |
860 | core::mem::forget(self.upgradeable_read()); |
861 | } |
862 | |
#[inline(always)]
864 | fn try_lock_upgradable(&self) -> bool { |
865 | // Prevent guard destructor running |
866 | self.try_upgradeable_read() |
867 | .map(|g| core::mem::forget(g)) |
868 | .is_some() |
869 | } |
870 | |
#[inline(always)]
872 | unsafe fn unlock_upgradable(&self) { |
873 | drop(RwLockUpgradableGuard { |
874 | inner: self, |
875 | data: &(), |
876 | phantom: PhantomData, |
877 | }); |
878 | } |
879 | |
#[inline(always)]
881 | unsafe fn upgrade(&self) { |
882 | let tmp_guard = RwLockUpgradableGuard { |
883 | inner: self, |
884 | data: &(), |
885 | phantom: PhantomData, |
886 | }; |
887 | core::mem::forget(tmp_guard.upgrade()); |
888 | } |
889 | |
#[inline(always)]
891 | unsafe fn try_upgrade(&self) -> bool { |
892 | let tmp_guard = RwLockUpgradableGuard { |
893 | inner: self, |
894 | data: &(), |
895 | phantom: PhantomData, |
896 | }; |
897 | tmp_guard |
898 | .try_upgrade() |
899 | .map(|g| core::mem::forget(g)) |
900 | .is_ok() |
901 | } |
902 | } |
903 | |
#[cfg(feature = "lock_api")]
905 | unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> { |
906 | unsafe fn downgrade(&self) { |
let tmp_guard = RwLockWriteGuard {
908 | inner: self, |
909 | data: &mut (), |
910 | phantom: PhantomData, |
911 | }; |
912 | core::mem::forget(tmp_guard.downgrade()); |
913 | } |
914 | } |
915 | |
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgradeDowngrade for RwLock<(), R> {
918 | unsafe fn downgrade_upgradable(&self) { |
919 | let tmp_guard = RwLockUpgradableGuard { |
920 | inner: self, |
921 | data: &(), |
922 | phantom: PhantomData, |
923 | }; |
924 | core::mem::forget(tmp_guard.downgrade()); |
925 | } |
926 | |
927 | unsafe fn downgrade_to_upgradable(&self) { |
928 | let tmp_guard = RwLockWriteGuard { |
929 | inner: self, |
930 | data: &mut (), |
931 | phantom: PhantomData, |
932 | }; |
933 | core::mem::forget(tmp_guard.downgrade_to_upgradeable()); |
934 | } |
935 | } |
936 | |
#[cfg(test)]
938 | mod tests { |
939 | use std::prelude::v1::*; |
940 | |
941 | use std::sync::atomic::{AtomicUsize, Ordering}; |
942 | use std::sync::mpsc::channel; |
943 | use std::sync::Arc; |
944 | use std::thread; |
945 | |
946 | type RwLock<T> = super::RwLock<T>; |
947 | |
#[derive(Eq, PartialEq, Debug)]
949 | struct NonCopy(i32); |
950 | |
#[test]
952 | fn smoke() { |
953 | let l = RwLock::new(()); |
954 | drop(l.read()); |
955 | drop(l.write()); |
956 | drop((l.read(), l.read())); |
957 | drop(l.write()); |
958 | } |
959 | |
960 | // TODO: needs RNG |
961 | //#[test] |
962 | //fn frob() { |
963 | // static R: RwLock = RwLock::new(); |
964 | // const N: usize = 10; |
965 | // const M: usize = 1000; |
966 | // |
967 | // let (tx, rx) = channel::<()>(); |
968 | // for _ in 0..N { |
969 | // let tx = tx.clone(); |
970 | // thread::spawn(move|| { |
971 | // let mut rng = rand::thread_rng(); |
972 | // for _ in 0..M { |
973 | // if rng.gen_weighted_bool(N) { |
974 | // drop(R.write()); |
975 | // } else { |
976 | // drop(R.read()); |
977 | // } |
978 | // } |
979 | // drop(tx); |
980 | // }); |
981 | // } |
982 | // drop(tx); |
983 | // let _ = rx.recv(); |
984 | // unsafe { R.destroy(); } |
985 | //} |
986 | |
#[test]
988 | fn test_rw_arc() { |
989 | let arc = Arc::new(RwLock::new(0)); |
990 | let arc2 = arc.clone(); |
991 | let (tx, rx) = channel(); |
992 | |
993 | let t = thread::spawn(move || { |
994 | let mut lock = arc2.write(); |
995 | for _ in 0..10 { |
996 | let tmp = *lock; |
997 | *lock = -1; |
998 | thread::yield_now(); |
999 | *lock = tmp + 1; |
1000 | } |
1001 | tx.send(()).unwrap(); |
1002 | }); |
1003 | |
1004 | // Readers try to catch the writer in the act |
1005 | let mut children = Vec::new(); |
1006 | for _ in 0..5 { |
1007 | let arc3 = arc.clone(); |
1008 | children.push(thread::spawn(move || { |
1009 | let lock = arc3.read(); |
1010 | assert!(*lock >= 0); |
1011 | })); |
1012 | } |
1013 | |
1014 | // Wait for children to pass their asserts |
1015 | for r in children { |
1016 | assert!(r.join().is_ok()); |
1017 | } |
1018 | |
1019 | // Wait for writer to finish |
1020 | rx.recv().unwrap(); |
1021 | let lock = arc.read(); |
1022 | assert_eq!(*lock, 10); |
1023 | |
1024 | assert!(t.join().is_ok()); |
1025 | } |
1026 | |
#[test]
1028 | fn test_rw_access_in_unwind() { |
1029 | let arc = Arc::new(RwLock::new(1)); |
1030 | let arc2 = arc.clone(); |
1031 | let _ = thread::spawn(move || -> () { |
1032 | struct Unwinder { |
1033 | i: Arc<RwLock<isize>>, |
1034 | } |
1035 | impl Drop for Unwinder { |
1036 | fn drop(&mut self) { |
1037 | let mut lock = self.i.write(); |
1038 | *lock += 1; |
1039 | } |
1040 | } |
1041 | let _u = Unwinder { i: arc2 }; |
1042 | panic!(); |
1043 | }) |
1044 | .join(); |
1045 | let lock = arc.read(); |
1046 | assert_eq!(*lock, 2); |
1047 | } |
1048 | |
#[test]
1050 | fn test_rwlock_unsized() { |
1051 | let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); |
1052 | { |
1053 | let b = &mut *rw.write(); |
1054 | b[0] = 4; |
1055 | b[2] = 5; |
1056 | } |
1057 | let comp: &[i32] = &[4, 2, 5]; |
1058 | assert_eq!(&*rw.read(), comp); |
1059 | } |
1060 | |
#[test]
1062 | fn test_rwlock_try_write() { |
1063 | use std::mem::drop; |
1064 | |
1065 | let lock = RwLock::new(0isize); |
1066 | let read_guard = lock.read(); |
1067 | |
1068 | let write_result = lock.try_write(); |
1069 | match write_result { |
1070 | None => (), |
1071 | Some(_) => assert!( |
1072 | false, |
1073 | "try_write should not succeed while read_guard is in scope" |
1074 | ), |
1075 | } |
1076 | |
1077 | drop(read_guard); |
1078 | } |
1079 | |
#[test]
1081 | fn test_rw_try_read() { |
1082 | let m = RwLock::new(0); |
1083 | ::std::mem::forget(m.write()); |
1084 | assert!(m.try_read().is_none()); |
1085 | } |
1086 | |
#[test]
1088 | fn test_into_inner() { |
1089 | let m = RwLock::new(NonCopy(10)); |
1090 | assert_eq!(m.into_inner(), NonCopy(10)); |
1091 | } |
1092 | |
#[test]
1094 | fn test_into_inner_drop() { |
1095 | struct Foo(Arc<AtomicUsize>); |
1096 | impl Drop for Foo { |
1097 | fn drop(&mut self) { |
1098 | self.0.fetch_add(1, Ordering::SeqCst); |
1099 | } |
1100 | } |
1101 | let num_drops = Arc::new(AtomicUsize::new(0)); |
1102 | let m = RwLock::new(Foo(num_drops.clone())); |
1103 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); |
1104 | { |
1105 | let _inner = m.into_inner(); |
1106 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); |
1107 | } |
1108 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); |
1109 | } |
1110 | |
#[test]
1112 | fn test_force_read_decrement() { |
1113 | let m = RwLock::new(()); |
1114 | ::std::mem::forget(m.read()); |
1115 | ::std::mem::forget(m.read()); |
1116 | ::std::mem::forget(m.read()); |
1117 | assert!(m.try_write().is_none()); |
1118 | unsafe { |
1119 | m.force_read_decrement(); |
1120 | m.force_read_decrement(); |
1121 | } |
1122 | assert!(m.try_write().is_none()); |
1123 | unsafe { |
1124 | m.force_read_decrement(); |
1125 | } |
1126 | assert!(m.try_write().is_some()); |
1127 | } |
1128 | |
#[test]
1130 | fn test_force_write_unlock() { |
1131 | let m = RwLock::new(()); |
1132 | ::std::mem::forget(m.write()); |
1133 | assert!(m.try_read().is_none()); |
1134 | unsafe { |
1135 | m.force_write_unlock(); |
1136 | } |
1137 | assert!(m.try_read().is_some()); |
1138 | } |
1139 | |
#[test]
1141 | fn test_upgrade_downgrade() { |
1142 | let m = RwLock::new(()); |
1143 | { |
1144 | let _r = m.read(); |
1145 | let upg = m.try_upgradeable_read().unwrap(); |
1146 | assert!(m.try_read().is_none()); |
1147 | assert!(m.try_write().is_none()); |
1148 | assert!(upg.try_upgrade().is_err()); |
1149 | } |
1150 | { |
1151 | let w = m.write(); |
1152 | assert!(m.try_upgradeable_read().is_none()); |
1153 | let _r = w.downgrade(); |
1154 | assert!(m.try_upgradeable_read().is_some()); |
1155 | assert!(m.try_read().is_some()); |
1156 | assert!(m.try_write().is_none()); |
1157 | } |
1158 | { |
1159 | let _u = m.upgradeable_read(); |
1160 | assert!(m.try_upgradeable_read().is_none()); |
1161 | } |
1162 | |
1163 | assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok()); |
1164 | } |
1165 | } |
1166 | |