use crate::sync::batch_semaphore::{Semaphore, TryAcquireError};
use crate::sync::mutex::TryLockError;
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
use std::cell::UnsafeCell;
use std::marker;
use std::marker::PhantomData;
use std::sync::Arc;

pub(crate) mod owned_read_guard;
pub(crate) mod owned_write_guard;
pub(crate) mod owned_write_guard_mapped;
pub(crate) mod read_guard;
pub(crate) mod write_guard;
pub(crate) mod write_guard_mapped;
pub(crate) use owned_read_guard::OwnedRwLockReadGuard;
pub(crate) use owned_write_guard::OwnedRwLockWriteGuard;
pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
pub(crate) use read_guard::RwLockReadGuard;
pub(crate) use write_guard::RwLockWriteGuard;
pub(crate) use write_guard_mapped::RwLockMappedWriteGuard;

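// `MAX_READS` is the number of permits the semaphore below is created with:
// `read` acquires a single permit while `write` acquires all of them at once,
// so this constant is also the cap on concurrent readers. The much smaller
// value under `loom` is presumably there to keep the model checker's state
// space manageable.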
#[cfg(not(loom))]
const MAX_READS: u32 = std::u32::MAX >> 3;

#[cfg(loom)]
const MAX_READS: u32 = 10;

/// An asynchronous reader-writer lock.
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// In comparison, a [`Mutex`] does not distinguish between readers and writers
/// that acquire the lock, so every task waiting for the lock must yield until
/// it becomes available. An `RwLock` allows any number of readers to acquire
/// the lock as long as a writer is not holding it.
///
/// The priority policy of Tokio's read-write lock is _fair_ (or
/// [_write-preferring_]), in order to ensure that readers cannot starve
/// writers. Fairness is ensured using a first-in, first-out queue for the tasks
/// awaiting the lock; if a task that wishes to acquire the write lock is at the
/// head of the queue, read locks will not be given out until the write lock has
/// been released. This is in contrast to the Rust standard library's
/// `std::sync::RwLock`, where the priority policy is dependent on the
/// operating system's implementation.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies [`Send`] to be shared across threads. The RAII
/// guards returned from the locking methods implement [`Deref`](trait@std::ops::Deref)
/// (and [`DerefMut`](trait@std::ops::DerefMut) for the `write` methods) to
/// allow access to the content of the lock.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// #[tokio::main]
/// async fn main() {
///     let lock = RwLock::new(5);
///
///     // many reader locks can be held at once
///     {
///         let r1 = lock.read().await;
///         let r2 = lock.read().await;
///         assert_eq!(*r1, 5);
///         assert_eq!(*r2, 5);
///     } // read locks are dropped at this point
///
///     // only one write lock may be held, however
///     {
///         let mut w = lock.write().await;
///         *w += 1;
///         assert_eq!(*w, 6);
///     } // write lock is dropped here
/// }
/// ```
///
/// [`Mutex`]: struct@super::Mutex
/// [`RwLock`]: struct@RwLock
/// [`RwLockReadGuard`]: struct@RwLockReadGuard
/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard
/// [`Send`]: trait@std::marker::Send
/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
pub struct RwLock<T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,

    // maximum number of concurrent readers
    mr: u32,

    // semaphore to coordinate read and write access to T
    s: Semaphore,

    // inner data T
    c: UnsafeCell<T>,
}

#[test]
#[cfg(not(loom))]
fn bounds() {
    fn check_send<T: Send>() {}
    fn check_sync<T: Sync>() {}
    fn check_unpin<T: Unpin>() {}
    // This has to take a value, since the async fn's return type is unnameable.
    fn check_send_sync_val<T: Send + Sync>(_t: T) {}

    check_send::<RwLock<u32>>();
    check_sync::<RwLock<u32>>();
    check_unpin::<RwLock<u32>>();

    check_send::<RwLockReadGuard<'_, u32>>();
    check_sync::<RwLockReadGuard<'_, u32>>();
    check_unpin::<RwLockReadGuard<'_, u32>>();

    check_send::<OwnedRwLockReadGuard<u32, i32>>();
    check_sync::<OwnedRwLockReadGuard<u32, i32>>();
    check_unpin::<OwnedRwLockReadGuard<u32, i32>>();

    check_send::<RwLockWriteGuard<'_, u32>>();
    check_sync::<RwLockWriteGuard<'_, u32>>();
    check_unpin::<RwLockWriteGuard<'_, u32>>();

    check_send::<RwLockMappedWriteGuard<'_, u32>>();
    check_sync::<RwLockMappedWriteGuard<'_, u32>>();
    check_unpin::<RwLockMappedWriteGuard<'_, u32>>();

    check_send::<OwnedRwLockWriteGuard<u32>>();
    check_sync::<OwnedRwLockWriteGuard<u32>>();
    check_unpin::<OwnedRwLockWriteGuard<u32>>();

    check_send::<OwnedRwLockMappedWriteGuard<u32, i32>>();
    check_sync::<OwnedRwLockMappedWriteGuard<u32, i32>>();
    check_unpin::<OwnedRwLockMappedWriteGuard<u32, i32>>();

    let rwlock = Arc::new(RwLock::new(0));
    check_send_sync_val(rwlock.read());
    check_send_sync_val(Arc::clone(&rwlock).read_owned());
    check_send_sync_val(rwlock.write());
    check_send_sync_val(Arc::clone(&rwlock).write_owned());
}
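
// A small additional sanity check of the `try_read`/`try_write` fast paths,
// using only the synchronous entry points so no runtime is needed. It mirrors
// the guarantees documented on those methods below.
#[test]
#[cfg(not(loom))]
fn try_lock_contention() {
    let lock = RwLock::new(5);

    // A held read guard allows further readers but rejects writers.
    let read = lock.try_read().unwrap();
    assert!(lock.try_read().is_ok());
    assert!(lock.try_write().is_err());
    drop(read);

    // A held write guard rejects both readers and writers.
    let write = lock.try_write().unwrap();
    assert!(lock.try_read().is_err());
    assert!(lock.try_write().is_err());
    drop(write);

    // With all guards dropped the lock is uncontended again.
    assert!(lock.try_write().is_ok());
}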

// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
// If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through
// RwLock<T>.
unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}
// NB: These impls need to be explicit since we're storing a raw pointer.
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send`.
unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {}
unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {}
// T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in
// the RwLock, unlike RwLockReadGuard.
unsafe impl<T, U> Send for OwnedRwLockReadGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Sync,
{
}
unsafe impl<T, U> Sync for OwnedRwLockReadGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Sync for OwnedRwLockMappedWriteGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send` - but since this also provides mutable access, we need to
// make sure that `T` is `Send` since its value can be sent across thread
// boundaries.
unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Send for OwnedRwLockMappedWriteGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}

impl<T: ?Sized> RwLock<T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// ```
    #[track_caller]
    pub fn new(value: T) -> RwLock<T>
    where
        T: Sized,
    {
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();
            let resource_span = tracing::trace_span!(
                "runtime.resource",
                concrete_type = "RwLock",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    max_readers = MAX_READS,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    write_locked = false,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    current_readers = 0,
                );
            });

            resource_span
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(MAX_READS as usize);

        RwLock {
            mr: MAX_READS,
            c: UnsafeCell::new(value),
            s,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked
    /// and allows a maximum of `max_reads` concurrent readers.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// let lock = RwLock::with_max_readers(5, 1024);
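    ///
    /// // With a limit of two readers, a third concurrent read attempt
    /// // fails immediately instead of waiting.
    /// let small = RwLock::with_max_readers(5, 2);
    /// let _r1 = small.try_read().unwrap();
    /// let _r2 = small.try_read().unwrap();
    /// assert!(small.try_read().is_err());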
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `max_reads` is more than `u32::MAX >> 3`.
    #[track_caller]
    pub fn with_max_readers(value: T, max_reads: u32) -> RwLock<T>
    where
        T: Sized,
    {
        assert!(
            max_reads <= MAX_READS,
            "a RwLock may not be created with more than {} readers",
            MAX_READS
        );

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();

            let resource_span = tracing::trace_span!(
                "runtime.resource",
                concrete_type = "RwLock",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    max_readers = max_reads,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    write_locked = false,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    current_readers = 0,
                );
            });

            resource_span
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(max_reads as usize);

        RwLock {
            mr: max_reads,
            c: UnsafeCell::new(value),
            s,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// static LOCK: RwLock<i32> = RwLock::const_new(5);
    /// ```
    #[cfg(all(feature = "parking_lot", not(all(loom, test))))]
    #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))]
    pub const fn const_new(value: T) -> RwLock<T>
    where
        T: Sized,
    {
        RwLock {
            mr: MAX_READS,
            c: UnsafeCell::new(value),
            s: Semaphore::const_new(MAX_READS as usize),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked
    /// and allows a maximum of `max_reads` concurrent readers.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// static LOCK: RwLock<i32> = RwLock::const_with_max_readers(5, 1024);
    /// ```
    #[cfg(all(feature = "parking_lot", not(all(loom, test))))]
    #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))]
    pub const fn const_with_max_readers(value: T, mut max_reads: u32) -> RwLock<T>
    where
        T: Sized,
    {
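        // Out-of-range values are masked into range rather than rejected;
        // unlike `with_max_readers`, this cannot assert (presumably because
        // panicking in a `const fn` was not an option when this was written).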
        max_reads &= MAX_READS;
        RwLock {
            mr: max_reads,
            c: UnsafeCell::new(value),
            s: Semaphore::const_new(max_reads as usize),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }

    /// Locks this `RwLock` with shared read access, causing the current task
    /// to yield until the lock has been acquired.
    ///
    /// The calling task will yield until there are no writers which hold the
    /// lock. There may be other readers inside the lock when the task resumes.
    ///
    /// Note that under the priority policy of [`RwLock`], read locks are not
    /// granted until all write locks requested before them have been granted
    /// and released, to prevent writer starvation. Therefore deadlock may occur
    /// if a read lock is held by the current task, a write lock attempt is
    /// made, and then a subsequent read lock attempt is made by the current
    /// task.
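    ///
    /// For example, the following pattern may deadlock: once the spawned
    /// writer is queued, the second `read` waits behind it, while the writer
    /// itself waits for the first read guard to be dropped.
    ///
    /// ```no_run
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(1));
    ///     let first = lock.read().await;
    ///
    ///     // A writer queues up behind `first`...
    ///     let c_lock = lock.clone();
    ///     tokio::spawn(async move {
    ///         let _write = c_lock.write().await;
    ///     });
    ///
    ///     // ...and this second read request queues behind the writer,
    ///     // so neither can ever make progress.
    ///     let second = lock.read().await;
    ///     drop((first, second));
    /// }
    /// ```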
    ///
    /// Returns an RAII guard which will drop this read access of the `RwLock`
    /// when dropped.
    ///
    /// # Cancel safety
    ///
    /// This method uses a queue to fairly distribute locks in the order they
    /// were requested. Cancelling a call to `read` makes you lose your place in
    /// the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(1));
    ///     let c_lock = lock.clone();
    ///
    ///     let n = lock.read().await;
    ///     assert_eq!(*n, 1);
    ///
    ///     tokio::spawn(async move {
    ///         // While main has an active read lock, we acquire one too.
    ///         let r = c_lock.read().await;
    ///         assert_eq!(*r, 1);
    ///     }).await.expect("The spawned task has panicked");
    ///
    ///     // Drop the guard after the spawned task finishes.
    ///     drop(n);
    /// }
    /// ```
    pub async fn read(&self) -> RwLockReadGuard<'_, T> {
        let acquire_fut = async {
            self.s.acquire(1).await.unwrap_or_else(|_| {
                // The semaphore can only return an error if it has been
                // closed, but we never close this semaphore, so this is
                // unreachable.
                unreachable!()
            });

            RwLockReadGuard {
                s: &self.s,
                data: self.c.get(),
                marker: PhantomData,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
            }
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::read",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }

    /// Blockingly locks this `RwLock` with shared read access.
    ///
    /// This method is intended for use cases where you
    /// need to use this rwlock in asynchronous code as well as in synchronous code.
    ///
    /// Returns an RAII guard which will drop the read access of this `RwLock` when dropped.
    ///
    /// # Panics
    ///
    /// This function panics if called within an asynchronous execution context.
    ///
    /// - If you find yourself in an asynchronous execution context and needing
    ///   to call some (synchronous) function which performs one of these
    ///   `blocking_` operations, then consider wrapping that call inside
    ///   [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
    ///   (or [`block_in_place()`][crate::task::block_in_place]).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let rwlock = Arc::new(RwLock::new(1));
    ///     let mut write_lock = rwlock.write().await;
    ///
    ///     let blocking_task = tokio::task::spawn_blocking({
    ///         let rwlock = Arc::clone(&rwlock);
    ///         move || {
    ///             // This shall block until the `write_lock` is released.
    ///             let read_lock = rwlock.blocking_read();
    ///             assert_eq!(*read_lock, 0);
    ///         }
    ///     });
    ///
    ///     *write_lock -= 1;
    ///     drop(write_lock); // release the lock.
    ///
    ///     // Await the completion of the blocking task.
    ///     blocking_task.await.unwrap();
    ///
    ///     // Assert uncontended.
    ///     assert!(rwlock.try_write().is_ok());
    /// }
    /// ```
    #[track_caller]
    #[cfg(feature = "sync")]
    pub fn blocking_read(&self) -> RwLockReadGuard<'_, T> {
        crate::future::block_on(self.read())
    }

    /// Locks this `RwLock` with shared read access, causing the current task
    /// to yield until the lock has been acquired.
    ///
    /// The calling task will yield until there are no writers which hold the
    /// lock. There may be other readers inside the lock when the task resumes.
    ///
    /// This method is identical to [`RwLock::read`], except that the returned
    /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
    /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
    /// method, and the guard will live for the `'static` lifetime, as it keeps
    /// the `RwLock` alive by holding an `Arc`.
    ///
    /// Note that under the priority policy of [`RwLock`], read locks are not
    /// granted until all write locks requested before them have been granted
    /// and released, to prevent writer starvation. Therefore deadlock may occur
    /// if a read lock is held by the current task, a write lock attempt is
    /// made, and then a subsequent read lock attempt is made by the current
    /// task.
    ///
    /// Returns an RAII guard which will drop this read access of the `RwLock`
    /// when dropped.
    ///
    /// # Cancel safety
    ///
    /// This method uses a queue to fairly distribute locks in the order they
    /// were requested. Cancelling a call to `read_owned` makes you lose your
    /// place in the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(1));
    ///     let c_lock = lock.clone();
    ///
    ///     let n = lock.read_owned().await;
    ///     assert_eq!(*n, 1);
    ///
    ///     tokio::spawn(async move {
    ///         // While main has an active read lock, we acquire one too.
    ///         let r = c_lock.read_owned().await;
    ///         assert_eq!(*r, 1);
    ///     }).await.expect("The spawned task has panicked");
    ///
    ///     // Drop the guard after the spawned task finishes.
    ///     drop(n);
    /// }
    /// ```
    pub async fn read_owned(self: Arc<Self>) -> OwnedRwLockReadGuard<T> {
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();

        let acquire_fut = async {
            self.s.acquire(1).await.unwrap_or_else(|_| {
                // The semaphore can only return an error if it has been
                // closed, but we never close this semaphore, so this is
                // unreachable.
                unreachable!()
            });

            OwnedRwLockReadGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                data: self.c.get(),
                lock: self,
                _p: PhantomData,
            }
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::read_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access couldn't be acquired immediately, returns [`TryLockError`].
    /// Otherwise, an RAII guard is returned which will release read access
    /// when dropped.
    ///
    /// [`TryLockError`]: TryLockError
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(1));
    ///     let c_lock = lock.clone();
    ///
    ///     let v = lock.try_read().unwrap();
    ///     assert_eq!(*v, 1);
    ///
    ///     tokio::spawn(async move {
    ///         // While main has an active read lock, we acquire one too.
    ///         let n = c_lock.read().await;
    ///         assert_eq!(*n, 1);
    ///     }).await.expect("The spawned task has panicked");
    ///
    ///     // Drop the guard when spawned task finishes.
    ///     drop(v);
    /// }
    /// ```
    pub fn try_read(&self) -> Result<RwLockReadGuard<'_, T>, TryLockError> {
        match self.s.try_acquire(1) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        let guard = RwLockReadGuard {
            s: &self.s,
            data: self.c.get(),
            marker: marker::PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access couldn't be acquired immediately, returns [`TryLockError`].
    /// Otherwise, an RAII guard is returned which will release read access
    /// when dropped.
    ///
    /// This method is identical to [`RwLock::try_read`], except that the
    /// returned guard references the `RwLock` with an [`Arc`] rather than by
    /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
    /// call this method, and the guard will live for the `'static` lifetime,
    /// as it keeps the `RwLock` alive by holding an `Arc`.
    ///
    /// [`TryLockError`]: TryLockError
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(1));
    ///     let c_lock = lock.clone();
    ///
    ///     let v = lock.try_read_owned().unwrap();
    ///     assert_eq!(*v, 1);
    ///
    ///     tokio::spawn(async move {
    ///         // While main has an active read lock, we acquire one too.
    ///         let n = c_lock.read_owned().await;
    ///         assert_eq!(*n, 1);
    ///     }).await.expect("The spawned task has panicked");
    ///
    ///     // Drop the guard when spawned task finishes.
    ///     drop(v);
    /// }
    /// ```
    pub fn try_read_owned(self: Arc<Self>) -> Result<OwnedRwLockReadGuard<T>, TryLockError> {
        match self.s.try_acquire(1) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        let guard = OwnedRwLockReadGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            data: self.c.get(),
            lock: self,
            _p: PhantomData,
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }

    /// Locks this `RwLock` with exclusive write access, causing the current
    /// task to yield until the lock has been acquired.
    ///
    /// The calling task will yield while other writers or readers currently
    /// have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    ///
    /// # Cancel safety
    ///
    /// This method uses a queue to fairly distribute locks in the order they
    /// were requested. Cancelling a call to `write` makes you lose your place
    /// in the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = RwLock::new(1);
    ///
    ///     let mut n = lock.write().await;
    ///     *n = 2;
    /// }
    /// ```
    pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
        let acquire_fut = async {
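            // Acquiring all `mr` permits at once is what makes this lock
            // exclusive: the call can only complete once every reader (each
            // holding one permit) and any other writer have released theirs.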
            self.s.acquire(self.mr).await.unwrap_or_else(|_| {
                // The semaphore can only return an error if it has been
                // closed, but we never close this semaphore, so this is
                // unreachable.
                unreachable!()
            });

            RwLockWriteGuard {
                permits_acquired: self.mr,
                s: &self.s,
                data: self.c.get(),
                marker: marker::PhantomData,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
            }
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::write",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        guard
    }

    /// Blockingly locks this `RwLock` with exclusive write access.
    ///
    /// This method is intended for use cases where you
    /// need to use this rwlock in asynchronous code as well as in synchronous code.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock` when dropped.
    ///
    /// # Panics
    ///
    /// This function panics if called within an asynchronous execution context.
    ///
    /// - If you find yourself in an asynchronous execution context and needing
    ///   to call some (synchronous) function which performs one of these
    ///   `blocking_` operations, then consider wrapping that call inside
    ///   [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
    ///   (or [`block_in_place()`][crate::task::block_in_place]).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let rwlock = Arc::new(RwLock::new(1));
    ///     let read_lock = rwlock.read().await;
    ///
    ///     let blocking_task = tokio::task::spawn_blocking({
    ///         let rwlock = Arc::clone(&rwlock);
    ///         move || {
    ///             // This shall block until the `read_lock` is released.
    ///             let mut write_lock = rwlock.blocking_write();
    ///             *write_lock = 2;
    ///         }
    ///     });
    ///
    ///     assert_eq!(*read_lock, 1);
    ///     // Release the last outstanding read lock.
    ///     drop(read_lock);
    ///
    ///     // Await the completion of the blocking task.
    ///     blocking_task.await.unwrap();
    ///
    ///     // Assert uncontended.
    ///     let read_lock = rwlock.try_read().unwrap();
    ///     assert_eq!(*read_lock, 2);
    /// }
    /// ```
    #[track_caller]
    #[cfg(feature = "sync")]
    pub fn blocking_write(&self) -> RwLockWriteGuard<'_, T> {
        crate::future::block_on(self.write())
    }

    /// Locks this `RwLock` with exclusive write access, causing the current
    /// task to yield until the lock has been acquired.
    ///
    /// The calling task will yield while other writers or readers currently
    /// have access to the lock.
    ///
    /// This method is identical to [`RwLock::write`], except that the returned
    /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
    /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
    /// method, and the guard will live for the `'static` lifetime, as it keeps
    /// the `RwLock` alive by holding an `Arc`.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    ///
    /// # Cancel safety
    ///
    /// This method uses a queue to fairly distribute locks in the order they
    /// were requested. Cancelling a call to `write_owned` makes you lose your
    /// place in the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(1));
    ///
    ///     let mut n = lock.write_owned().await;
    ///     *n = 2;
    /// }
    /// ```
    pub async fn write_owned(self: Arc<Self>) -> OwnedRwLockWriteGuard<T> {
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();

        let acquire_fut = async {
            self.s.acquire(self.mr).await.unwrap_or_else(|_| {
                // The semaphore can only return an error if it has been
                // closed, but we never close this semaphore, so this is
                // unreachable.
                unreachable!()
            });

            OwnedRwLockWriteGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                permits_acquired: self.mr,
                data: self.c.get(),
                lock: self,
                _p: PhantomData,
            }
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::write_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        guard
    }

    /// Attempts to acquire this `RwLock` with exclusive write access.
    ///
    /// If the access couldn't be acquired immediately, returns [`TryLockError`].
    /// Otherwise, an RAII guard is returned which will release write access
    /// when dropped.
    ///
    /// [`TryLockError`]: TryLockError
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let rw = RwLock::new(1);
    ///
    ///     let v = rw.read().await;
    ///     assert_eq!(*v, 1);
    ///
    ///     assert!(rw.try_write().is_err());
    /// }
    /// ```
    pub fn try_write(&self) -> Result<RwLockWriteGuard<'_, T>, TryLockError> {
        match self.s.try_acquire(self.mr) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        let guard = RwLockWriteGuard {
            permits_acquired: self.mr,
            s: &self.s,
            data: self.c.get(),
            marker: marker::PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        Ok(guard)
    }

    /// Attempts to acquire this `RwLock` with exclusive write access.
    ///
    /// If the access couldn't be acquired immediately, returns [`TryLockError`].
    /// Otherwise, an RAII guard is returned which will release write access
    /// when dropped.
    ///
    /// This method is identical to [`RwLock::try_write`], except that the
    /// returned guard references the `RwLock` with an [`Arc`] rather than by
    /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
    /// call this method, and the guard will live for the `'static` lifetime,
    /// as it keeps the `RwLock` alive by holding an `Arc`.
    ///
    /// [`TryLockError`]: TryLockError
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let rw = Arc::new(RwLock::new(1));
    ///
    ///     let v = Arc::clone(&rw).read_owned().await;
    ///     assert_eq!(*v, 1);
    ///
    ///     assert!(rw.try_write_owned().is_err());
    /// }
    /// ```
    pub fn try_write_owned(self: Arc<Self>) -> Result<OwnedRwLockWriteGuard<T>, TryLockError> {
        match self.s.try_acquire(self.mr) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        let guard = OwnedRwLockWriteGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            permits_acquired: self.mr,
            data: self.c.get(),
            lock: self,
            _p: PhantomData,
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        Ok(guard)
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place -- the mutable borrow statically guarantees no locks exist.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// fn main() {
    ///     let mut lock = RwLock::new(1);
    ///
    ///     let n = lock.get_mut();
    ///     *n = 2;
    /// }
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        unsafe {
            // Safety: This is https://github.com/rust-lang/rust/pull/76936
            &mut *self.c.get()
        }
    }

    /// Consumes the lock, returning the underlying data.
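    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// assert_eq!(lock.into_inner(), 5);
    /// ```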
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        self.c.into_inner()
    }
}

impl<T> From<T> for RwLock<T> {
    fn from(s: T) -> Self {
        Self::new(s)
    }
}

impl<T: ?Sized> Default for RwLock<T>
where
    T: Default,
{
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T: ?Sized> std::fmt::Debug for RwLock<T>
where
    T: std::fmt::Debug,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut d = f.debug_struct("RwLock");
        match self.try_read() {
            Ok(inner) => d.field("data", &&*inner),
            Err(_) => d.field("data", &format_args!("<locked>")),
        };
        d.finish()
    }
}