1 | use crate::sync::batch_semaphore::{Semaphore, TryAcquireError}; |
2 | use crate::sync::mutex::TryLockError; |
#[cfg(all(tokio_unstable, feature = "tracing"))]
4 | use crate::util::trace; |
5 | use std::cell::UnsafeCell; |
6 | use std::marker; |
7 | use std::marker::PhantomData; |
8 | use std::sync::Arc; |
9 | |
10 | pub(crate) mod owned_read_guard; |
11 | pub(crate) mod owned_write_guard; |
12 | pub(crate) mod owned_write_guard_mapped; |
13 | pub(crate) mod read_guard; |
14 | pub(crate) mod write_guard; |
15 | pub(crate) mod write_guard_mapped; |
16 | pub(crate) use owned_read_guard::OwnedRwLockReadGuard; |
17 | pub(crate) use owned_write_guard::OwnedRwLockWriteGuard; |
18 | pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard; |
19 | pub(crate) use read_guard::RwLockReadGuard; |
20 | pub(crate) use write_guard::RwLockWriteGuard; |
21 | pub(crate) use write_guard_mapped::RwLockMappedWriteGuard; |
22 | |
#[cfg(not(loom))]
const MAX_READS: u32 = std::u32::MAX >> 3;

#[cfg(loom)]
const MAX_READS: u32 = 10;
28 | |
29 | /// An asynchronous reader-writer lock. |
30 | /// |
31 | /// This type of lock allows a number of readers or at most one writer at any |
32 | /// point in time. The write portion of this lock typically allows modification |
33 | /// of the underlying data (exclusive access) and the read portion of this lock |
34 | /// typically allows for read-only access (shared access). |
35 | /// |
36 | /// In comparison, a [`Mutex`] does not distinguish between readers or writers |
37 | /// that acquire the lock, therefore causing any tasks waiting for the lock to |
38 | /// become available to yield. An `RwLock` will allow any number of readers to |
39 | /// acquire the lock as long as a writer is not holding the lock. |
40 | /// |
41 | /// The priority policy of Tokio's read-write lock is _fair_ (or |
42 | /// [_write-preferring_]), in order to ensure that readers cannot starve |
43 | /// writers. Fairness is ensured using a first-in, first-out queue for the tasks |
44 | /// awaiting the lock; if a task that wishes to acquire the write lock is at the |
45 | /// head of the queue, read locks will not be given out until the write lock has |
46 | /// been released. This is in contrast to the Rust standard library's |
47 | /// `std::sync::RwLock`, where the priority policy is dependent on the |
48 | /// operating system's implementation. |
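///
/// For example, once a writer is waiting, new read attempts queue behind it
/// even though earlier readers still hold the lock. The sketch below assumes
/// a current-thread runtime, so that the spawned writer is polled at the
/// `yield_now` call before the final `try_read`:
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// #[tokio::main(flavor = "current_thread")]
/// async fn main() {
///     let lock = Arc::new(RwLock::new(0));
///
///     // A reader currently holds the lock.
///     let r1 = lock.read().await;
///
///     // A writer queues up behind `r1`.
///     let writer = {
///         let lock = Arc::clone(&lock);
///         tokio::spawn(async move {
///             let mut w = lock.write().await;
///             *w += 1;
///         })
///     };
///
///     // Let the spawned writer run far enough to enqueue its request.
///     tokio::task::yield_now().await;
///
///     // The pending writer is at the head of the queue, so no new read
///     // locks are handed out, even though `r1` is still held.
///     assert!(lock.try_read().is_err());
///
///     drop(r1);
///     writer.await.unwrap();
///     assert_eq!(*lock.read().await, 1);
/// }
/// ```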
49 | /// |
50 | /// The type parameter `T` represents the data that this lock protects. It is |
51 | /// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards |
52 | /// returned from the locking methods implement [`Deref`](trait@std::ops::Deref) |
53 | /// (and [`DerefMut`](trait@std::ops::DerefMut) |
54 | /// for the `write` methods) to allow access to the content of the lock. |
55 | /// |
56 | /// # Examples |
57 | /// |
58 | /// ``` |
59 | /// use tokio::sync::RwLock; |
60 | /// |
61 | /// #[tokio::main] |
62 | /// async fn main() { |
63 | /// let lock = RwLock::new(5); |
64 | /// |
65 | /// // many reader locks can be held at once |
66 | /// { |
67 | /// let r1 = lock.read().await; |
68 | /// let r2 = lock.read().await; |
69 | /// assert_eq!(*r1, 5); |
70 | /// assert_eq!(*r2, 5); |
71 | /// } // read locks are dropped at this point |
72 | /// |
73 | /// // only one write lock may be held, however |
74 | /// { |
75 | /// let mut w = lock.write().await; |
76 | /// *w += 1; |
77 | /// assert_eq!(*w, 6); |
78 | /// } // write lock is dropped here |
79 | /// } |
80 | /// ``` |
81 | /// |
82 | /// [`Mutex`]: struct@super::Mutex |
83 | /// [`RwLock`]: struct@RwLock |
84 | /// [`RwLockReadGuard`]: struct@RwLockReadGuard |
85 | /// [`RwLockWriteGuard`]: struct@RwLockWriteGuard |
86 | /// [`Send`]: trait@std::marker::Send |
87 | /// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies |
88 | pub struct RwLock<T: ?Sized> { |
    #[cfg(all(tokio_unstable, feature = "tracing"))]
90 | resource_span: tracing::Span, |
91 | |
92 | // maximum number of concurrent readers |
93 | mr: u32, |
94 | |
    // semaphore to coordinate read and write access to T
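    // (readers acquire a single permit each; a writer acquires all `mr`
    // permits at once, which is what makes write access exclusive)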
96 | s: Semaphore, |
97 | |
    // inner data T
99 | c: UnsafeCell<T>, |
100 | } |
101 | |
#[test]
#[cfg(not(loom))]
104 | fn bounds() { |
105 | fn check_send<T: Send>() {} |
106 | fn check_sync<T: Sync>() {} |
107 | fn check_unpin<T: Unpin>() {} |
108 | // This has to take a value, since the async fn's return type is unnameable. |
109 | fn check_send_sync_val<T: Send + Sync>(_t: T) {} |
110 | |
111 | check_send::<RwLock<u32>>(); |
112 | check_sync::<RwLock<u32>>(); |
113 | check_unpin::<RwLock<u32>>(); |
114 | |
115 | check_send::<RwLockReadGuard<'_, u32>>(); |
116 | check_sync::<RwLockReadGuard<'_, u32>>(); |
117 | check_unpin::<RwLockReadGuard<'_, u32>>(); |
118 | |
119 | check_send::<OwnedRwLockReadGuard<u32, i32>>(); |
120 | check_sync::<OwnedRwLockReadGuard<u32, i32>>(); |
121 | check_unpin::<OwnedRwLockReadGuard<u32, i32>>(); |
122 | |
123 | check_send::<RwLockWriteGuard<'_, u32>>(); |
124 | check_sync::<RwLockWriteGuard<'_, u32>>(); |
125 | check_unpin::<RwLockWriteGuard<'_, u32>>(); |
126 | |
127 | check_send::<RwLockMappedWriteGuard<'_, u32>>(); |
128 | check_sync::<RwLockMappedWriteGuard<'_, u32>>(); |
129 | check_unpin::<RwLockMappedWriteGuard<'_, u32>>(); |
130 | |
131 | check_send::<OwnedRwLockWriteGuard<u32>>(); |
132 | check_sync::<OwnedRwLockWriteGuard<u32>>(); |
133 | check_unpin::<OwnedRwLockWriteGuard<u32>>(); |
134 | |
135 | check_send::<OwnedRwLockMappedWriteGuard<u32, i32>>(); |
136 | check_sync::<OwnedRwLockMappedWriteGuard<u32, i32>>(); |
137 | check_unpin::<OwnedRwLockMappedWriteGuard<u32, i32>>(); |
138 | |
139 | let rwlock = Arc::new(RwLock::new(0)); |
140 | check_send_sync_val(rwlock.read()); |
141 | check_send_sync_val(Arc::clone(&rwlock).read_owned()); |
142 | check_send_sync_val(rwlock.write()); |
143 | check_send_sync_val(Arc::clone(&rwlock).write_owned()); |
144 | } |
145 | |
146 | // As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads. |
147 | // If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through |
148 | // RwLock<T>. |
149 | unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {} |
150 | unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {} |
151 | // NB: These impls need to be explicit since we're storing a raw pointer. |
152 | // Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over |
153 | // `T` is `Send`. |
154 | unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {} |
155 | unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {} |
156 | // T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in |
157 | // the RwLock, unlike RwLockReadGuard. |
158 | unsafe impl<T, U> Send for OwnedRwLockReadGuard<T, U> |
159 | where |
160 | T: ?Sized + Send + Sync, |
161 | U: ?Sized + Sync, |
162 | { |
163 | } |
164 | unsafe impl<T, U> Sync for OwnedRwLockReadGuard<T, U> |
165 | where |
166 | T: ?Sized + Send + Sync, |
167 | U: ?Sized + Send + Sync, |
168 | { |
169 | } |
170 | unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} |
171 | unsafe impl<T> Sync for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {} |
172 | unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {} |
173 | unsafe impl<T, U> Sync for OwnedRwLockMappedWriteGuard<T, U> |
174 | where |
175 | T: ?Sized + Send + Sync, |
176 | U: ?Sized + Send + Sync, |
177 | { |
178 | } |
179 | // Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over |
// `T` is `Send` - but since this also provides mutable access, we need to
181 | // make sure that `T` is `Send` since its value can be sent across thread |
182 | // boundaries. |
183 | unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} |
184 | unsafe impl<T> Send for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {} |
185 | unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {} |
186 | unsafe impl<T, U> Send for OwnedRwLockMappedWriteGuard<T, U> |
187 | where |
188 | T: ?Sized + Send + Sync, |
189 | U: ?Sized + Send + Sync, |
190 | { |
191 | } |
192 | |
193 | impl<T: ?Sized> RwLock<T> { |
194 | /// Creates a new instance of an `RwLock<T>` which is unlocked. |
195 | /// |
196 | /// # Examples |
197 | /// |
198 | /// ``` |
199 | /// use tokio::sync::RwLock; |
200 | /// |
201 | /// let lock = RwLock::new(5); |
202 | /// ``` |
    #[track_caller]
204 | pub fn new(value: T) -> RwLock<T> |
205 | where |
206 | T: Sized, |
207 | { |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
209 | let resource_span = { |
210 | let location = std::panic::Location::caller(); |
211 | let resource_span = tracing::trace_span!( |
212 | parent: None, |
213 | "runtime.resource" , |
214 | concrete_type = "RwLock" , |
215 | kind = "Sync" , |
216 | loc.file = location.file(), |
217 | loc.line = location.line(), |
218 | loc.col = location.column(), |
219 | ); |
220 | |
221 | resource_span.in_scope(|| { |
222 | tracing::trace!( |
223 | target: "runtime::resource::state_update" , |
224 | max_readers = MAX_READS, |
225 | ); |
226 | |
227 | tracing::trace!( |
228 | target: "runtime::resource::state_update" , |
229 | write_locked = false, |
230 | ); |
231 | |
232 | tracing::trace!( |
233 | target: "runtime::resource::state_update" , |
234 | current_readers = 0, |
235 | ); |
236 | }); |
237 | |
238 | resource_span |
239 | }; |
240 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(MAX_READS as usize);
246 | |
247 | RwLock { |
248 | mr: MAX_READS, |
249 | c: UnsafeCell::new(value), |
250 | s, |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
252 | resource_span, |
253 | } |
254 | } |
255 | |
256 | /// Creates a new instance of an `RwLock<T>` which is unlocked |
257 | /// and allows a maximum of `max_reads` concurrent readers. |
258 | /// |
259 | /// # Examples |
260 | /// |
261 | /// ``` |
262 | /// use tokio::sync::RwLock; |
263 | /// |
264 | /// let lock = RwLock::with_max_readers(5, 1024); |
265 | /// ``` |
266 | /// |
267 | /// # Panics |
268 | /// |
269 | /// Panics if `max_reads` is more than `u32::MAX >> 3`. |
    #[track_caller]
271 | pub fn with_max_readers(value: T, max_reads: u32) -> RwLock<T> |
272 | where |
273 | T: Sized, |
274 | { |
275 | assert!( |
276 | max_reads <= MAX_READS, |
277 | "a RwLock may not be created with more than {} readers" , |
278 | MAX_READS |
279 | ); |
280 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
282 | let resource_span = { |
283 | let location = std::panic::Location::caller(); |
284 | |
285 | let resource_span = tracing::trace_span!( |
286 | parent: None, |
287 | "runtime.resource" , |
288 | concrete_type = "RwLock" , |
289 | kind = "Sync" , |
290 | loc.file = location.file(), |
291 | loc.line = location.line(), |
292 | loc.col = location.column(), |
293 | ); |
294 | |
295 | resource_span.in_scope(|| { |
296 | tracing::trace!( |
297 | target: "runtime::resource::state_update" , |
298 | max_readers = max_reads, |
299 | ); |
300 | |
301 | tracing::trace!( |
302 | target: "runtime::resource::state_update" , |
303 | write_locked = false, |
304 | ); |
305 | |
306 | tracing::trace!( |
307 | target: "runtime::resource::state_update" , |
308 | current_readers = 0, |
309 | ); |
310 | }); |
311 | |
312 | resource_span |
313 | }; |
314 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(max_reads as usize);
320 | |
321 | RwLock { |
322 | mr: max_reads, |
323 | c: UnsafeCell::new(value), |
324 | s, |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
326 | resource_span, |
327 | } |
328 | } |
329 | |
330 | /// Creates a new instance of an `RwLock<T>` which is unlocked. |
331 | /// |
332 | /// When using the `tracing` [unstable feature], a `RwLock` created with |
333 | /// `const_new` will not be instrumented. As such, it will not be visible |
334 | /// in [`tokio-console`]. Instead, [`RwLock::new`] should be used to create |
335 | /// an instrumented object if that is needed. |
336 | /// |
337 | /// # Examples |
338 | /// |
339 | /// ``` |
340 | /// use tokio::sync::RwLock; |
341 | /// |
342 | /// static LOCK: RwLock<i32> = RwLock::const_new(5); |
343 | /// ``` |
344 | /// |
345 | /// [`tokio-console`]: https://github.com/tokio-rs/console |
346 | /// [unstable feature]: crate#unstable-features |
    #[cfg(not(all(loom, test)))]
348 | pub const fn const_new(value: T) -> RwLock<T> |
349 | where |
350 | T: Sized, |
351 | { |
352 | RwLock { |
353 | mr: MAX_READS, |
354 | c: UnsafeCell::new(value), |
355 | s: Semaphore::const_new(MAX_READS as usize), |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
357 | resource_span: tracing::Span::none(), |
358 | } |
359 | } |
360 | |
361 | /// Creates a new instance of an `RwLock<T>` which is unlocked |
362 | /// and allows a maximum of `max_reads` concurrent readers. |
363 | /// |
364 | /// # Examples |
365 | /// |
366 | /// ``` |
367 | /// use tokio::sync::RwLock; |
368 | /// |
369 | /// static LOCK: RwLock<i32> = RwLock::const_with_max_readers(5, 1024); |
370 | /// ``` |
    #[cfg(not(all(loom, test)))]
372 | pub const fn const_with_max_readers(value: T, max_reads: u32) -> RwLock<T> |
373 | where |
374 | T: Sized, |
375 | { |
376 | assert!(max_reads <= MAX_READS); |
377 | |
378 | RwLock { |
379 | mr: max_reads, |
380 | c: UnsafeCell::new(value), |
381 | s: Semaphore::const_new(max_reads as usize), |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
383 | resource_span: tracing::Span::none(), |
384 | } |
385 | } |
386 | |
387 | /// Locks this `RwLock` with shared read access, causing the current task |
388 | /// to yield until the lock has been acquired. |
389 | /// |
390 | /// The calling task will yield until there are no writers which hold the |
391 | /// lock. There may be other readers inside the lock when the task resumes. |
392 | /// |
    /// Note that under the priority policy of [`RwLock`], read locks are not
    /// granted while an earlier write lock request is still pending, in order
    /// to prevent writer starvation. Therefore
395 | /// deadlock may occur if a read lock is held by the current task, a write |
396 | /// lock attempt is made, and then a subsequent read lock attempt is made |
397 | /// by the current task. |
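    ///
    /// A minimal sketch of that deadlock pattern (marked `no_run`, since
    /// executing it can hang forever):
    ///
    /// ```no_run
    /// use std::sync::Arc;
    /// use tokio::sync::RwLock;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let lock = Arc::new(RwLock::new(0));
    ///     let c_lock = lock.clone();
    ///
    ///     // The current task holds a read lock...
    ///     let r1 = lock.read().await;
    ///
    ///     // ...another task queues a write request behind it...
    ///     tokio::spawn(async move {
    ///         let _w = c_lock.write().await;
    ///     });
    ///
    ///     // ...and this second read then queues behind the pending writer,
    ///     // which itself waits for `r1` to be released: no one can make
    ///     // progress.
    ///     let _r2 = lock.read().await;
    ///
    ///     drop(r1);
    /// }
    /// ```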
398 | /// |
399 | /// Returns an RAII guard which will drop this read access of the `RwLock` |
400 | /// when dropped. |
401 | /// |
402 | /// # Cancel safety |
403 | /// |
404 | /// This method uses a queue to fairly distribute locks in the order they |
405 | /// were requested. Cancelling a call to `read` makes you lose your place in |
406 | /// the queue. |
407 | /// |
408 | /// # Examples |
409 | /// |
410 | /// ``` |
411 | /// use std::sync::Arc; |
412 | /// use tokio::sync::RwLock; |
413 | /// |
414 | /// #[tokio::main] |
415 | /// async fn main() { |
416 | /// let lock = Arc::new(RwLock::new(1)); |
417 | /// let c_lock = lock.clone(); |
418 | /// |
419 | /// let n = lock.read().await; |
420 | /// assert_eq!(*n, 1); |
421 | /// |
422 | /// tokio::spawn(async move { |
423 | /// // While main has an active read lock, we acquire one too. |
424 | /// let r = c_lock.read().await; |
425 | /// assert_eq!(*r, 1); |
    ///     }).await.expect("The spawned task has panicked");
427 | /// |
428 | /// // Drop the guard after the spawned task finishes. |
429 | /// drop(n); |
430 | /// } |
431 | /// ``` |
432 | pub async fn read(&self) -> RwLockReadGuard<'_, T> { |
433 | let acquire_fut = async { |
434 | self.s.acquire(1).await.unwrap_or_else(|_| { |
                // The semaphore was closed, but we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
437 | unreachable!() |
438 | }); |
439 | |
440 | RwLockReadGuard { |
441 | s: &self.s, |
442 | data: self.c.get(), |
443 | marker: PhantomData, |
                #[cfg(all(tokio_unstable, feature = "tracing"))]
445 | resource_span: self.resource_span.clone(), |
446 | } |
447 | }; |
448 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::read",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
459 | let guard = acquire_fut.await; |
460 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
468 | }); |
469 | |
470 | guard |
471 | } |
472 | |
473 | /// Blockingly locks this `RwLock` with shared read access. |
474 | /// |
475 | /// This method is intended for use cases where you |
476 | /// need to use this rwlock in asynchronous code as well as in synchronous code. |
477 | /// |
478 | /// Returns an RAII guard which will drop the read access of this `RwLock` when dropped. |
479 | /// |
480 | /// # Panics |
481 | /// |
482 | /// This function panics if called within an asynchronous execution context. |
483 | /// |
484 | /// - If you find yourself in an asynchronous execution context and needing |
485 | /// to call some (synchronous) function which performs one of these |
486 | /// `blocking_` operations, then consider wrapping that call inside |
487 | /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking] |
488 | /// (or [`block_in_place()`][crate::task::block_in_place]). |
489 | /// |
490 | /// # Examples |
491 | /// |
492 | /// ``` |
493 | /// use std::sync::Arc; |
494 | /// use tokio::sync::RwLock; |
495 | /// |
496 | /// #[tokio::main] |
497 | /// async fn main() { |
498 | /// let rwlock = Arc::new(RwLock::new(1)); |
499 | /// let mut write_lock = rwlock.write().await; |
500 | /// |
501 | /// let blocking_task = tokio::task::spawn_blocking({ |
502 | /// let rwlock = Arc::clone(&rwlock); |
503 | /// move || { |
504 | /// // This shall block until the `write_lock` is released. |
505 | /// let read_lock = rwlock.blocking_read(); |
506 | /// assert_eq!(*read_lock, 0); |
507 | /// } |
508 | /// }); |
509 | /// |
510 | /// *write_lock -= 1; |
511 | /// drop(write_lock); // release the lock. |
512 | /// |
513 | /// // Await the completion of the blocking task. |
514 | /// blocking_task.await.unwrap(); |
515 | /// |
516 | /// // Assert uncontended. |
517 | /// assert!(rwlock.try_write().is_ok()); |
518 | /// } |
519 | /// ``` |
    #[track_caller]
    #[cfg(feature = "sync")]
522 | pub fn blocking_read(&self) -> RwLockReadGuard<'_, T> { |
523 | crate::future::block_on(self.read()) |
524 | } |
525 | |
526 | /// Locks this `RwLock` with shared read access, causing the current task |
527 | /// to yield until the lock has been acquired. |
528 | /// |
529 | /// The calling task will yield until there are no writers which hold the |
530 | /// lock. There may be other readers inside the lock when the task resumes. |
531 | /// |
532 | /// This method is identical to [`RwLock::read`], except that the returned |
533 | /// guard references the `RwLock` with an [`Arc`] rather than by borrowing |
534 | /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this |
535 | /// method, and the guard will live for the `'static` lifetime, as it keeps |
536 | /// the `RwLock` alive by holding an `Arc`. |
537 | /// |
    /// Note that under the priority policy of [`RwLock`], read locks are not
    /// granted while an earlier write lock request is still pending, in order
    /// to prevent writer starvation. Therefore
540 | /// deadlock may occur if a read lock is held by the current task, a write |
541 | /// lock attempt is made, and then a subsequent read lock attempt is made |
542 | /// by the current task. |
543 | /// |
544 | /// Returns an RAII guard which will drop this read access of the `RwLock` |
545 | /// when dropped. |
546 | /// |
547 | /// # Cancel safety |
548 | /// |
549 | /// This method uses a queue to fairly distribute locks in the order they |
550 | /// were requested. Cancelling a call to `read_owned` makes you lose your |
551 | /// place in the queue. |
552 | /// |
553 | /// # Examples |
554 | /// |
555 | /// ``` |
556 | /// use std::sync::Arc; |
557 | /// use tokio::sync::RwLock; |
558 | /// |
559 | /// #[tokio::main] |
560 | /// async fn main() { |
561 | /// let lock = Arc::new(RwLock::new(1)); |
562 | /// let c_lock = lock.clone(); |
563 | /// |
564 | /// let n = lock.read_owned().await; |
565 | /// assert_eq!(*n, 1); |
566 | /// |
567 | /// tokio::spawn(async move { |
568 | /// // While main has an active read lock, we acquire one too. |
569 | /// let r = c_lock.read_owned().await; |
570 | /// assert_eq!(*r, 1); |
    ///     }).await.expect("The spawned task has panicked");
572 | /// |
573 | /// // Drop the guard after the spawned task finishes. |
574 | /// drop(n); |
575 | ///} |
576 | /// ``` |
577 | pub async fn read_owned(self: Arc<Self>) -> OwnedRwLockReadGuard<T> { |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
579 | let resource_span = self.resource_span.clone(); |
580 | |
581 | let acquire_fut = async { |
582 | self.s.acquire(1).await.unwrap_or_else(|_| { |
                // The semaphore was closed, but we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
585 | unreachable!() |
586 | }); |
587 | |
588 | OwnedRwLockReadGuard { |
                #[cfg(all(tokio_unstable, feature = "tracing"))]
590 | resource_span: self.resource_span.clone(), |
591 | data: self.c.get(), |
592 | lock: self, |
593 | _p: PhantomData, |
594 | } |
595 | }; |
596 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::read_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
607 | let guard = acquire_fut.await; |
608 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
616 | }); |
617 | |
618 | guard |
619 | } |
620 | |
621 | /// Attempts to acquire this `RwLock` with shared read access. |
622 | /// |
623 | /// If the access couldn't be acquired immediately, returns [`TryLockError`]. |
624 | /// Otherwise, an RAII guard is returned which will release read access |
625 | /// when dropped. |
626 | /// |
627 | /// [`TryLockError`]: TryLockError |
628 | /// |
629 | /// # Examples |
630 | /// |
631 | /// ``` |
632 | /// use std::sync::Arc; |
633 | /// use tokio::sync::RwLock; |
634 | /// |
635 | /// #[tokio::main] |
636 | /// async fn main() { |
637 | /// let lock = Arc::new(RwLock::new(1)); |
638 | /// let c_lock = lock.clone(); |
639 | /// |
640 | /// let v = lock.try_read().unwrap(); |
641 | /// assert_eq!(*v, 1); |
642 | /// |
643 | /// tokio::spawn(async move { |
644 | /// // While main has an active read lock, we acquire one too. |
645 | /// let n = c_lock.read().await; |
646 | /// assert_eq!(*n, 1); |
    ///     }).await.expect("The spawned task has panicked");
648 | /// |
649 | /// // Drop the guard when spawned task finishes. |
650 | /// drop(v); |
651 | /// } |
652 | /// ``` |
653 | pub fn try_read(&self) -> Result<RwLockReadGuard<'_, T>, TryLockError> { |
654 | match self.s.try_acquire(1) { |
655 | Ok(permit) => permit, |
656 | Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), |
657 | Err(TryAcquireError::Closed) => unreachable!(), |
658 | } |
659 | |
660 | let guard = RwLockReadGuard { |
661 | s: &self.s, |
662 | data: self.c.get(), |
663 | marker: marker::PhantomData, |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
665 | resource_span: self.resource_span.clone(), |
666 | }; |
667 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
675 | }); |
676 | |
677 | Ok(guard) |
678 | } |
679 | |
680 | /// Attempts to acquire this `RwLock` with shared read access. |
681 | /// |
682 | /// If the access couldn't be acquired immediately, returns [`TryLockError`]. |
683 | /// Otherwise, an RAII guard is returned which will release read access |
684 | /// when dropped. |
685 | /// |
686 | /// This method is identical to [`RwLock::try_read`], except that the |
687 | /// returned guard references the `RwLock` with an [`Arc`] rather than by |
688 | /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to |
689 | /// call this method, and the guard will live for the `'static` lifetime, |
690 | /// as it keeps the `RwLock` alive by holding an `Arc`. |
691 | /// |
692 | /// [`TryLockError`]: TryLockError |
693 | /// |
694 | /// # Examples |
695 | /// |
696 | /// ``` |
697 | /// use std::sync::Arc; |
698 | /// use tokio::sync::RwLock; |
699 | /// |
700 | /// #[tokio::main] |
701 | /// async fn main() { |
702 | /// let lock = Arc::new(RwLock::new(1)); |
703 | /// let c_lock = lock.clone(); |
704 | /// |
705 | /// let v = lock.try_read_owned().unwrap(); |
706 | /// assert_eq!(*v, 1); |
707 | /// |
708 | /// tokio::spawn(async move { |
709 | /// // While main has an active read lock, we acquire one too. |
710 | /// let n = c_lock.read_owned().await; |
711 | /// assert_eq!(*n, 1); |
    ///     }).await.expect("The spawned task has panicked");
713 | /// |
714 | /// // Drop the guard when spawned task finishes. |
715 | /// drop(v); |
716 | /// } |
717 | /// ``` |
718 | pub fn try_read_owned(self: Arc<Self>) -> Result<OwnedRwLockReadGuard<T>, TryLockError> { |
719 | match self.s.try_acquire(1) { |
720 | Ok(permit) => permit, |
721 | Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), |
722 | Err(TryAcquireError::Closed) => unreachable!(), |
723 | } |
724 | |
725 | let guard = OwnedRwLockReadGuard { |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
727 | resource_span: self.resource_span.clone(), |
728 | data: self.c.get(), |
729 | lock: self, |
730 | _p: PhantomData, |
731 | }; |
732 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
740 | }); |
741 | |
742 | Ok(guard) |
743 | } |
744 | |
745 | /// Locks this `RwLock` with exclusive write access, causing the current |
746 | /// task to yield until the lock has been acquired. |
747 | /// |
748 | /// The calling task will yield while other writers or readers currently |
749 | /// have access to the lock. |
750 | /// |
751 | /// Returns an RAII guard which will drop the write access of this `RwLock` |
752 | /// when dropped. |
753 | /// |
754 | /// # Cancel safety |
755 | /// |
756 | /// This method uses a queue to fairly distribute locks in the order they |
757 | /// were requested. Cancelling a call to `write` makes you lose your place |
758 | /// in the queue. |
759 | /// |
760 | /// # Examples |
761 | /// |
762 | /// ``` |
763 | /// use tokio::sync::RwLock; |
764 | /// |
765 | /// #[tokio::main] |
766 | /// async fn main() { |
767 | /// let lock = RwLock::new(1); |
768 | /// |
769 | /// let mut n = lock.write().await; |
770 | /// *n = 2; |
771 | ///} |
772 | /// ``` |
773 | pub async fn write(&self) -> RwLockWriteGuard<'_, T> { |
774 | let acquire_fut = async { |
775 | self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| { |
                // The semaphore was closed, but we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
778 | unreachable!() |
779 | }); |
780 | |
781 | RwLockWriteGuard { |
782 | permits_acquired: self.mr, |
783 | s: &self.s, |
784 | data: self.c.get(), |
785 | marker: marker::PhantomData, |
                #[cfg(all(tokio_unstable, feature = "tracing"))]
787 | resource_span: self.resource_span.clone(), |
788 | } |
789 | }; |
790 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::write",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
801 | let guard = acquire_fut.await; |
802 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
810 | }); |
811 | |
812 | guard |
813 | } |
814 | |
815 | /// Blockingly locks this `RwLock` with exclusive write access. |
816 | /// |
817 | /// This method is intended for use cases where you |
818 | /// need to use this rwlock in asynchronous code as well as in synchronous code. |
819 | /// |
820 | /// Returns an RAII guard which will drop the write access of this `RwLock` when dropped. |
821 | /// |
822 | /// # Panics |
823 | /// |
824 | /// This function panics if called within an asynchronous execution context. |
825 | /// |
826 | /// - If you find yourself in an asynchronous execution context and needing |
827 | /// to call some (synchronous) function which performs one of these |
828 | /// `blocking_` operations, then consider wrapping that call inside |
829 | /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking] |
830 | /// (or [`block_in_place()`][crate::task::block_in_place]). |
831 | /// |
832 | /// # Examples |
833 | /// |
834 | /// ``` |
835 | /// use std::sync::Arc; |
    /// use tokio::sync::RwLock;
837 | /// |
838 | /// #[tokio::main] |
839 | /// async fn main() { |
840 | /// let rwlock = Arc::new(RwLock::new(1)); |
841 | /// let read_lock = rwlock.read().await; |
842 | /// |
843 | /// let blocking_task = tokio::task::spawn_blocking({ |
844 | /// let rwlock = Arc::clone(&rwlock); |
845 | /// move || { |
846 | /// // This shall block until the `read_lock` is released. |
847 | /// let mut write_lock = rwlock.blocking_write(); |
848 | /// *write_lock = 2; |
849 | /// } |
850 | /// }); |
851 | /// |
852 | /// assert_eq!(*read_lock, 1); |
853 | /// // Release the last outstanding read lock. |
854 | /// drop(read_lock); |
855 | /// |
856 | /// // Await the completion of the blocking task. |
857 | /// blocking_task.await.unwrap(); |
858 | /// |
859 | /// // Assert uncontended. |
860 | /// let read_lock = rwlock.try_read().unwrap(); |
861 | /// assert_eq!(*read_lock, 2); |
862 | /// } |
863 | /// ``` |
    #[track_caller]
    #[cfg(feature = "sync")]
866 | pub fn blocking_write(&self) -> RwLockWriteGuard<'_, T> { |
867 | crate::future::block_on(self.write()) |
868 | } |
869 | |
870 | /// Locks this `RwLock` with exclusive write access, causing the current |
871 | /// task to yield until the lock has been acquired. |
872 | /// |
873 | /// The calling task will yield while other writers or readers currently |
874 | /// have access to the lock. |
875 | /// |
876 | /// This method is identical to [`RwLock::write`], except that the returned |
877 | /// guard references the `RwLock` with an [`Arc`] rather than by borrowing |
878 | /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this |
879 | /// method, and the guard will live for the `'static` lifetime, as it keeps |
880 | /// the `RwLock` alive by holding an `Arc`. |
881 | /// |
882 | /// Returns an RAII guard which will drop the write access of this `RwLock` |
883 | /// when dropped. |
884 | /// |
885 | /// # Cancel safety |
886 | /// |
887 | /// This method uses a queue to fairly distribute locks in the order they |
888 | /// were requested. Cancelling a call to `write_owned` makes you lose your |
889 | /// place in the queue. |
890 | /// |
891 | /// # Examples |
892 | /// |
893 | /// ``` |
894 | /// use std::sync::Arc; |
895 | /// use tokio::sync::RwLock; |
896 | /// |
897 | /// #[tokio::main] |
898 | /// async fn main() { |
899 | /// let lock = Arc::new(RwLock::new(1)); |
900 | /// |
901 | /// let mut n = lock.write_owned().await; |
902 | /// *n = 2; |
903 | ///} |
904 | /// ``` |
905 | pub async fn write_owned(self: Arc<Self>) -> OwnedRwLockWriteGuard<T> { |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
907 | let resource_span = self.resource_span.clone(); |
908 | |
909 | let acquire_fut = async { |
910 | self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| { |
                // The semaphore was closed, but we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
913 | unreachable!() |
914 | }); |
915 | |
916 | OwnedRwLockWriteGuard { |
                #[cfg(all(tokio_unstable, feature = "tracing"))]
918 | resource_span: self.resource_span.clone(), |
919 | permits_acquired: self.mr, |
920 | data: self.c.get(), |
921 | lock: self, |
922 | _p: PhantomData, |
923 | } |
924 | }; |
925 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::write_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
936 | let guard = acquire_fut.await; |
937 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
945 | }); |
946 | |
947 | guard |
948 | } |
949 | |
950 | /// Attempts to acquire this `RwLock` with exclusive write access. |
951 | /// |
952 | /// If the access couldn't be acquired immediately, returns [`TryLockError`]. |
953 | /// Otherwise, an RAII guard is returned which will release write access |
954 | /// when dropped. |
955 | /// |
956 | /// [`TryLockError`]: TryLockError |
957 | /// |
958 | /// # Examples |
959 | /// |
960 | /// ``` |
961 | /// use tokio::sync::RwLock; |
962 | /// |
963 | /// #[tokio::main] |
964 | /// async fn main() { |
965 | /// let rw = RwLock::new(1); |
966 | /// |
967 | /// let v = rw.read().await; |
968 | /// assert_eq!(*v, 1); |
969 | /// |
970 | /// assert!(rw.try_write().is_err()); |
971 | /// } |
972 | /// ``` |
973 | pub fn try_write(&self) -> Result<RwLockWriteGuard<'_, T>, TryLockError> { |
974 | match self.s.try_acquire(self.mr as usize) { |
975 | Ok(permit) => permit, |
976 | Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), |
977 | Err(TryAcquireError::Closed) => unreachable!(), |
978 | } |
979 | |
980 | let guard = RwLockWriteGuard { |
981 | permits_acquired: self.mr, |
982 | s: &self.s, |
983 | data: self.c.get(), |
984 | marker: marker::PhantomData, |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
986 | resource_span: self.resource_span.clone(), |
987 | }; |
988 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
996 | }); |
997 | |
998 | Ok(guard) |
999 | } |
1000 | |
1001 | /// Attempts to acquire this `RwLock` with exclusive write access. |
1002 | /// |
1003 | /// If the access couldn't be acquired immediately, returns [`TryLockError`]. |
1004 | /// Otherwise, an RAII guard is returned which will release write access |
1005 | /// when dropped. |
1006 | /// |
1007 | /// This method is identical to [`RwLock::try_write`], except that the |
1008 | /// returned guard references the `RwLock` with an [`Arc`] rather than by |
1009 | /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to |
1010 | /// call this method, and the guard will live for the `'static` lifetime, |
1011 | /// as it keeps the `RwLock` alive by holding an `Arc`. |
1012 | /// |
1013 | /// [`TryLockError`]: TryLockError |
1014 | /// |
1015 | /// # Examples |
1016 | /// |
1017 | /// ``` |
1018 | /// use std::sync::Arc; |
1019 | /// use tokio::sync::RwLock; |
1020 | /// |
1021 | /// #[tokio::main] |
1022 | /// async fn main() { |
1023 | /// let rw = Arc::new(RwLock::new(1)); |
1024 | /// |
1025 | /// let v = Arc::clone(&rw).read_owned().await; |
1026 | /// assert_eq!(*v, 1); |
1027 | /// |
1028 | /// assert!(rw.try_write_owned().is_err()); |
1029 | /// } |
1030 | /// ``` |
1031 | pub fn try_write_owned(self: Arc<Self>) -> Result<OwnedRwLockWriteGuard<T>, TryLockError> { |
1032 | match self.s.try_acquire(self.mr as usize) { |
1033 | Ok(permit) => permit, |
1034 | Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), |
1035 | Err(TryAcquireError::Closed) => unreachable!(), |
1036 | } |
1037 | |
1038 | let guard = OwnedRwLockWriteGuard { |
            #[cfg(all(tokio_unstable, feature = "tracing"))]
1040 | resource_span: self.resource_span.clone(), |
1041 | permits_acquired: self.mr, |
1042 | data: self.c.get(), |
1043 | lock: self, |
1044 | _p: PhantomData, |
1045 | }; |
1046 | |
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
1054 | }); |
1055 | |
1056 | Ok(guard) |
1057 | } |
1058 | |
1059 | /// Returns a mutable reference to the underlying data. |
1060 | /// |
1061 | /// Since this call borrows the `RwLock` mutably, no actual locking needs to |
1062 | /// take place -- the mutable borrow statically guarantees no locks exist. |
1063 | /// |
1064 | /// # Examples |
1065 | /// |
1066 | /// ``` |
1067 | /// use tokio::sync::RwLock; |
1068 | /// |
1069 | /// fn main() { |
1070 | /// let mut lock = RwLock::new(1); |
1071 | /// |
1072 | /// let n = lock.get_mut(); |
1073 | /// *n = 2; |
1074 | /// } |
1075 | /// ``` |
1076 | pub fn get_mut(&mut self) -> &mut T { |
1077 | unsafe { |
1078 | // Safety: This is https://github.com/rust-lang/rust/pull/76936 |
1079 | &mut *self.c.get() |
1080 | } |
1081 | } |
1082 | |
1083 | /// Consumes the lock, returning the underlying data. |
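    ///
    /// A short usage sketch:
    ///
    /// ```
    /// use tokio::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// assert_eq!(lock.into_inner(), 5);
    /// ```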
1084 | pub fn into_inner(self) -> T |
1085 | where |
1086 | T: Sized, |
1087 | { |
1088 | self.c.into_inner() |
1089 | } |
1090 | } |
1091 | |
1092 | impl<T> From<T> for RwLock<T> { |
1093 | fn from(s: T) -> Self { |
1094 | Self::new(s) |
1095 | } |
1096 | } |
1097 | |
1098 | impl<T: ?Sized> Default for RwLock<T> |
1099 | where |
1100 | T: Default, |
1101 | { |
1102 | fn default() -> Self { |
1103 | Self::new(T::default()) |
1104 | } |
1105 | } |
1106 | |
1107 | impl<T: ?Sized> std::fmt::Debug for RwLock<T> |
1108 | where |
1109 | T: std::fmt::Debug, |
1110 | { |
1111 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |
        let mut d = f.debug_struct("RwLock");
        match self.try_read() {
            Ok(inner) => d.field("data", &&*inner),
            Err(_) => d.field("data", &format_args!("<locked>")),
1116 | }; |
1117 | d.finish() |
1118 | } |
1119 | } |
1120 | |