use crate::sync::rwlock::owned_read_guard::OwnedRwLockReadGuard;
use crate::sync::rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
use crate::sync::rwlock::RwLock;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{fmt, mem, ops, ptr};

/// Owned RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write_owned`] method
/// on [`RwLock`].
///
/// [`write_owned`]: method@crate::sync::RwLock::write_owned
/// [`RwLock`]: struct@crate::sync::RwLock
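///
/// # Examples
///
/// A minimal usage sketch: the guard is obtained from an `Arc`-wrapped lock via
/// [`write_owned`] and releases the write access when dropped.
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// # #[tokio::main]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
///
/// let mut guard = lock.clone().write_owned().await;
/// *guard = 2;
/// drop(guard); // write access is released here
///
/// assert_eq!(*lock.read().await, 2);
/// # }
/// ```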
#[clippy::has_significant_drop]
pub struct OwnedRwLockWriteGuard<T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    pub(super) permits_acquired: u32,
    pub(super) lock: Arc<RwLock<T>>,
    pub(super) data: *mut T,
    pub(super) _p: PhantomData<T>,
}

#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    permits_acquired: u32,
    lock: Arc<RwLock<T>>,
    data: *const T,
}

impl<T: ?Sized> OwnedRwLockWriteGuard<T> {
    fn skip_drop(self) -> Inner<T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        unsafe {
            Inner {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: ptr::read(&me.resource_span),
                permits_acquired: me.permits_acquired,
                lock: ptr::read(&me.lock),
                data: me.data,
            }
        }
    }

    /// Makes a new [`OwnedRwLockMappedWriteGuard`] for a component of the locked
    /// data.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in
    /// already locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `OwnedRwLockWriteGuard::map(..)`. A method would interfere with methods
    /// of the same name on the contents of the locked data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// {
    ///     let lock = Arc::clone(&lock);
    ///     let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0);
    ///     *mapped = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard<T, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let data = f(&mut *this) as *mut U;
        let this = this.skip_drop();

        OwnedRwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }

    /// Makes a new [`OwnedRwLockReadGuard`] for a component of the locked data.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `OwnedRwLockWriteGuard::downgrade_map(..)`. A method would interfere
    /// with methods of the same name on the contents of the locked data.
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only
    /// being given a `&T`. Handing out a `&mut T` would result in unsoundness,
    /// as you could use interior mutability.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = Arc::clone(&lock).write_owned().await;
    /// let mapped = OwnedRwLockWriteGuard::downgrade_map(guard, |f| &f.0);
    /// let foo = lock.read_owned().await;
    /// assert_eq!(foo.0, *mapped);
    /// # }
    /// ```
    #[inline]
    pub fn downgrade_map<F, U: ?Sized>(this: Self, f: F) -> OwnedRwLockReadGuard<T, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let data = f(&*this) as *const U;
        let this = this.skip_drop();
        let guard = OwnedRwLockReadGuard {
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };

        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        guard.lock.s.release(to_release);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }

    /// Attempts to make a new [`OwnedRwLockMappedWriteGuard`] for a component
    /// of the locked data. The original guard is returned if the closure
    /// returns `None`.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in
    /// already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `OwnedRwLockWriteGuard::try_map(...)`. A method would interfere
    /// with methods of the same name on the contents of the locked data.
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// {
    ///     let guard = Arc::clone(&lock).write_owned().await;
    ///     let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
    ///     *guard = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, U: ?Sized>(
        mut this: Self,
        f: F,
    ) -> Result<OwnedRwLockMappedWriteGuard<T, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let data = match f(&mut *this) {
            Some(data) => data as *mut U,
            None => return Err(this),
        };
        let this = this.skip_drop();

        Ok(OwnedRwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }

    /// Attempts to make a new [`OwnedRwLockReadGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `OwnedRwLockWriteGuard::try_downgrade_map(...)`. A method would
    /// interfere with methods of the same name on the contents of the locked data.
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only
    /// being given a `&T`. Handing out a `&mut T` would result in unsoundness,
    /// as you could use interior mutability.
    ///
    /// If this function returns `Err(...)`, the lock is never unlocked nor downgraded.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = Arc::clone(&lock).write_owned().await;
    /// let guard = OwnedRwLockWriteGuard::try_downgrade_map(guard, |f| Some(&f.0)).expect("should not fail");
    /// let foo = lock.read_owned().await;
    /// assert_eq!(foo.0, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn try_downgrade_map<F, U: ?Sized>(
        this: Self,
        f: F,
    ) -> Result<OwnedRwLockReadGuard<T, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let data = match f(&*this) {
            Some(data) => data as *const U,
            None => return Err(this),
        };
        let this = this.skip_drop();
        let guard = OwnedRwLockReadGuard {
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };

        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        guard.lock.s.release(to_release);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }

    /// Converts this `OwnedRwLockWriteGuard` into an
    /// `OwnedRwLockMappedWriteGuard`. This method can be used to store a
    /// non-mapped guard in a struct field that expects a mapped guard.
    ///
    /// This is equivalent to calling `OwnedRwLockWriteGuard::map(guard, |me| me)`.
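    ///
    /// # Examples
    ///
    /// A minimal sketch, using a hypothetical `Holder` struct whose field
    /// expects the mapped guard type:
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard, OwnedRwLockMappedWriteGuard};
    ///
    /// // `Holder` is a hypothetical struct used only for illustration.
    /// struct Holder {
    ///     guard: OwnedRwLockMappedWriteGuard<u32>,
    /// }
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1u32));
    ///
    /// let guard = lock.clone().write_owned().await;
    /// let holder = Holder {
    ///     // Store the non-mapped write guard in a field typed as the mapped guard.
    ///     guard: OwnedRwLockWriteGuard::into_mapped(guard),
    /// };
    /// assert_eq!(*holder.guard, 1);
    /// # }
    /// ```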
    #[inline]
    pub fn into_mapped(this: Self) -> OwnedRwLockMappedWriteGuard<T> {
        Self::map(this, |me| me)
    }

    /// Atomically downgrades a write lock into a read lock without allowing
    /// any writers to take exclusive access of the lock in the meantime.
    ///
    /// **Note:** This won't *necessarily* allow any additional readers to acquire
    /// locks, since [`RwLock`] is fair and it is possible that a writer is next
    /// in line.
    ///
    /// Returns an RAII guard which will drop this read access of the `RwLock`
    /// when dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1));
    ///
    /// let n = lock.clone().write_owned().await;
    ///
    /// let cloned_lock = lock.clone();
    /// let handle = tokio::spawn(async move {
    ///     *cloned_lock.write_owned().await = 2;
    /// });
    ///
    /// let n = n.downgrade();
    /// assert_eq!(*n, 1, "downgrade is atomic");
    ///
    /// drop(n);
    /// handle.await.unwrap();
    /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
    /// # }
    /// ```
    pub fn downgrade(self) -> OwnedRwLockReadGuard<T> {
        let this = self.skip_drop();
        let guard = OwnedRwLockReadGuard {
            lock: this.lock,
            data: this.data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };

        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        guard.lock.s.release(to_release);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }
}

impl<T: ?Sized> ops::Deref for OwnedRwLockWriteGuard<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<T: ?Sized> ops::DerefMut for OwnedRwLockWriteGuard<T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data }
    }
}

impl<T: ?Sized> fmt::Debug for OwnedRwLockWriteGuard<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Display for OwnedRwLockWriteGuard<T>
where
    T: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized> Drop for OwnedRwLockWriteGuard<T> {
    fn drop(&mut self) {
        self.lock.s.release(self.permits_acquired as usize);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
    }
}