#![cfg_attr(not(feature = "full"), allow(dead_code))]

use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::{Arc, Condvar, Mutex};

use std::sync::atomic::Ordering::SeqCst;
use std::time::Duration;

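/// Blocks the current thread using a mutex/condvar pair shared with its
/// `UnparkThread` handles.
///
/// A minimal usage sketch of the internal API (illustrative only):
///
/// ```ignore
/// let mut park = ParkThread::new();
/// let unpark = park.unpark();
///
/// std::thread::spawn(move || {
///     // Wake the parked thread from elsewhere.
///     unpark.unpark();
/// });
///
/// // Returns once `unpark` has been called, or immediately if the
/// // notification already arrived.
/// park.park();
/// ```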
#[derive(Debug)]
pub(crate) struct ParkThread {
    inner: Arc<Inner>,
}

/// Unblocks a thread that was blocked by `ParkThread`.
#[derive(Clone, Debug)]
pub(crate) struct UnparkThread {
    inner: Arc<Inner>,
}

#[derive(Debug)]
struct Inner {
    state: AtomicUsize,
    mutex: Mutex<()>,
    condvar: Condvar,
}

const EMPTY: usize = 0;
const PARKED: usize = 1;
const NOTIFIED: usize = 2;
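
// A sketch of the state machine stored in `Inner::state` (derived from the
// implementations below):
//
// - `park`:   consumes NOTIFIED -> EMPTY and returns immediately, or moves
//             EMPTY -> PARKED and sleeps on the condvar.
// - `unpark`: swaps the state to NOTIFIED; if the previous state was PARKED,
//             it also wakes the sleeping thread.
// - wake-up:  the parked thread consumes NOTIFIED -> EMPTY before returning.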

tokio_thread_local! {
    static CURRENT_PARKER: ParkThread = ParkThread::new();
}

// Bit of a hack, but it is only for loom
#[cfg(loom)]
tokio_thread_local! {
    pub(crate) static CURRENT_THREAD_PARK_COUNT: AtomicUsize = AtomicUsize::new(0);
}

// ==== impl ParkThread ====

impl ParkThread {
    pub(crate) fn new() -> Self {
        Self {
            inner: Arc::new(Inner {
                state: AtomicUsize::new(EMPTY),
                mutex: Mutex::new(()),
                condvar: Condvar::new(),
            }),
        }
    }

    pub(crate) fn unpark(&self) -> UnparkThread {
        let inner = self.inner.clone();
        UnparkThread { inner }
    }

    pub(crate) fn park(&mut self) {
        #[cfg(loom)]
        CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        self.inner.park();
    }

    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        #[cfg(loom)]
        CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst));
        self.inner.park_timeout(duration);
    }

    pub(crate) fn shutdown(&mut self) {
        self.inner.shutdown();
    }
}

// ==== impl Inner ====

impl Inner {
    fn park(&self) {
        // If we were previously notified then we consume this notification and
        // return quickly.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }

        // Otherwise we need to coordinate going to sleep
        let mut m = self.mutex.lock();

        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read here, even though we know it will be `NOTIFIED`.
                // This is because `unpark` may have been called again since we read
                // `NOTIFIED` in the `compare_exchange` above. We must perform an
                // acquire operation that synchronizes with that `unpark` to observe
                // any writes it made before the call to unpark. To do that we must
                // read from the write it made to `state`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");

                return;
            }
            Err(actual) => panic!("inconsistent park state; actual = {actual}"),
        }

        loop {
            m = self.condvar.wait(m).unwrap();

            if self
                .state
                .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
                .is_ok()
            {
                // got a notification
                return;
            }

            // spurious wakeup, go back to sleep
        }
    }

    /// Parks the current thread for at most `dur`.
    fn park_timeout(&self, dur: Duration) {
        // Like `park` above, we have a fast path for an already-notified
        // thread: consume the notification and return quickly. Otherwise we
        // start coordinating for a sleep below.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }

        if dur == Duration::from_millis(0) {
            return;
        }

        let m = self.mutex.lock();

        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read again here, see `park`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");

                return;
            }
            Err(actual) => panic!("inconsistent park_timeout state; actual = {actual}"),
        }

        #[cfg(not(all(target_family = "wasm", not(target_feature = "atomics"))))]
        // Wait with a timeout, and if we spuriously wake up or otherwise wake up
        // from a notification, we just want to unconditionally set the state back to
        // empty, either consuming a notification or un-flagging ourselves as
        // parked.
        let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap();

        #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
        // Wasm without atomics doesn't have threads, so just sleep.
        {
            let _m = m;
            std::thread::sleep(dur);
        }

        match self.state.swap(EMPTY, SeqCst) {
            NOTIFIED => {} // got a notification, hurray!
            PARKED => {}   // no notification, alas
            n => panic!("inconsistent park_timeout state: {n}"),
        }
    }

    fn unpark(&self) {
        // To ensure the unparked thread will observe any writes we made before
        // this call, we must perform a release operation that `park` can
        // synchronize with. To do that we must write `NOTIFIED` even if `state`
        // is already `NOTIFIED`. That is why this must be a swap rather than a
        // compare-and-swap that returns if it reads `NOTIFIED` on failure.
        match self.state.swap(NOTIFIED, SeqCst) {
            EMPTY => return,    // no one was waiting
            NOTIFIED => return, // already unparked
            PARKED => {}        // gotta go wake someone up
            _ => panic!("inconsistent state in unpark"),
        }

        // There is a period between when the parked thread sets `state` to
        // `PARKED` (or last checked `state` in the case of a spurious wake
        // up) and when it actually waits on `cvar`. If we were to notify
        // during this period it would be ignored and then when the parked
        // thread went to sleep it would never wake up. Fortunately, it has
        // `lock` locked at this stage so we can acquire `lock` to wait until
        // it is ready to receive the notification.
        //
        // Releasing `lock` before the call to `notify_one` means that when the
        // parked thread wakes it doesn't get woken only to have to wait for us
        // to release `lock`.
        drop(self.mutex.lock());

        self.condvar.notify_one();
    }

    fn shutdown(&self) {
        self.condvar.notify_all();
    }
}

impl Default for ParkThread {
    fn default() -> Self {
        Self::new()
    }
}

// ===== impl UnparkThread =====

impl UnparkThread {
    pub(crate) fn unpark(&self) {
        self.inner.unpark();
    }
}

use crate::loom::thread::AccessError;
use std::future::Future;
use std::marker::PhantomData;
use std::rc::Rc;
use std::task::{RawWaker, RawWakerVTable, Waker};

/// Blocks the current thread using a condition variable.
#[derive(Debug)]
pub(crate) struct CachedParkThread {
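    // `Rc` is neither `Send` nor `Sync`, so this marker ties the handle to the
    // thread it was created on; it only ever accesses that thread's parker.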
    _anchor: PhantomData<Rc<()>>,
}

impl CachedParkThread {
    /// Creates a new `CachedParkThread` for the current thread.
    ///
    /// This type cannot be moved to other threads, so it should be created on
    /// the thread that the caller intends to park.
    pub(crate) fn new() -> CachedParkThread {
        CachedParkThread {
            _anchor: PhantomData,
        }
    }

    pub(crate) fn waker(&self) -> Result<Waker, AccessError> {
        self.unpark().map(UnparkThread::into_waker)
    }

    fn unpark(&self) -> Result<UnparkThread, AccessError> {
        self.with_current(ParkThread::unpark)
    }

    pub(crate) fn park(&mut self) {
        self.with_current(|park_thread| park_thread.inner.park())
            .unwrap();
    }

    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        self.with_current(|park_thread| park_thread.inner.park_timeout(duration))
            .unwrap();
    }

    /// Gets a reference to the `ParkThread` handle for this thread.
    fn with_current<F, R>(&self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&ParkThread) -> R,
    {
        CURRENT_PARKER.try_with(|inner| f(inner))
    }

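    /// Runs `f` to completion on the current thread, parking between polls.
    ///
    /// A minimal usage sketch (illustrative only; the real callers are the
    /// runtime's blocking entry points):
    ///
    /// ```ignore
    /// let mut park = CachedParkThread::new();
    /// let out = park
    ///     .block_on(async { 1 + 1 })
    ///     .expect("thread-local parker is available");
    /// assert_eq!(out, 2);
    /// ```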
    pub(crate) fn block_on<F: Future>(&mut self, f: F) -> Result<F::Output, AccessError> {
        use std::task::Context;
        use std::task::Poll::Ready;

        let waker = self.waker()?;
        let mut cx = Context::from_waker(&waker);

        pin!(f);

        loop {
            if let Ready(v) = crate::task::coop::budget(|| f.as_mut().poll(&mut cx)) {
                return Ok(v);
            }

            self.park();
        }
    }
}

impl UnparkThread {
    pub(crate) fn into_waker(self) -> Waker {
        unsafe {
            let raw = unparker_to_raw_waker(self.inner);
            Waker::from_raw(raw)
        }
    }
}

impl Inner {
    #[allow(clippy::wrong_self_convention)]
    fn into_raw(this: Arc<Inner>) -> *const () {
        Arc::into_raw(this) as *const ()
    }

    unsafe fn from_raw(ptr: *const ()) -> Arc<Inner> {
        Arc::from_raw(ptr as *const Inner)
    }
}

unsafe fn unparker_to_raw_waker(unparker: Arc<Inner>) -> RawWaker {
    RawWaker::new(
        Inner::into_raw(unparker),
        &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker),
    )
}

unsafe fn clone(raw: *const ()) -> RawWaker {
    Arc::increment_strong_count(raw as *const Inner);
    unparker_to_raw_waker(Inner::from_raw(raw))
}

unsafe fn drop_waker(raw: *const ()) {
    drop(Inner::from_raw(raw));
}

unsafe fn wake(raw: *const ()) {
    let unparker = Inner::from_raw(raw);
    unparker.unpark();
}

unsafe fn wake_by_ref(raw: *const ()) {
    let raw = raw as *const Inner;
    (*raw).unpark();
}
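
// Reference-count contract for the vtable above (each `RawWaker` owns one
// strong count on the shared `Inner`):
//
// - `clone`:       bumps the strong count and hands out a new `RawWaker`.
// - `wake`:        takes ownership of its strong count and drops it after
//                  unparking.
// - `wake_by_ref`: borrows the `Inner` without touching the strong count.
// - `drop_waker`:  releases the waker's strong count.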

#[cfg(loom)]
pub(crate) fn current_thread_park_count() -> usize {
    CURRENT_THREAD_PARK_COUNT.with(|count| count.load(SeqCst))
}