1 | #![cfg_attr (not(feature = "full" ), allow(dead_code))] |
2 | |
3 | use crate::loom::sync::atomic::AtomicUsize; |
4 | use crate::loom::sync::{Arc, Condvar, Mutex}; |
5 | |
6 | use std::sync::atomic::Ordering::SeqCst; |
7 | use std::time::Duration; |
8 | |
/// Blocks the current thread using a mutex/condvar pair; the parking half of
/// the park/unpark pair (see `Inner::park`).
#[derive(Debug)]
pub(crate) struct ParkThread {
    // Shared with every `UnparkThread` handle created via `unpark()`.
    inner: Arc<Inner>,
}
13 | |
/// Unblocks a thread that was blocked by `ParkThread`.
#[derive(Clone, Debug)]
pub(crate) struct UnparkThread {
    // Same shared `Inner` as the `ParkThread` this handle came from.
    inner: Arc<Inner>,
}
19 | |
/// State shared between a `ParkThread` and its `UnparkThread` handles.
#[derive(Debug)]
struct Inner {
    // One of `EMPTY`, `PARKED`, or `NOTIFIED`.
    state: AtomicUsize,
    // Guards the condvar wait; held while transitioning into the parked state.
    mutex: Mutex<()>,
    // The parked thread sleeps on this until notified.
    condvar: Condvar,
}
26 | |
// No thread parked and no pending notification.
const EMPTY: usize = 0;
// A thread is parked (or in the process of parking) on the condvar.
const PARKED: usize = 1;
// An unpark was delivered; the next `park` call consumes it and returns.
const NOTIFIED: usize = 2;
30 | |
tokio_thread_local! {
    // One parker per thread, lazily created on first use.
    static CURRENT_PARKER: ParkThread = ParkThread::new();
}
34 | |
// Bit of a hack, but it is only for loom
#[cfg(loom)]
tokio_thread_local! {
    // Counts `park`/`park_timeout` calls made by this thread; read by
    // `current_thread_park_count()` in loom tests.
    static CURRENT_THREAD_PARK_COUNT: AtomicUsize = AtomicUsize::new(0);
}
40 | |
41 | // ==== impl ParkThread ==== |
42 | |
43 | impl ParkThread { |
44 | pub(crate) fn new() -> Self { |
45 | Self { |
46 | inner: Arc::new(Inner { |
47 | state: AtomicUsize::new(EMPTY), |
48 | mutex: Mutex::new(()), |
49 | condvar: Condvar::new(), |
50 | }), |
51 | } |
52 | } |
53 | |
54 | pub(crate) fn unpark(&self) -> UnparkThread { |
55 | let inner = self.inner.clone(); |
56 | UnparkThread { inner } |
57 | } |
58 | |
59 | pub(crate) fn park(&mut self) { |
60 | #[cfg (loom)] |
61 | CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst)); |
62 | self.inner.park(); |
63 | } |
64 | |
65 | pub(crate) fn park_timeout(&mut self, duration: Duration) { |
66 | #[cfg (loom)] |
67 | CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst)); |
68 | |
69 | // Wasm doesn't have threads, so just sleep. |
70 | #[cfg (not(tokio_wasm))] |
71 | self.inner.park_timeout(duration); |
72 | #[cfg (tokio_wasm)] |
73 | std::thread::sleep(duration); |
74 | } |
75 | |
76 | pub(crate) fn shutdown(&mut self) { |
77 | self.inner.shutdown(); |
78 | } |
79 | } |
80 | |
81 | // ==== impl Inner ==== |
82 | |
impl Inner {
    /// Parks the current thread until notified, first consuming any pending
    /// notification.
    fn park(&self) {
        // If we were previously notified then we consume this notification and
        // return quickly.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }

        // Otherwise we need to coordinate going to sleep. The mutex must be
        // held before flagging ourselves `PARKED` so that `unpark` cannot
        // notify between the state change and the condvar wait below.
        let mut m = self.mutex.lock();

        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read here, even though we know it will be `NOTIFIED`.
                // This is because `unpark` may have been called again since we read
                // `NOTIFIED` in the `compare_exchange` above. We must perform an
                // acquire operation that synchronizes with that `unpark` to observe
                // any writes it made before the call to unpark. To do that we must
                // read from the write it made to `state`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");

                return;
            }
            Err(actual) => panic!("inconsistent park state; actual = {}", actual),
        }

        loop {
            m = self.condvar.wait(m).unwrap();

            if self
                .state
                .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
                .is_ok()
            {
                // got a notification
                return;
            }

            // spurious wakeup, go back to sleep
        }
    }

    /// Parks the current thread for at most `dur`, returning early if
    /// notified. A zero duration returns immediately without blocking.
    fn park_timeout(&self, dur: Duration) {
        // Like `park` above we have a fast path for an already-notified thread,
        // and afterwards we start coordinating for a sleep. Return quickly.
        if self
            .state
            .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
            .is_ok()
        {
            return;
        }

        // A zero timeout can never be woken "in time", so skip the whole
        // sleep protocol.
        if dur == Duration::from_millis(0) {
            return;
        }

        let m = self.mutex.lock();

        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            Err(NOTIFIED) => {
                // We must read again here, see `park`.
                let old = self.state.swap(EMPTY, SeqCst);
                debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");

                return;
            }
            Err(actual) => panic!("inconsistent park_timeout state; actual = {}", actual),
        }

        // Wait with a timeout, and if we spuriously wake up or otherwise wake up
        // from a notification, we just want to unconditionally set the state back to
        // empty, either consuming a notification or un-flagging ourselves as
        // parked.
        let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap();

        match self.state.swap(EMPTY, SeqCst) {
            NOTIFIED => {} // got a notification, hurray!
            PARKED => {}   // no notification, alas
            n => panic!("inconsistent park_timeout state: {}", n),
        }
    }

    /// Delivers a notification: wakes the parked thread if there is one,
    /// otherwise leaves `NOTIFIED` set for the next `park` call to consume.
    fn unpark(&self) {
        // To ensure the unparked thread will observe any writes we made before
        // this call, we must perform a release operation that `park` can
        // synchronize with. To do that we must write `NOTIFIED` even if `state`
        // is already `NOTIFIED`. That is why this must be a swap rather than a
        // compare-and-swap that returns if it reads `NOTIFIED` on failure.
        match self.state.swap(NOTIFIED, SeqCst) {
            EMPTY => return,    // no one was waiting
            NOTIFIED => return, // already unparked
            PARKED => {}        // gotta go wake someone up
            _ => panic!("inconsistent state in unpark"),
        }

        // There is a period between when the parked thread sets `state` to
        // `PARKED` (or last checked `state` in the case of a spurious wake
        // up) and when it actually waits on `cvar`. If we were to notify
        // during this period it would be ignored and then when the parked
        // thread went to sleep it would never wake up. Fortunately, it has
        // `lock` locked at this stage so we can acquire `lock` to wait until
        // it is ready to receive the notification.
        //
        // Releasing `lock` before the call to `notify_one` means that when the
        // parked thread wakes it doesn't get woken only to have to wait for us
        // to release `lock`.
        drop(self.mutex.lock());

        self.condvar.notify_one()
    }

    /// Wakes every thread waiting on the condvar. The `state` flag is not
    /// modified.
    fn shutdown(&self) {
        self.condvar.notify_all();
    }
}
207 | |
208 | impl Default for ParkThread { |
209 | fn default() -> Self { |
210 | Self::new() |
211 | } |
212 | } |
213 | |
214 | // ===== impl UnparkThread ===== |
215 | |
216 | impl UnparkThread { |
217 | pub(crate) fn unpark(&self) { |
218 | self.inner.unpark(); |
219 | } |
220 | } |
221 | |
222 | use crate::loom::thread::AccessError; |
223 | use std::future::Future; |
224 | use std::marker::PhantomData; |
225 | use std::mem; |
226 | use std::rc::Rc; |
227 | use std::task::{RawWaker, RawWakerVTable, Waker}; |
228 | |
/// Blocks the current thread using a condition variable.
#[derive(Debug)]
pub(crate) struct CachedParkThread {
    // `Rc` is `!Send`/`!Sync`, pinning this handle to its creating thread
    // without storing any data.
    _anchor: PhantomData<Rc<()>>,
}
234 | |
impl CachedParkThread {
    /// Creates a new `ParkThread` handle for the current thread.
    ///
    /// This type cannot be moved to other threads, so it should be created on
    /// the thread that the caller intends to park.
    pub(crate) fn new() -> CachedParkThread {
        CachedParkThread {
            _anchor: PhantomData,
        }
    }

    /// Returns a `Waker` that unparks this thread when woken.
    ///
    /// Fails with `AccessError` if the thread-local parker is unavailable
    /// (e.g. during thread destruction).
    pub(crate) fn waker(&self) -> Result<Waker, AccessError> {
        self.unpark().map(|unpark| unpark.into_waker())
    }

    // Fetches an `UnparkThread` handle for this thread's parker.
    fn unpark(&self) -> Result<UnparkThread, AccessError> {
        self.with_current(|park_thread| park_thread.unpark())
    }

    /// Parks the current thread until notified.
    ///
    /// Panics if the thread-local parker cannot be accessed.
    pub(crate) fn park(&mut self) {
        self.with_current(|park_thread| park_thread.inner.park())
            .unwrap();
    }

    /// Parks the current thread for at most `duration`.
    ///
    /// Panics if the thread-local parker cannot be accessed.
    pub(crate) fn park_timeout(&mut self, duration: Duration) {
        self.with_current(|park_thread| park_thread.inner.park_timeout(duration))
            .unwrap();
    }

    /// Gets a reference to the `ParkThread` handle for this thread.
    fn with_current<F, R>(&self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&ParkThread) -> R,
    {
        CURRENT_PARKER.try_with(|inner| f(inner))
    }

    /// Drives the future `f` to completion on the current thread, parking
    /// between polls until the waker fires.
    pub(crate) fn block_on<F: Future>(&mut self, f: F) -> Result<F::Output, AccessError> {
        use std::task::Context;
        use std::task::Poll::Ready;

        // `get_unpark()` should not return a Result
        let waker = self.waker()?;
        let mut cx = Context::from_waker(&waker);

        pin!(f);

        loop {
            // NOTE(review): polling inside `coop::budget` presumably grants the
            // future a fresh cooperative budget per iteration — confirm in
            // `runtime::coop`.
            if let Ready(v) = crate::runtime::coop::budget(|| f.as_mut().poll(&mut cx)) {
                return Ok(v);
            }

            self.park();
        }
    }
}
291 | |
292 | impl UnparkThread { |
293 | pub(crate) fn into_waker(self) -> Waker { |
294 | unsafe { |
295 | let raw: RawWaker = unparker_to_raw_waker(self.inner); |
296 | Waker::from_raw(waker:raw) |
297 | } |
298 | } |
299 | } |
300 | |
impl Inner {
    // Converts the `Arc` into a raw pointer, transferring its reference count
    // to the returned pointer (used as the `RawWaker` data pointer).
    #[allow(clippy::wrong_self_convention)]
    fn into_raw(this: Arc<Inner>) -> *const () {
        Arc::into_raw(this) as *const ()
    }

    /// # Safety
    ///
    /// `ptr` must have come from `Inner::into_raw`, and each such pointer's
    /// reference may be reclaimed at most once — callers that do not own the
    /// reference must `mem::forget` the returned `Arc`.
    unsafe fn from_raw(ptr: *const ()) -> Arc<Inner> {
        Arc::from_raw(ptr as *const Inner)
    }
}
311 | |
312 | unsafe fn unparker_to_raw_waker(unparker: Arc<Inner>) -> RawWaker { |
313 | RawWaker::new( |
314 | data:Inner::into_raw(unparker), |
315 | &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker), |
316 | ) |
317 | } |
318 | |
319 | unsafe fn clone(raw: *const ()) -> RawWaker { |
320 | let unparker: Arc = Inner::from_raw(ptr:raw); |
321 | |
322 | // Increment the ref count |
323 | mem::forget(unparker.clone()); |
324 | |
325 | unparker_to_raw_waker(unparker) |
326 | } |
327 | |
328 | unsafe fn drop_waker(raw: *const ()) { |
329 | let _ = Inner::from_raw(ptr:raw); |
330 | } |
331 | |
332 | unsafe fn wake(raw: *const ()) { |
333 | let unparker: Arc = Inner::from_raw(ptr:raw); |
334 | unparker.unpark(); |
335 | } |
336 | |
337 | unsafe fn wake_by_ref(raw: *const ()) { |
338 | let unparker: Arc = Inner::from_raw(ptr:raw); |
339 | unparker.unpark(); |
340 | |
341 | // We don't actually own a reference to the unparker |
342 | mem::forget(unparker); |
343 | } |
344 | |
#[cfg(loom)]
/// Returns how many times the current thread has called `park`/`park_timeout`
/// (loom tests only).
pub(crate) fn current_thread_park_count() -> usize {
    CURRENT_THREAD_PARK_COUNT.with(|count| count.load(SeqCst))
}
349 | |