1 | // <stop_token> -*- C++ -*- |
2 | |
3 | // Copyright (C) 2019-2021 Free Software Foundation, Inc. |
4 | // |
5 | // This file is part of the GNU ISO C++ Library. This library is free |
6 | // software; you can redistribute it and/or modify it under the |
7 | // terms of the GNU General Public License as published by the |
8 | // Free Software Foundation; either version 3, or (at your option) |
9 | // any later version. |
10 | |
11 | // This library is distributed in the hope that it will be useful, |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | // GNU General Public License for more details. |
15 | |
16 | // Under Section 7 of GPL version 3, you are granted additional |
17 | // permissions described in the GCC Runtime Library Exception, version |
18 | // 3.1, as published by the Free Software Foundation. |
19 | |
20 | // You should have received a copy of the GNU General Public License and |
21 | // a copy of the GCC Runtime Library Exception along with this program; |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | // <http://www.gnu.org/licenses/>. |
24 | |
25 | /** @file include/stop_token |
26 | * This is a Standard C++ Library header. |
27 | */ |
28 | |
29 | #ifndef _GLIBCXX_STOP_TOKEN |
30 | #define _GLIBCXX_STOP_TOKEN |
31 | |
32 | #if __cplusplus > 201703L |
33 | |
34 | #include <atomic> |
35 | #include <bits/std_thread.h> |
36 | |
37 | #include <semaphore> |
38 | |
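// Feature-test macro for the C++20 stop-token facilities and std::jthread.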
39 | #define __cpp_lib_jthread 201911L |
40 | |
41 | namespace std _GLIBCXX_VISIBILITY(default) |
42 | { |
43 | _GLIBCXX_BEGIN_NAMESPACE_VERSION |
44 | |
/// Tag type indicating that a stop_source should have no shared stop state.
46 | struct nostopstate_t { explicit nostopstate_t() = default; }; |
47 | inline constexpr nostopstate_t nostopstate{}; |
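// For example, `std::stop_source src(std::nostopstate);` constructs a source
// with no associated stop state: src.stop_possible() is false and
// src.request_stop() does nothing and returns false.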
48 | |
49 | class stop_source; |
50 | |
51 | /// Allow testing whether a stop request has been made on a `stop_source`. |
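///
/// A minimal usage sketch (assumes `<thread>` is also included): a worker
/// polls its token and leaves the loop once a stop has been requested.
/// @code
/// std::stop_source src;
/// std::thread worker([tok = src.get_token()] {
///   while (!tok.stop_requested())
///     { /* do a unit of work, then re-check the token */ }
/// });
/// src.request_stop(); // the worker observes this and exits its loop
/// worker.join();
/// @endcode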
52 | class stop_token |
53 | { |
54 | public: |
55 | stop_token() noexcept = default; |
56 | |
57 | stop_token(const stop_token&) noexcept = default; |
58 | stop_token(stop_token&&) noexcept = default; |
59 | |
60 | ~stop_token() = default; |
61 | |
62 | stop_token& |
63 | operator=(const stop_token&) noexcept = default; |
64 | |
65 | stop_token& |
66 | operator=(stop_token&&) noexcept = default; |
67 | |
68 | [[nodiscard]] |
69 | bool |
70 | stop_possible() const noexcept |
71 | { |
72 | return static_cast<bool>(_M_state) && _M_state->_M_stop_possible(); |
73 | } |
74 | |
75 | [[nodiscard]] |
76 | bool |
77 | stop_requested() const noexcept |
78 | { |
79 | return static_cast<bool>(_M_state) && _M_state->_M_stop_requested(); |
80 | } |
81 | |
82 | void |
83 | swap(stop_token& __rhs) noexcept |
84 | { _M_state.swap(__rhs._M_state); } |
85 | |
86 | [[nodiscard]] |
87 | friend bool |
operator==(const stop_token& __a, const stop_token& __b) noexcept
89 | { return __a._M_state == __b._M_state; } |
90 | |
91 | friend void |
92 | swap(stop_token& __lhs, stop_token& __rhs) noexcept |
93 | { __lhs.swap(__rhs); } |
94 | |
95 | private: |
96 | friend class stop_source; |
97 | template<typename _Callback> |
98 | friend class stop_callback; |
99 | |
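// Spin-wait hint used by the locking code below: on x86 the PAUSE
// instruction reduces contention inside the spin loop, then we yield
// to the OS scheduler so the lock holder can make progress.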
100 | static void |
101 | _S_yield() noexcept |
102 | { |
103 | #if defined __i386__ || defined __x86_64__ |
104 | __builtin_ia32_pause(); |
105 | #endif |
106 | this_thread::yield(); |
107 | } |
108 | |
109 | #ifndef __cpp_lib_semaphore |
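// Fallback used when <semaphore> does not provide std::binary_semaphore:
// a minimal spin-based semaphore supporting only the release/acquire
// protocol needed by _Stop_cb below.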
110 | struct binary_semaphore |
111 | { |
112 | explicit binary_semaphore(int __d) : _M_counter(__d > 0) { } |
113 | |
114 | void release() { _M_counter.fetch_add(1, memory_order::release); } |
115 | |
116 | void acquire() |
117 | { |
118 | int __old = 1; |
119 | while (!_M_counter.compare_exchange_weak(__old, 0, |
120 | memory_order::acquire, |
121 | memory_order::relaxed)) |
122 | { |
123 | __old = 1; |
124 | _S_yield(); |
125 | } |
126 | } |
127 | |
128 | atomic<int> _M_counter; |
129 | }; |
130 | #endif |
131 | |
132 | struct _Stop_cb |
133 | { |
134 | using __cb_type = void(_Stop_cb*) noexcept; |
135 | __cb_type* _M_callback; |
136 | _Stop_cb* _M_prev = nullptr; |
137 | _Stop_cb* _M_next = nullptr; |
138 | bool* _M_destroyed = nullptr; |
139 | binary_semaphore _M_done{0}; |
140 | |
141 | [[__gnu__::__nonnull__]] |
142 | explicit |
143 | _Stop_cb(__cb_type* __cb) |
144 | : _M_callback(__cb) |
145 | { } |
146 | |
147 | void _M_run() noexcept { _M_callback(this); } |
148 | }; |
149 | |
150 | struct _Stop_state_t |
151 | { |
152 | using value_type = uint32_t; |
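// Layout of _M_value: bit 0 records whether a stop has been requested,
// bit 1 is a spin lock protecting the callback list, and the remaining
// bits count the stop_source objects that share ownership of this state.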
153 | static constexpr value_type _S_stop_requested_bit = 1; |
154 | static constexpr value_type _S_locked_bit = 2; |
155 | static constexpr value_type _S_ssrc_counter_inc = 4; |
156 | |
157 | std::atomic<value_type> _M_owners{1}; |
158 | std::atomic<value_type> _M_value{_S_ssrc_counter_inc}; |
159 | _Stop_cb* _M_head = nullptr; |
160 | std::thread::id _M_requester; |
161 | |
162 | _Stop_state_t() = default; |
163 | |
164 | bool |
165 | _M_stop_possible() noexcept |
166 | { |
167 | // true if a stop request has already been made or there are still |
168 | // stop_source objects that would allow one to be made. |
169 | return _M_value.load(memory_order::acquire) & ~_S_locked_bit; |
170 | } |
171 | |
172 | bool |
173 | _M_stop_requested() noexcept |
174 | { |
175 | return _M_value.load(memory_order::acquire) & _S_stop_requested_bit; |
176 | } |
177 | |
178 | void |
179 | _M_add_owner() noexcept |
180 | { |
181 | _M_owners.fetch_add(1, memory_order::relaxed); |
182 | } |
183 | |
184 | void |
185 | _M_release_ownership() noexcept |
186 | { |
187 | if (_M_owners.fetch_sub(1, memory_order::acq_rel) == 1) |
188 | delete this; |
189 | } |
190 | |
191 | void |
192 | _M_add_ssrc() noexcept |
193 | { |
194 | _M_value.fetch_add(_S_ssrc_counter_inc, memory_order::relaxed); |
195 | } |
196 | |
197 | void |
198 | _M_sub_ssrc() noexcept |
199 | { |
200 | _M_value.fetch_sub(_S_ssrc_counter_inc, memory_order::release); |
201 | } |
202 | |
203 | // Obtain lock. |
204 | void |
205 | _M_lock() noexcept |
206 | { |
207 | // Can use relaxed loads to get the current value. |
208 | // The successful call to _M_try_lock is an acquire operation. |
209 | auto __old = _M_value.load(memory_order::relaxed); |
210 | while (!_M_try_lock(__old, memory_order::relaxed)) |
211 | { } |
212 | } |
213 | |
214 | // Precondition: calling thread holds the lock. |
215 | void |
216 | _M_unlock() noexcept |
217 | { |
218 | _M_value.fetch_sub(_S_locked_bit, memory_order::release); |
219 | } |
220 | |
221 | bool |
222 | _M_request_stop() noexcept |
223 | { |
224 | // obtain lock and set stop_requested bit |
225 | auto __old = _M_value.load(memory_order::acquire); |
226 | do |
227 | { |
228 | if (__old & _S_stop_requested_bit) // stop request already made |
229 | return false; |
230 | } |
231 | while (!_M_try_lock_and_stop(__old)); |
232 | |
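// Record which thread made the request, so that _M_remove_callback can
// detect a stop_callback being destroyed from within its own callback
// and avoid waiting on _M_done (which would deadlock).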
233 | _M_requester = this_thread::get_id(); |
234 | |
235 | while (_M_head) |
236 | { |
237 | bool __last_cb; |
238 | _Stop_cb* __cb = _M_head; |
239 | _M_head = _M_head->_M_next; |
240 | if (_M_head) |
241 | { |
242 | _M_head->_M_prev = nullptr; |
243 | __last_cb = false; |
244 | } |
245 | else |
246 | __last_cb = true; |
247 | |
248 | // Allow other callbacks to be unregistered while __cb runs. |
249 | _M_unlock(); |
250 | |
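// The callback may destroy its own stop_callback object. Point
// _M_destroyed at a local flag so _M_remove_callback can tell us,
// in which case *__cb must not be touched again after _M_run().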
251 | bool __destroyed = false; |
252 | __cb->_M_destroyed = &__destroyed; |
253 | |
254 | // run callback |
255 | __cb->_M_run(); |
256 | |
257 | if (!__destroyed) |
258 | { |
259 | __cb->_M_destroyed = nullptr; |
260 | |
261 | // synchronize with destructor of stop_callback that owns *__cb |
262 | if (!__gnu_cxx::__is_single_threaded()) |
263 | __cb->_M_done.release(); |
264 | } |
265 | |
266 | // Avoid relocking if we already know there are no more callbacks. |
267 | if (__last_cb) |
268 | return true; |
269 | |
270 | _M_lock(); |
271 | } |
272 | |
273 | _M_unlock(); |
274 | return true; |
275 | } |
276 | |
277 | [[__gnu__::__nonnull__]] |
278 | bool |
279 | _M_register_callback(_Stop_cb* __cb) noexcept |
280 | { |
281 | auto __old = _M_value.load(memory_order::acquire); |
282 | do |
283 | { |
284 | if (__old & _S_stop_requested_bit) // stop request already made |
285 | { |
286 | __cb->_M_run(); // run synchronously |
287 | return false; |
288 | } |
289 | |
290 | if (__old < _S_ssrc_counter_inc) // no stop_source owns *this |
291 | // No need to register callback if no stop request can be made. |
292 | // Returning false also means the stop_callback does not share |
293 | // ownership of this state, but that's not observable. |
294 | return false; |
295 | } |
296 | while (!_M_try_lock(__old)); |
297 | |
298 | __cb->_M_next = _M_head; |
299 | if (_M_head) |
300 | { |
301 | _M_head->_M_prev = __cb; |
302 | } |
303 | _M_head = __cb; |
304 | _M_unlock(); |
305 | return true; |
306 | } |
307 | |
308 | // Called by ~stop_callback just before destroying *__cb. |
309 | [[__gnu__::__nonnull__]] |
310 | void |
311 | _M_remove_callback(_Stop_cb* __cb) |
312 | { |
313 | _M_lock(); |
314 | |
315 | if (__cb == _M_head) |
316 | { |
317 | _M_head = _M_head->_M_next; |
318 | if (_M_head) |
319 | _M_head->_M_prev = nullptr; |
320 | _M_unlock(); |
321 | return; |
322 | } |
323 | else if (__cb->_M_prev) |
324 | { |
325 | __cb->_M_prev->_M_next = __cb->_M_next; |
326 | if (__cb->_M_next) |
327 | __cb->_M_next->_M_prev = __cb->_M_prev; |
328 | _M_unlock(); |
329 | return; |
330 | } |
331 | |
332 | _M_unlock(); |
333 | |
334 | // Callback is not in the list, so must have been removed by a call to |
335 | // _M_request_stop. |
336 | |
337 | // Despite appearances there is no data race on _M_requester. The only |
338 | // write to it happens before the callback is removed from the list, |
339 | // and removing it from the list happens before this read. |
340 | if (!(_M_requester == this_thread::get_id())) |
341 | { |
342 | // Synchronize with completion of callback. |
343 | __cb->_M_done.acquire(); |
344 | // Safe for ~stop_callback to destroy *__cb now. |
345 | return; |
346 | } |
347 | |
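// We are the thread running the callback, i.e. the callback is destroying
// its own stop_callback. Tell _M_request_stop not to touch *__cb again
// after the callback returns.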
348 | if (__cb->_M_destroyed) |
349 | *__cb->_M_destroyed = true; |
350 | } |
351 | |
352 | // Try to obtain the lock. |
353 | // Returns true if the lock is acquired (with memory order acquire). |
354 | // Otherwise, sets __curval = _M_value.load(__failure) and returns false. |
355 | // Might fail spuriously, so must be called in a loop. |
356 | bool |
357 | _M_try_lock(value_type& __curval, |
358 | memory_order __failure = memory_order::acquire) noexcept |
359 | { |
360 | return _M_do_try_lock(__curval, 0, memory_order::acquire, __failure); |
361 | } |
362 | |
363 | // Try to obtain the lock to make a stop request. |
364 | // Returns true if the lock is acquired and the _S_stop_requested_bit is |
365 | // set (with memory order acq_rel so that other threads see the request). |
366 | // Otherwise, sets __curval = _M_value.load(memory_order::acquire) and |
367 | // returns false. |
368 | // Might fail spuriously, so must be called in a loop. |
369 | bool |
370 | _M_try_lock_and_stop(value_type& __curval) noexcept |
371 | { |
372 | return _M_do_try_lock(__curval, _S_stop_requested_bit, |
373 | memory_order::acq_rel, memory_order::acquire); |
374 | } |
375 | |
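// Try to set the lock bit (and any extra __newbits) in a single CAS.
// If the lock bit is already set, yield, reload __curval with __failure
// ordering and return false so the caller can retry.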
376 | bool |
377 | _M_do_try_lock(value_type& __curval, value_type __newbits, |
378 | memory_order __success, memory_order __failure) noexcept |
379 | { |
380 | if (__curval & _S_locked_bit) |
381 | { |
382 | _S_yield(); |
383 | __curval = _M_value.load(__failure); |
384 | return false; |
385 | } |
386 | __newbits |= _S_locked_bit; |
387 | return _M_value.compare_exchange_weak(__curval, __curval | __newbits, |
388 | __success, __failure); |
389 | } |
390 | }; |
391 | |
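// An owning handle to a reference-counted _Stop_state_t. Copying adds an
// owner, moving transfers ownership, and the destructor deletes the state
// when the last owner goes away.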
392 | struct _Stop_state_ref |
393 | { |
394 | _Stop_state_ref() = default; |
395 | |
396 | explicit |
397 | _Stop_state_ref(const stop_source&) |
398 | : _M_ptr(new _Stop_state_t()) |
399 | { } |
400 | |
401 | _Stop_state_ref(const _Stop_state_ref& __other) noexcept |
402 | : _M_ptr(__other._M_ptr) |
403 | { |
404 | if (_M_ptr) |
405 | _M_ptr->_M_add_owner(); |
406 | } |
407 | |
408 | _Stop_state_ref(_Stop_state_ref&& __other) noexcept |
409 | : _M_ptr(__other._M_ptr) |
410 | { |
411 | __other._M_ptr = nullptr; |
412 | } |
413 | |
414 | _Stop_state_ref& |
415 | operator=(const _Stop_state_ref& __other) noexcept |
416 | { |
417 | if (auto __ptr = __other._M_ptr; __ptr != _M_ptr) |
418 | { |
419 | if (__ptr) |
420 | __ptr->_M_add_owner(); |
421 | if (_M_ptr) |
422 | _M_ptr->_M_release_ownership(); |
423 | _M_ptr = __ptr; |
424 | } |
425 | return *this; |
426 | } |
427 | |
428 | _Stop_state_ref& |
429 | operator=(_Stop_state_ref&& __other) noexcept |
430 | { |
431 | _Stop_state_ref(std::move(__other)).swap(*this); |
432 | return *this; |
433 | } |
434 | |
435 | ~_Stop_state_ref() |
436 | { |
437 | if (_M_ptr) |
438 | _M_ptr->_M_release_ownership(); |
439 | } |
440 | |
441 | void |
442 | swap(_Stop_state_ref& __other) noexcept |
443 | { std::swap(_M_ptr, __other._M_ptr); } |
444 | |
445 | explicit operator bool() const noexcept { return _M_ptr != nullptr; } |
446 | |
447 | _Stop_state_t* operator->() const noexcept { return _M_ptr; } |
448 | |
449 | #if __cpp_impl_three_way_comparison >= 201907L |
450 | friend bool |
451 | operator==(const _Stop_state_ref&, const _Stop_state_ref&) = default; |
452 | #else |
453 | friend bool |
454 | operator==(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs) |
455 | noexcept |
456 | { return __lhs._M_ptr == __rhs._M_ptr; } |
457 | |
458 | friend bool |
459 | operator!=(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs) |
460 | noexcept |
461 | { return __lhs._M_ptr != __rhs._M_ptr; } |
462 | #endif |
463 | |
464 | private: |
465 | _Stop_state_t* _M_ptr = nullptr; |
466 | }; |
467 | |
468 | _Stop_state_ref _M_state; |
469 | |
470 | explicit |
471 | stop_token(const _Stop_state_ref& __state) noexcept |
472 | : _M_state{__state} |
473 | { } |
474 | }; |
475 | |
476 | /// A type that allows a stop request to be made. |
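///
/// An illustrative sketch: copies of a `stop_source` share one stop state,
/// so a request made through any of them is visible to every associated
/// `stop_token`.
/// @code
/// std::stop_source src;
/// std::stop_token tok = src.get_token();
/// src.request_stop();                 // returns true: this call made the request
/// bool seen = tok.stop_requested();   // true: tok shares src's stop state
/// @endcode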
477 | class stop_source |
478 | { |
479 | public: |
480 | stop_source() : _M_state(*this) |
481 | { } |
482 | |
483 | explicit stop_source(std::nostopstate_t) noexcept |
484 | { } |
485 | |
486 | stop_source(const stop_source& __other) noexcept |
487 | : _M_state(__other._M_state) |
488 | { |
489 | if (_M_state) |
490 | _M_state->_M_add_ssrc(); |
491 | } |
492 | |
493 | stop_source(stop_source&&) noexcept = default; |
494 | |
495 | stop_source& |
496 | operator=(const stop_source& __other) noexcept |
497 | { |
498 | if (_M_state != __other._M_state) |
499 | { |
500 | stop_source __sink(std::move(*this)); |
501 | _M_state = __other._M_state; |
502 | if (_M_state) |
503 | _M_state->_M_add_ssrc(); |
504 | } |
505 | return *this; |
506 | } |
507 | |
508 | stop_source& |
509 | operator=(stop_source&&) noexcept = default; |
510 | |
511 | ~stop_source() |
512 | { |
513 | if (_M_state) |
514 | _M_state->_M_sub_ssrc(); |
515 | } |
516 | |
517 | [[nodiscard]] |
518 | bool |
519 | stop_possible() const noexcept |
520 | { |
521 | return static_cast<bool>(_M_state); |
522 | } |
523 | |
524 | [[nodiscard]] |
525 | bool |
526 | stop_requested() const noexcept |
527 | { |
528 | return static_cast<bool>(_M_state) && _M_state->_M_stop_requested(); |
529 | } |
530 | |
531 | bool |
532 | request_stop() const noexcept |
533 | { |
534 | if (stop_possible()) |
535 | return _M_state->_M_request_stop(); |
536 | return false; |
537 | } |
538 | |
539 | [[nodiscard]] |
540 | stop_token |
541 | get_token() const noexcept |
542 | { |
543 | return stop_token{_M_state}; |
544 | } |
545 | |
546 | void |
547 | swap(stop_source& __other) noexcept |
548 | { |
549 | _M_state.swap(__other._M_state); |
550 | } |
551 | |
552 | [[nodiscard]] |
553 | friend bool |
554 | operator==(const stop_source& __a, const stop_source& __b) noexcept |
555 | { |
556 | return __a._M_state == __b._M_state; |
557 | } |
558 | |
559 | friend void |
560 | swap(stop_source& __lhs, stop_source& __rhs) noexcept |
561 | { |
562 | __lhs.swap(__rhs); |
563 | } |
564 | |
565 | private: |
566 | stop_token::_Stop_state_ref _M_state; |
567 | }; |
568 | |
569 | /// A wrapper for callbacks to be run when a stop request is made. |
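///
/// An illustrative sketch: the callback is invoked by the thread that calls
/// `request_stop()`, or immediately in the constructor if a stop request
/// had already been made before the `stop_callback` was constructed.
/// @code
/// std::stop_source src;
/// bool notified = false;
/// std::stop_callback cb(src.get_token(), [&notified] { notified = true; });
/// src.request_stop(); // runs the callback; notified is now true
/// @endcode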
570 | template<typename _Callback> |
571 | class [[nodiscard]] stop_callback |
572 | { |
573 | static_assert(is_nothrow_destructible_v<_Callback>); |
574 | static_assert(is_invocable_v<_Callback>); |
575 | |
576 | public: |
577 | using callback_type = _Callback; |
578 | |
579 | template<typename _Cb, |
580 | enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0> |
581 | explicit |
582 | stop_callback(const stop_token& __token, _Cb&& __cb) |
583 | noexcept(is_nothrow_constructible_v<_Callback, _Cb>) |
584 | : _M_cb(std::forward<_Cb>(__cb)) |
585 | { |
586 | if (auto __state = __token._M_state) |
587 | { |
588 | if (__state->_M_register_callback(&_M_cb)) |
589 | _M_state.swap(__state); |
590 | } |
591 | } |
592 | |
593 | template<typename _Cb, |
594 | enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0> |
595 | explicit |
596 | stop_callback(stop_token&& __token, _Cb&& __cb) |
597 | noexcept(is_nothrow_constructible_v<_Callback, _Cb>) |
598 | : _M_cb(std::forward<_Cb>(__cb)) |
599 | { |
600 | if (auto& __state = __token._M_state) |
601 | { |
602 | if (__state->_M_register_callback(&_M_cb)) |
603 | _M_state.swap(__state); |
604 | } |
605 | } |
606 | |
607 | ~stop_callback() |
608 | { |
609 | if (_M_state) |
610 | { |
611 | _M_state->_M_remove_callback(&_M_cb); |
612 | } |
613 | } |
614 | |
615 | stop_callback(const stop_callback&) = delete; |
616 | stop_callback& operator=(const stop_callback&) = delete; |
617 | stop_callback(stop_callback&&) = delete; |
618 | stop_callback& operator=(stop_callback&&) = delete; |
619 | |
620 | private: |
621 | struct _Cb_impl : stop_token::_Stop_cb |
622 | { |
623 | template<typename _Cb> |
624 | explicit |
625 | _Cb_impl(_Cb&& __cb) |
626 | : _Stop_cb(&_S_execute), |
627 | _M_cb(std::forward<_Cb>(__cb)) |
628 | { } |
629 | |
630 | _Callback _M_cb; |
631 | |
632 | [[__gnu__::__nonnull__]] |
633 | static void |
634 | _S_execute(_Stop_cb* __that) noexcept |
635 | { |
636 | _Callback& __cb = static_cast<_Cb_impl*>(__that)->_M_cb; |
637 | std::forward<_Callback>(__cb)(); |
638 | } |
639 | }; |
640 | |
641 | _Cb_impl _M_cb; |
642 | stop_token::_Stop_state_ref _M_state; |
643 | }; |
644 | |
645 | template<typename _Callback> |
646 | stop_callback(stop_token, _Callback) -> stop_callback<_Callback>; |
647 | |
648 | _GLIBCXX_END_NAMESPACE_VERSION |
649 | } // namespace |
650 | #endif // __cplusplus > 201703L |
651 | #endif // _GLIBCXX_STOP_TOKEN |
652 | |