// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H
#define _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H

#include <__bit/popcount.h>
#include <__config>
#include <atomic>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20

// This class implements an RAII unique_lock without a mutex: it operates on a
// std::atomic<State>, where State contains a lock bit and may contain other
// data, and LockedBit is the value of State with only the lock bit set,
// e.g. 1 << 2.
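//
// A minimal usage sketch (illustrative only; the state type, bit choice, and
// variable names below are hypothetical, not part of this header):
//
//   std::atomic<uint32_t> __state{0};
//   {
//     // blocks until bit (1 << 2) is acquired; the destructor clears the
//     // bit again and notifies waiters
//     __atomic_unique_lock<uint32_t, uint32_t{1} << 2> __lock(__state);
//     // ... critical section; the other bits of __state remain usable ...
//   }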
template <class _State, _State _LockedBit>
class _LIBCPP_AVAILABILITY_SYNC __atomic_unique_lock {
  static_assert(std::__popcount(static_cast<unsigned long long>(_LockedBit)) == 1,
                "LockedBit must be an integer where only one bit is set");

  std::atomic<_State>& __state_;
  bool __is_locked_;

public:
  _LIBCPP_HIDE_FROM_ABI explicit __atomic_unique_lock(std::atomic<_State>& __state) noexcept
      : __state_(__state), __is_locked_(true) {
    __lock();
  }

  template <class _Pred>
  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(std::atomic<_State>& __state, _Pred&& __give_up_locking) noexcept
      : __state_(__state), __is_locked_(false) {
    __is_locked_ = __lock_impl(__give_up_locking, __set_locked_bit, std::memory_order_acquire);
  }
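
  // For instance (a hedged sketch; __cancelled_bit is a hypothetical bit in
  // _State), a caller can refuse to take the lock once another thread has
  // already set some other bit:
  //
  //   auto __give_up = [](_State __s) { return (__s & __cancelled_bit) != 0; };
  //   __atomic_unique_lock<_State, __locked_bit> __lock(__state, __give_up);
  //   if (!__lock.__owns_lock()) {
  //     // __cancelled_bit was already set; skip the work that needed the lock
  //   }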

  template <class _Pred, class _UnaryFunction>
  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(
      std::atomic<_State>& __state,
      _Pred&& __give_up_locking,
      _UnaryFunction&& __state_after_lock,
      std::memory_order __locked_ordering) noexcept
      : __state_(__state), __is_locked_(false) {
    __is_locked_ = __lock_impl(__give_up_locking, __state_after_lock, __locked_ordering);
  }
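
  // For instance (a hedged sketch; __request_bit is a hypothetical bit in
  // _State), a caller can set an extra bit in the same atomic exchange that
  // takes the lock, with stronger ordering than the default acquire:
  //
  //   __atomic_unique_lock<_State, __locked_bit> __lock(
  //       __state,
  //       [](_State __s) { return (__s & __request_bit) != 0; },           // give up if already requested
  //       [](_State __s) { return __s | __locked_bit | __request_bit; },   // lock and set the bit in one exchange
  //       std::memory_order_acq_rel);
  //   if (__lock.__owns_lock()) { /* first thread to request; do the work */ }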

  __atomic_unique_lock(const __atomic_unique_lock&)            = delete;
  __atomic_unique_lock(__atomic_unique_lock&&)                 = delete;
  __atomic_unique_lock& operator=(const __atomic_unique_lock&) = delete;
  __atomic_unique_lock& operator=(__atomic_unique_lock&&)      = delete;

  _LIBCPP_HIDE_FROM_ABI ~__atomic_unique_lock() {
    if (__is_locked_) {
      __unlock();
    }
  }

  _LIBCPP_HIDE_FROM_ABI bool __owns_lock() const noexcept { return __is_locked_; }

  _LIBCPP_HIDE_FROM_ABI void __lock() noexcept {
    const auto __never_give_up_locking = [](_State) { return false; };
    // std::memory_order_acquire so that reads performed after taking the lock
    // observe the values written before the lock was last released.
    __lock_impl(__never_give_up_locking, __set_locked_bit, std::memory_order_acquire);
    __is_locked_ = true;
  }

  _LIBCPP_HIDE_FROM_ABI void __unlock() noexcept {
    // Clear the _LockedBit. `memory_order_release` so that all writes made
    // before calling `__unlock` are visible to the thread that locks next.
    __state_.fetch_and(static_cast<_State>(~_LockedBit), std::memory_order_release);
    // wake every thread blocked in __lock_impl's wait() so it can retry
    __state_.notify_all();
    __is_locked_ = false;
  }
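
  // __lock() and __unlock() are public, so a holder may temporarily drop the
  // lock, e.g. (sketch; __run_callback is hypothetical) around work that has
  // to run unlocked:
  //
  //   __atomic_unique_lock<_State, __locked_bit> __lock(__state);
  //   __lock.__unlock();  // release while running user code
  //   __run_callback();
  //   __lock.__lock();    // re-acquire before touching the shared state again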

private:
  template <class _Pred, class _UnaryFunction>
  _LIBCPP_HIDE_FROM_ABI bool
  __lock_impl(_Pred&& __give_up_locking, // while trying to lock the state, if the predicate returns true, give up
                                         // locking and return
              _UnaryFunction&& __state_after_lock,
              std::memory_order __locked_ordering) noexcept {
    // Until we exit the inner while loop, we read nothing but the atomic state
    // itself: no order-dependent values written by other threads are read, and
    // nothing that needs to be seen by other threads is written. Therefore
    // `memory_order_relaxed` is enough.
    _State __current_state = __state_.load(std::memory_order_relaxed);
    do {
      while (true) {
        if (__give_up_locking(__current_state)) {
          // user-provided early return condition: fail to lock
          return false;
        } else if ((__current_state & _LockedBit) != 0) {
          // another thread has locked the state; we need to wait
          __state_.wait(__current_state, std::memory_order_relaxed);
          // when woken up by notify_all, or spuriously, __state_ might have
          // changed, so reload it. Note that the new state's _LockedBit may or
          // may not be 0.
          __current_state = __state_.load(std::memory_order_relaxed);
        } else {
          // at least for now, the state is not locked, so we can try
          // `compare_exchange_weak` to lock it. Note that __current_state's
          // lock bit has to be 0 at this point.
          break;
        }
      }
    } while (!__state_.compare_exchange_weak(
        __current_state,                     // If __state_ still equals __current_state, the lock bit must be zero
                                             // before the exchange, so we are good to lock and return. If __state_ has
                                             // a different value, because other threads locked it between the `break`
                                             // above and this statement, the exchange fails and we go back to the
                                             // inner while loop.
        __state_after_lock(__current_state), // state after lock. Usually __current_state | _LockedBit; some use cases
                                             // need to set other bits in the same atomic operation, so we accept a
                                             // function.
        __locked_ordering,                   // successful exchange ordering. Usually std::memory_order_acquire; some
                                             // use cases need stricter ordering, so we accept it as a parameter.
        std::memory_order_relaxed            // failed exchange ordering. We don't need any ordering, as we go back to
                                             // the inner while loop.
        ));
    return true;
  }

  _LIBCPP_HIDE_FROM_ABI static constexpr auto __set_locked_bit = [](_State __state) { return __state | _LockedBit; };
};

#endif // _LIBCPP_STD_VER >= 20

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H
