
// Copyright Oliver Kowalke 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H
#define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/spinlock_status.hpp>

// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors

namespace boost {
namespace fibers {
namespace detail {

class spinlock_ttas_adaptive {
private:
    template< typename FBSplk >
    friend class spinlock_rtm;

    std::atomic< spinlock_status > state_{ spinlock_status::unlocked };
    std::atomic< std::size_t > retries_{ 0 };

public:
    spinlock_ttas_adaptive() = default;

    spinlock_ttas_adaptive( spinlock_ttas_adaptive const&) = delete;
    spinlock_ttas_adaptive & operator=( spinlock_ttas_adaptive const&) = delete;

    void lock() noexcept {
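        // per-thread random engine used to randomize the exponential back-off delay below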
        static thread_local std::minstd_rand generator{ std::random_device{}() };
        std::size_t collisions = 0;
        for (;;) {
            std::size_t retries = 0;
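            // adapt the spin limits to recent contention: start from the smoothed retry
            // count of previous acquisitions and cap it at the configured spin thresholds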
            const std::size_t prev_retries = retries_.load( std::memory_order_relaxed);
            const std::size_t max_relax_retries = (std::min)(
                static_cast< std::size_t >( BOOST_FIBERS_SPIN_BEFORE_SLEEP0), 2 * prev_retries + 10);
            const std::size_t max_sleep_retries = (std::min)(
                static_cast< std::size_t >( BOOST_FIBERS_SPIN_BEFORE_YIELD), 2 * prev_retries + 10);
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'state_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            // test shared variable 'state_'
            // first access to 'state_' -> cache miss
            // successive access to 'state_' -> cache hit
            // if 'state_' was released by another fiber
            // cached 'state_' is invalidated -> cache miss
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                if ( max_relax_retries > retries) {
                    ++retries;
                    // give CPU a hint that this thread is in a "spin-wait" loop
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                } else if ( max_sleep_retries > retries) {
                    ++retries;
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring3 to ring 0 transition costing about 1000 cycles
                    // std::this_thread::sleep_for( 0us) lets this_thread give up the remaining part of its time slice,
                    // but only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                } else {
                    // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                    // but only to another thread on the same processor
                    // instead of constantly checking, the thread checks again only if no other useful work is pending
                    std::this_thread::yield();
                }
#else
                std::this_thread::yield();
#endif
            }
            // test-and-set shared variable 'state_'
            // every time 'state_' is signaled over the bus, even if the test fails
            if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on a linear congruential generator (LCG)
                std::uniform_int_distribution< std::size_t > distribution{
                    0, static_cast< std::size_t >( 1) << (std::min)( collisions, static_cast< std::size_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
                const std::size_t z = distribution( generator);
                ++collisions;
                for ( std::size_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                }
            } else {
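                // update the smoothed retry count: a weighted moving average that gives
                // 1/8 weight to the retries observed in this acquisition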
                retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
                // success, thread has acquired the lock
                break;
            }
        }
    }

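    // single test-and-set attempt; succeeds only if the previous value was 'unlocked'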
    bool try_lock() noexcept {
        return spinlock_status::unlocked == state_.exchange( spinlock_status::locked, std::memory_order_acquire);
    }

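    // the release store publishes the writes of the critical section to the next owner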
    void unlock() noexcept {
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }
};
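
// Usage sketch (illustrative only; assumes <mutex> for std::lock_guard): the class
// satisfies the Lockable requirements, so it can guard a short critical section:
//
//   spinlock_ttas_adaptive splk;
//   {
//       std::lock_guard< spinlock_ttas_adaptive > lk{ splk };
//       // ... short critical section ...
//   }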

}}}

#endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H