
//          Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
//    (See accompanying file LICENSE_1_0.txt or copy at
//          http://www.boost.org/LICENSE_1_0.txt)

#include "boost/fiber/context.hpp"

#include <cstdlib>
#include <mutex>
#include <new>

#include "boost/fiber/exceptions.hpp"
#include "boost/fiber/scheduler.hpp"
#include "boost/fiber/algo/round_robin.hpp"

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_PREFIX
#endif

namespace boost {
namespace fibers {

class main_context final : public context {
public:
    main_context() noexcept :
        context{ 1, type::main_context, launch::post } {
    }
};

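// The dispatcher context hosts scheduler::dispatch() on its own fiber.
// With the BOOST_USE_UCONTEXT/BOOST_USE_WINFIB backends the fiber is
// entered once during construction, so run_() first resumes the
// continuation handed to it (jumping back into the constructor) before
// entering the dispatch loop; with the default fcontext backend the
// fiber is entered only on its first resume.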
class dispatcher_context final : public context {
private:
    boost::context::fiber
#if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
    run_( boost::context::fiber && c) {
        std::move( c).resume();
#else
    run_( boost::context::fiber &&) {
#endif
        // execute scheduler::dispatch()
        return get_scheduler()->dispatch();
    }

public:
    dispatcher_context( boost::context::preallocated const& palloc, stack_allocator_wrapper&& salloc) :
        context{ 0, type::dispatcher_context, launch::post } {
        c_ = boost::context::fiber{ std::allocator_arg, palloc, std::move( salloc),
                                    std::bind( & dispatcher_context::run_, this, std::placeholders::_1) };
#if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
        c_ = std::move( c_).resume();
#endif
    }
};

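// Carve the dispatcher's control structure out of the top of its own stack:
//
//   sctx.sp      -> +------------------------+
//                   | dispatcher_context     |  (placement new)
//   storage      -> +------------------------+
//                   | usable stack (size)    |
//   stack_bottom -> +------------------------+
//
// The ~0xff mask rounds the object's address down to a 256-byte boundary,
// so the placement-new'd object is suitably aligned; the bytes below it
// are handed to boost::context as the preallocated stack.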
static intrusive_ptr< context > make_dispatcher_context( stack_allocator_wrapper&& salloc) {
    auto sctx = salloc.allocate();
    // reserve space for control structure
    void * storage = reinterpret_cast< void * >(
            ( reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sizeof( dispatcher_context) ) )
            & ~ static_cast< uintptr_t >( 0xff) );
    void * stack_bottom = reinterpret_cast< void * >(
            reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sctx.size) );
    const std::size_t size = reinterpret_cast< uintptr_t >( storage) - reinterpret_cast< uintptr_t >( stack_bottom);
    // placement new of context on top of fiber's stack
    return intrusive_ptr< context >{
            new ( storage) dispatcher_context{
                boost::context::preallocated{ storage, size, sctx }, std::move( salloc) } };
}

// schwarz counter
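// The first context_initializer constructed on a thread performs the
// per-thread setup (scheduler, main context, dispatcher context); the
// destructor of the last one tears it down again. counter_ is
// thread_local, so each thread runs its own instance of the idiom.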
struct context_initializer {
    static thread_local context * active_;
    static thread_local std::size_t counter_;

    using default_scheduler = algo::round_robin;

    template< typename ... Args >
    context_initializer( Args && ... args) {
        if ( 0 == counter_++) {
            initialize( std::forward< Args >( args) ... );
        }
    }

    ~context_initializer() {
        if ( 0 == --counter_) {
            deinitialize();
        }
    }

    void initialize() {
        initialize( new default_scheduler(), make_stack_allocator_wrapper< default_stack >() );
    }

    void initialize( algo::algorithm::ptr_t algo, stack_allocator_wrapper&& salloc) {
        // main fiber context of this thread
        context * main_ctx = new main_context{};
        // scheduler of this thread
        auto sched = new scheduler( algo);
        // attach main context to scheduler
        sched->attach_main_context( main_ctx);
        // create and attach dispatcher context to scheduler
        sched->attach_dispatcher_context( make_dispatcher_context( std::move( salloc) ) );
        // make the main context the active context
        active_ = main_ctx;
    }

    void deinitialize() {
        context * main_ctx = active_;
        BOOST_ASSERT( main_ctx->is_context( type::main_context) );
        scheduler * sched = main_ctx->get_scheduler();
        delete sched;
        delete main_ctx;
    }
};

// zero-initialization
thread_local context * context_initializer::active_{ nullptr };
thread_local std::size_t context_initializer::counter_{ 0 };
bool
context::initialize_thread( algo::algorithm::ptr_t algo, stack_allocator_wrapper&& salloc) noexcept {
    if ( context_initializer::counter_ == 0) {
        // Initialization has not happened yet, so do it now via a local
        // context_initializer, which will decrement the counter again when
        // this function returns.
        context_initializer ctx_initializer( algo, std::move( salloc) );

        // Now call active() to register a thread-local context_initializer
        // which ensures the resources are freed when the thread exits.
        active();

        return true;
    } else {
        // It is already too late to initialize the dispatcher's stack
        // allocator, but we can still update the scheduling algorithm.
        active()->get_scheduler()->set_algo( algo);

        return false;
    }
}

context *
context::active() noexcept {
    // initialized the first time control passes; one instance per thread
    thread_local static context_initializer ctx_initializer;
    return context_initializer::active_;
}

void
context::reset_active() noexcept {
    context_initializer::active_ = nullptr;
}

context::~context() {
    // protect against concurrent access
    std::unique_lock< detail::spinlock > lk{ splk_ };
    BOOST_ASSERT( ! ready_is_linked() );
    BOOST_ASSERT( ! remote_ready_is_linked() );
    BOOST_ASSERT( ! sleep_is_linked() );
    if ( is_context( type::dispatcher_context) ) {
        BOOST_ASSERT( nullptr == active() );
    }
    BOOST_ASSERT( wait_queue_.empty() );
    delete properties_;
}

context::id
context::get_id() const noexcept {
    return id{ const_cast< context * >( this) };
}

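// The resume() family switches from the currently active context to `this`.
// active_ is swapped first; the lambda passed to resume_with() then runs on
// the freshly entered fiber and stores the suspended continuation back into
// the previous context, so it can itself be resumed later. The overloads
// additionally unlock a spinlock (resume(lk)) or schedule another context
// (resume(ready_ctx)) once the switch has happened.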
void
context::resume() noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    std::move( c_).resume_with([prev](boost::context::fiber && c){
                prev->c_ = std::move( c);
                return boost::context::fiber{};
            });
}

void
context::resume( detail::spinlock_lock & lk) noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    std::move( c_).resume_with([prev,&lk](boost::context::fiber && c){
                prev->c_ = std::move( c);
                lk.unlock();
                return boost::context::fiber{};
            });
}

void
context::resume( context * ready_ctx) noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    std::move( c_).resume_with([prev,ready_ctx](boost::context::fiber && c){
                prev->c_ = std::move( c);
                context::active()->schedule( ready_ctx);
                return boost::context::fiber{};
            });
}

void
context::suspend() noexcept {
    get_scheduler()->suspend();
}

void
context::suspend( detail::spinlock_lock & lk) noexcept {
    get_scheduler()->suspend( lk);
}

void
context::join() {
    // get active context
    context * active_ctx = context::active();
    // protect against concurrent access
    std::unique_lock< detail::spinlock > lk{ splk_ };
    // wait only if this context has not terminated yet
    if ( ! terminated_) {
        // push the active context onto the wait-queue of the
        // context that is being joined
        wait_queue_.suspend_and_wait( lk, active_ctx);
        // active context resumed
        BOOST_ASSERT( context::active() == active_ctx);
    }
}

void
context::yield() noexcept {
    // yield active context
    get_scheduler()->yield( context::active() );
}

boost::context::fiber
context::suspend_with_cc() noexcept {
    context * prev = this;
    // context_initializer::active_ will point to `this`
    // prev will point to previous active context
    std::swap( context_initializer::active_, prev);
    // pass pointer to the context that resumes `this`
    return std::move( c_).resume_with([prev](boost::context::fiber && c){
                prev->c_ = std::move( c);
                return boost::context::fiber{};
            });
}

boost::context::fiber
context::terminate() noexcept {
    // protect against concurrent access
    std::unique_lock< detail::spinlock > lk{ splk_ };
    // mark as terminated
    terminated_ = true;
    // notify all waiting fibers
    wait_queue_.notify_all();
    BOOST_ASSERT( wait_queue_.empty() );
    // release fiber-specific data
    for ( fss_data_t::value_type & data : fss_data_) {
        data.second.do_cleanup();
    }
    fss_data_.clear();
    // switch to another context
    return get_scheduler()->terminate( lk, this);
}

bool
context::wait_until( std::chrono::steady_clock::time_point const& tp) noexcept {
    BOOST_ASSERT( nullptr != get_scheduler() );
    BOOST_ASSERT( this == active() );
    return get_scheduler()->wait_until( this, tp);
}

bool
context::wait_until( std::chrono::steady_clock::time_point const& tp,
                     detail::spinlock_lock & lk,
                     waker && w) noexcept {
    BOOST_ASSERT( nullptr != get_scheduler() );
    BOOST_ASSERT( this == active() );
    return get_scheduler()->wait_until( this, tp, lk, std::move( w));
}

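// Epoch-based wake-up deduplication: a waiter captures the current value of
// waker_epoch_ before suspending; every waker tries to advance it from that
// captured value with a single compare-and-swap. Only the first waker wins
// and actually schedules the context; late or duplicate wake-ups (e.g. a
// notify racing a timeout) see a newer epoch, fail the CAS and are dropped.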
bool
context::wake( const size_t epoch) noexcept {
    size_t expected = epoch;
    bool is_last_waker = waker_epoch_.compare_exchange_strong( expected, epoch + 1, std::memory_order_acq_rel);
    if ( ! is_last_waker) {
        // waker_epoch_ has already been incremented, so this wake
        // operation is outdated; do nothing
        return false;
    }

    BOOST_ASSERT( context::active() != this);
    if ( context::active()->get_scheduler() == get_scheduler()) {
        get_scheduler()->schedule( this);
    } else {
        get_scheduler()->schedule_from_remote( this);
    }
    return true;
}

void
context::schedule( context * ctx) noexcept {
    //BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( this != ctx);
    BOOST_ASSERT( nullptr != get_scheduler() );
    BOOST_ASSERT( nullptr != ctx->get_scheduler() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    // FIXME: must comparing the scheduler addresses be synchronized?
    //        what if ctx is migrated between threads
    //        (other scheduler assigned)
    if ( scheduler_ == ctx->get_scheduler() ) {
        // local
        get_scheduler()->schedule( ctx);
    } else {
        // remote
        ctx->get_scheduler()->schedule_from_remote( ctx);
    }
#else
    BOOST_ASSERT( get_scheduler() == ctx->get_scheduler() );
    get_scheduler()->schedule( ctx);
#endif
}

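// Fiber-specific storage (FSS): entries are keyed by the address of the
// fss-handle object, reinterpret_cast to an integer. Analogous to
// thread-specific storage, each value carries a cleanup function that is
// invoked when the fiber terminates or the slot is overwritten.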
void *
context::get_fss_data( void const * vp) const {
    auto key = reinterpret_cast< uintptr_t >( vp);
    auto i = fss_data_.find( key);
    return fss_data_.end() != i ? i->second.vp : nullptr;
}

void
context::set_fss_data( void const * vp,
                       detail::fss_cleanup_function::ptr_t const& cleanup_fn,
                       void * data,
                       bool cleanup_existing) {
    BOOST_ASSERT( cleanup_fn);
    auto key = reinterpret_cast< uintptr_t >( vp);
    auto i = fss_data_.find( key);
    if ( fss_data_.end() != i) {
        if ( cleanup_existing) {
            i->second.do_cleanup();
        }
        if ( nullptr != data) {
            i->second = fss_data{ data, cleanup_fn };
        } else {
            fss_data_.erase( i);
        }
    } else {
        fss_data_.insert(
                std::make_pair(
                    key,
                    fss_data{ data, cleanup_fn } ) );
    }
}

void
context::set_properties( fiber_properties * props) noexcept {
    delete properties_;
    properties_ = props;
}

bool
context::worker_is_linked() const noexcept {
    return worker_hook_.is_linked();
}

bool
context::ready_is_linked() const noexcept {
    return ready_hook_.is_linked();
}

bool
context::remote_ready_is_linked() const noexcept {
    return remote_ready_hook_.is_linked();
}

bool
context::sleep_is_linked() const noexcept {
    return sleep_hook_.is_linked();
}

bool
context::terminated_is_linked() const noexcept {
    return terminated_hook_.is_linked();
}

void
context::worker_unlink() noexcept {
    BOOST_ASSERT( worker_is_linked() );
    worker_hook_.unlink();
}

void
context::ready_unlink() noexcept {
    BOOST_ASSERT( ready_is_linked() );
    ready_hook_.unlink();
}

void
context::sleep_unlink() noexcept {
    BOOST_ASSERT( sleep_is_linked() );
    sleep_hook_.unlink();
}

void
context::detach() noexcept {
    BOOST_ASSERT( context::active() != this);
    get_scheduler()->detach_worker_context( this);
}

void
context::attach( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    get_scheduler()->attach_worker_context( ctx);
}

}}

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_SUFFIX
#endif