
//          Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
//    (See accompanying file LICENSE_1_0.txt or copy at
//          http://www.boost.org/LICENSE_1_0.txt)

#include "boost/fiber/context.hpp"

#include <cstdlib>
#include <functional>
#include <mutex>
#include <new>

#include "boost/fiber/exceptions.hpp"
#include "boost/fiber/scheduler.hpp"

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_PREFIX
#endif

20 namespace boost {
21 namespace fibers {
22 
23 class main_context final : public context {
24 public:
main_context()25     main_context() noexcept :
26         context{ 1, type::main_context, launch::post } {
27     }
28 };
29 
30 class dispatcher_context final : public context {
31 private:
32     boost::context::fiber
run_(boost::context::fiber && c)33     run_( boost::context::fiber && c) {
34 #if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
35         std::move( c).resume();
36 #endif
37 		// execute scheduler::dispatch()
38 		return get_scheduler()->dispatch();
39     }
40 
41 public:
dispatcher_context(boost::context::preallocated const & palloc,default_stack && salloc)42     dispatcher_context( boost::context::preallocated const& palloc, default_stack && salloc) :
43         context{ 0, type::dispatcher_context, launch::post } {
44         c_ = boost::context::fiber{ std::allocator_arg, palloc, salloc,
45                                     std::bind( & dispatcher_context::run_, this, std::placeholders::_1) };
46 #if (defined(BOOST_USE_UCONTEXT)||defined(BOOST_USE_WINFIB))
47         c_ = std::move( c_).resume();
48 #endif
49     }
50 };
51 
make_dispatcher_context()52 static intrusive_ptr< context > make_dispatcher_context() {
53     default_stack salloc; // use default satck-size
54     auto sctx = salloc.allocate();
55     // reserve space for control structure
56     void * storage = reinterpret_cast< void * >(
57             ( reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sizeof( dispatcher_context) ) )
58             & ~ static_cast< uintptr_t >( 0xff) );
59     void * stack_bottom = reinterpret_cast< void * >(
60             reinterpret_cast< uintptr_t >( sctx.sp) - static_cast< uintptr_t >( sctx.size) );
61     const std::size_t size = reinterpret_cast< uintptr_t >( storage) - reinterpret_cast< uintptr_t >( stack_bottom);
62     // placement new of context on top of fiber's stack
63     return intrusive_ptr< context >{
64         new ( storage) dispatcher_context{
65                 boost::context::preallocated{ storage, size, sctx }, std::move( salloc) } };
66 }
67 
68 // schwarz counter
69 struct context_initializer {
70     static thread_local context *   active_;
71     static thread_local std::size_t counter_;
72 
context_initializerboost::fibers::context_initializer73     context_initializer() {
74         if ( 0 == counter_++) {
75             // main fiber context of this thread
76             context * main_ctx = new main_context{};
77             // scheduler of this thread
78             auto sched = new scheduler{};
79             // attach main context to scheduler
80             sched->attach_main_context( main_ctx);
81             // create and attach dispatcher context to scheduler
82             sched->attach_dispatcher_context( make_dispatcher_context() );
83             // make main context to active context
84             active_ = main_ctx;
85         }
86     }
87 
~context_initializerboost::fibers::context_initializer88     ~context_initializer() {
89         if ( 0 == --counter_) {
90             context * main_ctx = active_;
91             BOOST_ASSERT( main_ctx->is_context( type::main_context) );
92             scheduler * sched = main_ctx->get_scheduler();
93             delete sched;
94             delete main_ctx;
95         }
96     }
97 };
98 
99 // zero-initialization
100 thread_local context * context_initializer::active_{ nullptr };
101 thread_local std::size_t context_initializer::counter_{ 0 };
102 
103 context *
active()104 context::active() noexcept {
105     // initialized the first time control passes; per thread
106     thread_local static context_initializer ctx_initializer;
107     return context_initializer::active_;
108 }
109 
110 void
reset_active()111 context::reset_active() noexcept {
112     context_initializer::active_ = nullptr;
113 }
114 
~context()115 context::~context() {
116     // protect for concurrent access
117     std::unique_lock< detail::spinlock > lk{ splk_ };
118     BOOST_ASSERT( ! ready_is_linked() );
119     BOOST_ASSERT( ! remote_ready_is_linked() );
120     BOOST_ASSERT( ! sleep_is_linked() );
121     BOOST_ASSERT( ! wait_is_linked() );
122     if ( is_context( type::dispatcher_context) ) {
123         // dispatcher-context is resumed by main-context
124         // while the scheduler is deconstructed
125 #ifdef BOOST_DISABLE_ASSERTS
126         wait_queue_.pop_front();
127 #else
128         context * ctx = & wait_queue_.front();
129         wait_queue_.pop_front();
130         BOOST_ASSERT( ctx->is_context( type::main_context) );
131         BOOST_ASSERT( nullptr == active() );
132 #endif
133     }
134     BOOST_ASSERT( wait_queue_.empty() );
135     delete properties_;
136 }
137 
138 context::id
get_id() const139 context::get_id() const noexcept {
140     return id{ const_cast< context * >( this) };
141 }
142 
143 void
resume()144 context::resume() noexcept {
145     context * prev = this;
146     // context_initializer::active_ will point to `this`
147     // prev will point to previous active context
148     std::swap( context_initializer::active_, prev);
149     // pass pointer to the context that resumes `this`
150     std::move( c_).resume_with([prev](boost::context::fiber && c){
151                 prev->c_ = std::move( c);
152                 return boost::context::fiber{};
153             });
154 }
155 
156 void
resume(detail::spinlock_lock & lk)157 context::resume( detail::spinlock_lock & lk) noexcept {
158     context * prev = this;
159     // context_initializer::active_ will point to `this`
160     // prev will point to previous active context
161     std::swap( context_initializer::active_, prev);
162     // pass pointer to the context that resumes `this`
163     std::move( c_).resume_with([prev,&lk](boost::context::fiber && c){
164                 prev->c_ = std::move( c);
165                 lk.unlock();
166                 return boost::context::fiber{};
167             });
168 }
169 
170 void
resume(context * ready_ctx)171 context::resume( context * ready_ctx) noexcept {
172     context * prev = this;
173     // context_initializer::active_ will point to `this`
174     // prev will point to previous active context
175     std::swap( context_initializer::active_, prev);
176     // pass pointer to the context that resumes `this`
177     std::move( c_).resume_with([prev,ready_ctx](boost::context::fiber && c){
178                 prev->c_ = std::move( c);
179                 context::active()->schedule( ready_ctx);
180                 return boost::context::fiber{};
181             });
182 }
183 
184 void
suspend()185 context::suspend() noexcept {
186     get_scheduler()->suspend();
187 }
188 
189 void
suspend(detail::spinlock_lock & lk)190 context::suspend( detail::spinlock_lock & lk) noexcept {
191     get_scheduler()->suspend( lk);
192 }
193 
194 void
join()195 context::join() {
196     // get active context
197     context * active_ctx = context::active();
198     // protect for concurrent access
199     std::unique_lock< detail::spinlock > lk{ splk_ };
200     // wait for context which is not terminated
201     if ( ! terminated_) {
202         // push active context to wait-queue, member
203         // of the context which has to be joined by
204         // the active context
205         active_ctx->wait_link( wait_queue_);
206         // suspend active context
207         active_ctx->get_scheduler()->suspend( lk);
208         // active context resumed
209         BOOST_ASSERT( context::active() == active_ctx);
210     }
211 }
212 
213 void
yield()214 context::yield() noexcept {
215     // yield active context
216     get_scheduler()->yield( context::active() );
217 }
218 
219 boost::context::fiber
suspend_with_cc()220 context::suspend_with_cc() noexcept {
221     context * prev = this;
222     // context_initializer::active_ will point to `this`
223     // prev will point to previous active context
224     std::swap( context_initializer::active_, prev);
225     // pass pointer to the context that resumes `this`
226     return std::move( c_).resume_with([prev](boost::context::fiber && c){
227                 prev->c_ = std::move( c);
228                 return boost::context::fiber{};
229             });
230 }
231 
232 boost::context::fiber
terminate()233 context::terminate() noexcept {
234     // protect for concurrent access
235     std::unique_lock< detail::spinlock > lk{ splk_ };
236     // mark as terminated
237     terminated_ = true;
238     // notify all waiting fibers
239     while ( ! wait_queue_.empty() ) {
240         context * ctx = & wait_queue_.front();
241         // remove fiber from wait-queue
242         wait_queue_.pop_front();
243         // notify scheduler
244         schedule( ctx);
245     }
246     BOOST_ASSERT( wait_queue_.empty() );
247     // release fiber-specific-data
248     for ( fss_data_t::value_type & data : fss_data_) {
249         data.second.do_cleanup();
250     }
251     fss_data_.clear();
252     // switch to another context
253     return get_scheduler()->terminate( lk, this);
254 }
255 
256 bool
wait_until(std::chrono::steady_clock::time_point const & tp)257 context::wait_until( std::chrono::steady_clock::time_point const& tp) noexcept {
258     BOOST_ASSERT( nullptr != get_scheduler() );
259     BOOST_ASSERT( this == active() );
260     return get_scheduler()->wait_until( this, tp);
261 }
262 
263 bool
wait_until(std::chrono::steady_clock::time_point const & tp,detail::spinlock_lock & lk)264 context::wait_until( std::chrono::steady_clock::time_point const& tp,
265                      detail::spinlock_lock & lk) noexcept {
266     BOOST_ASSERT( nullptr != get_scheduler() );
267     BOOST_ASSERT( this == active() );
268     return get_scheduler()->wait_until( this, tp, lk);
269 }
270 
271 void
schedule(context * ctx)272 context::schedule( context * ctx) noexcept {
273     //BOOST_ASSERT( nullptr != ctx);
274     BOOST_ASSERT( this != ctx);
275     BOOST_ASSERT( nullptr != get_scheduler() );
276     BOOST_ASSERT( nullptr != ctx->get_scheduler() );
277 #if ! defined(BOOST_FIBERS_NO_ATOMICS)
278     // FIXME: comparing scheduler address' must be synchronized?
279     //        what if ctx is migrated between threads
280     //        (other scheduler assigned)
281     if ( scheduler_ == ctx->get_scheduler() ) {
282         // local
283         get_scheduler()->schedule( ctx);
284     } else {
285         // remote
286         ctx->get_scheduler()->schedule_from_remote( ctx);
287     }
288 #else
289     BOOST_ASSERT( get_scheduler() == ctx->get_scheduler() );
290     get_scheduler()->schedule( ctx);
291 #endif
292 }
293 
294 void *
get_fss_data(void const * vp) const295 context::get_fss_data( void const * vp) const {
296     auto key = reinterpret_cast< uintptr_t >( vp);
297     auto i = fss_data_.find( key);
298     return fss_data_.end() != i ? i->second.vp : nullptr;
299 }
300 
301 void
set_fss_data(void const * vp,detail::fss_cleanup_function::ptr_t const & cleanup_fn,void * data,bool cleanup_existing)302 context::set_fss_data( void const * vp,
303                        detail::fss_cleanup_function::ptr_t const& cleanup_fn,
304                        void * data,
305                        bool cleanup_existing) {
306     BOOST_ASSERT( cleanup_fn);
307     auto key = reinterpret_cast< uintptr_t >( vp);
308     auto i = fss_data_.find( key);
309     if ( fss_data_.end() != i) {
310         if( cleanup_existing) {
311             i->second.do_cleanup();
312         }
313         if ( nullptr != data) {
314             i->second = fss_data{ data, cleanup_fn };
315         } else {
316             fss_data_.erase( i);
317         }
318     } else {
319         fss_data_.insert(
320             std::make_pair(
321                 key,
322                 fss_data{ data, cleanup_fn } ) );
323     }
324 }
325 
326 void
set_properties(fiber_properties * props)327 context::set_properties( fiber_properties * props) noexcept {
328     delete properties_;
329     properties_ = props;
330 }
331 
332 bool
worker_is_linked() const333 context::worker_is_linked() const noexcept {
334     return worker_hook_.is_linked();
335 }
336 
337 bool
ready_is_linked() const338 context::ready_is_linked() const noexcept {
339     return ready_hook_.is_linked();
340 }
341 
342 bool
remote_ready_is_linked() const343 context::remote_ready_is_linked() const noexcept {
344     return remote_ready_hook_.is_linked();
345 }
346 
347 bool
sleep_is_linked() const348 context::sleep_is_linked() const noexcept {
349     return sleep_hook_.is_linked();
350 }
351 
352 bool
terminated_is_linked() const353 context::terminated_is_linked() const noexcept {
354     return terminated_hook_.is_linked();
355 }
356 
357 bool
wait_is_linked() const358 context::wait_is_linked() const noexcept {
359     return wait_hook_.is_linked();
360 }
361 
362 void
worker_unlink()363 context::worker_unlink() noexcept {
364     BOOST_ASSERT( worker_is_linked() );
365     worker_hook_.unlink();
366 }
367 
368 void
ready_unlink()369 context::ready_unlink() noexcept {
370     BOOST_ASSERT( ready_is_linked() );
371     ready_hook_.unlink();
372 }
373 
374 void
sleep_unlink()375 context::sleep_unlink() noexcept {
376     BOOST_ASSERT( sleep_is_linked() );
377     sleep_hook_.unlink();
378 }
379 
380 void
wait_unlink()381 context::wait_unlink() noexcept {
382     BOOST_ASSERT( wait_is_linked() );
383     wait_hook_.unlink();
384 }
385 
386 void
detach()387 context::detach() noexcept {
388     BOOST_ASSERT( context::active() != this);
389     get_scheduler()->detach_worker_context( this);
390 }
391 
392 void
attach(context * ctx)393 context::attach( context * ctx) noexcept {
394     BOOST_ASSERT( nullptr != ctx);
395     get_scheduler()->attach_worker_context( ctx);
396 }
397 
398 }}
399 
400 #ifdef BOOST_HAS_ABI_HEADERS
401 #  include BOOST_ABI_SUFFIX
402 #endif
403