//          Copyright Oliver Kowalke 2016.
// Distributed under the Boost Software License, Version 1.0.
//    (See accompanying file LICENSE_1_0.txt or copy at
//          http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_FIBERS_SPINLOCK_TTAS_H
#define BOOST_FIBERS_SPINLOCK_TTAS_H

#include <atomic>
#include <chrono>
#include <cmath>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/spinlock_status.hpp>

// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors

namespace boost {
namespace fibers {
namespace detail {

class spinlock_ttas {
private:
    template< typename FBSplk >
    friend class spinlock_rtm;

    std::atomic< spinlock_status >  state_{ spinlock_status::unlocked };

public:
    spinlock_ttas() = default;

    spinlock_ttas( spinlock_ttas const&) = delete;
    spinlock_ttas & operator=( spinlock_ttas const&) = delete;

    void lock() noexcept {
        static thread_local std::minstd_rand generator{ std::random_device{}() };
        std::size_t collisions = 0;
        for (;;) {
            // avoid using multiple pause instructions for a delay of a specific cycle count:
            // the delay of cpu_relax() (PAUSE on Intel) depends on the processor family,
            // so the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'state_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            std::size_t retries = 0;
            // test shared variable 'state_':
            // first access to 'state_' -> cache miss
            // successive accesses to 'state_' -> cache hit
            // if 'state_' was released by another fiber,
            // the cached 'state_' is invalidated -> cache miss
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                if ( BOOST_FIBERS_SPIN_BEFORE_SLEEP0 > retries) {
                    ++retries;
                    // give the CPU a hint that this thread is in a "spin-wait" loop;
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevents pipeline stalls
                    cpu_relax();
                } else if ( BOOST_FIBERS_SPIN_BEFORE_YIELD > retries) {
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring3-to-ring0 transition costing about 1000 cycles;
                    // std::this_thread::sleep_for( 0us) lets this_thread give up the remaining part of its time slice
                    // if and only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                } else {
                    // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                    // but only to another thread on the same processor;
                    // instead of constantly checking, the thread checks only if no other useful work is pending
                    std::this_thread::yield();
                }
#else
                std::this_thread::yield();
#endif
            }
            // test-and-set shared variable 'state_';
            // the exchange is signaled over the bus every time, even if the test fails
            if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm;
                // linear_congruential_engine is a random number engine based on a linear congruential generator (LCG)
                std::uniform_int_distribution< std::size_t > distribution{
                    0, static_cast< std::size_t >( 1) << (std::min)(collisions, static_cast< std::size_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
                const std::size_t z = distribution( generator);
                ++collisions;
                for ( std::size_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevents pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, thread has acquired the lock
                break;
            }
        }
    }

    bool try_lock() noexcept {
        return spinlock_status::unlocked == state_.exchange( spinlock_status::locked, std::memory_order_acquire);
    }

    void unlock() noexcept {
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }
};

}}}

#endif // BOOST_FIBERS_SPINLOCK_TTAS_H
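
// A minimal usage sketch: spinlock_ttas provides lock()/unlock() (BasicLockable), so it
// can be paired with std::lock_guard. The names 'splk', 'counter' and 'increment' below
// are hypothetical, and the include path is assumed to match this header's location.
//
//   #include <mutex>                                     // std::lock_guard
//   #include <boost/fiber/detail/spinlock_ttas.hpp>
//
//   boost::fibers::detail::spinlock_ttas splk;           // shared lock instance
//   int counter = 0;                                     // data protected by 'splk'
//
//   void increment() {
//       // lock() spins (TTAS with exponential backoff) until acquired; unlock() runs on scope exit
//       std::lock_guard< boost::fibers::detail::spinlock_ttas > lk{ splk };
//       ++counter;
//   }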