/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014 Andrey Semashev
 */
/*!
 * \file   lockpool.cpp
 *
 * This file contains the implementation of the lock pool used to emulate atomic ops.
 */

#include <cstddef>
#include <boost/config.hpp>
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>

#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#include <boost/atomic/detail/operations_lockfree.hpp>
#elif !defined(BOOST_HAS_PTHREADS)
#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
#else
#include <pthread.h>
#define BOOST_ATOMIC_USE_PTHREAD
#endif

#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/detail/pause.hpp>

#if defined(BOOST_MSVC)
#pragma warning(push)
// 'struct_name' : structure was padded due to __declspec(align())
#pragma warning(disable: 4324)
#endif

namespace boost {
namespace atomics {
namespace detail {

namespace {

// Cache line size, in bytes
// NOTE: This constant is defined as a macro because some compilers (gcc 4.4, for one) don't allow enums or namespace-scope constants in alignment attributes
#if defined(__s390__) || defined(__s390x__)
#define BOOST_ATOMIC_CACHE_LINE_SIZE 256
#elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
#define BOOST_ATOMIC_CACHE_LINE_SIZE 128
#else
#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
#endif

#if defined(BOOST_ATOMIC_USE_PTHREAD)
typedef pthread_mutex_t lock_type;
#else
typedef atomics::detail::operations< 1u, false > lock_operations;
typedef lock_operations::storage_type lock_type;
#endif

enum
{
    padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
        (BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
};
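// Worked example (on an assumed typical 64-bit Linux target): in the pthread
// path sizeof(pthread_mutex_t) == 40 with a 64-byte cache line, so
// padding_size == 64 - 40 == 24; in the lock-free path the lock is a single
// byte, giving padding_size == 63. In the oversized branch the % binds
// tighter than the -, so the expression yields the padding needed to round
// the lock up to the next cache line boundary.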

template< unsigned int PaddingSize >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
{
    lock_type lock;
    // The additional padding is needed to avoid false sharing between locks
    char padding[PaddingSize];
};

template< >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
{
    lock_type lock;
};

typedef padded_lock< padding_size > padded_lock_t;
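// With the definitions above, sizeof(padded_lock_t) works out to a whole
// multiple of BOOST_ATOMIC_CACHE_LINE_SIZE, and the alignment attribute keeps
// each pool entry on its own cache line, so two threads spinning on different
// locks never invalidate each other's cache lines.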

static padded_lock_t g_lock_pool[41]
#if defined(BOOST_ATOMIC_USE_PTHREAD)
=
{
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    { PTHREAD_MUTEX_INITIALIZER }
}
#endif
;
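// The pool size of 41 is presumably chosen because it is prime: taking an
// address modulo 41 spreads pointers across all entries even when the
// addresses share a power-of-two alignment, whereas a power-of-two pool size
// would map such addresses onto a small subset of the locks.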

} // namespace


#if !defined(BOOST_ATOMIC_USE_PTHREAD)

// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
    m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
    // Test-and-test-and-set spinlock: attempt the atomic test-and-set, and on
    // failure spin on plain relaxed loads until the lock looks free again.
    // This keeps waiting threads reading a shared cache line instead of
    // bouncing it between cores with repeated atomic writes.
    while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire))
    {
        do
        {
            atomics::detail::pause();
        }
        while (!!lock_operations::load(*static_cast< lock_type* >(m_lock), memory_order_relaxed));
    }
}

BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
    lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release);
}

BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;

#else // !defined(BOOST_ATOMIC_USE_PTHREAD)

BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
    m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
    BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}

BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
    BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}

#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
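
// A minimal sketch of how the emulated (non-lock-free) atomic operations are
// expected to use the pool; the function name and signature below are
// illustrative assumptions, not the actual Boost.Atomic API:
//
//   template< typename T >
//   T emulated_exchange(T volatile& storage, T desired)
//   {
//       // The lock is selected by hashing the storage address, so all
//       // operations on the same object always contend on the same lock.
//       lockpool::scoped_lock guard(&storage);
//       T old = const_cast< T& >(storage);
//       const_cast< T& >(storage) = desired;
//       return old;
//   }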

BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
{
#if BOOST_ATOMIC_THREAD_FENCE > 0
    atomics::detail::thread_fence(memory_order_seq_cst);
#else
    // Emulate a full fence by locking/unlocking a mutex
    scoped_lock lock(0);
#endif
}
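// Note that the null pointer passed above hashes to pool entry 0
// (0 % 41 == 0), so the emulated fence amounts to acquiring that one lock and
// then releasing it when the scoped_lock goes out of scope.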

BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
{
    // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
    atomics::detail::signal_fence(memory_order_seq_cst);
#endif
}

} // namespace detail
} // namespace atomics
} // namespace boost

#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif