/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/ops_gcc_atomic.hpp
 *
 * This header contains implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif

#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\
    __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\
    __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\
    __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE
// There are platforms where we need to use larger storage types
#include <boost/atomic/detail/int_sizes.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#endif

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

#if defined(__INTEL_COMPILER)
// This is used to suppress warning #32013 described below for Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif

namespace boost {
namespace atomics {
namespace detail {

/*!
 * The function converts \c boost::memory_order values to the compiler-specific constants.
 *
 * NOTE: The intention is that the function is optimized away by the compiler, and the
 * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
 * work in this case because the standard atomics interface requires memory ordering
 * constants to be passed as function arguments, at which point they stop being constexpr.
 * However, it is crucial that the compiler sees constants and not runtime values,
 * because otherwise it just ignores the ordering value and always uses seq_cst.
 * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
 * gcc 4.8.2. Intel Compiler issues a warning in this case:
 *
 * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
 *
 * while gcc acts silently.
 *
 * To mitigate the problem ALL functions, including the atomic<> members, must be
 * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
 * all functions are called with constant orderings and call the intrinsics properly.
 *
 * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
 * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
 * all atomic operations will be executed with seq_cst semantics.
 */
BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
{
    return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
        (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
        (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
}
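
// An illustrative note (not part of the original header): with BOOST_FORCEINLINE applied
// throughout as described above, a call such as
//
//     gcc_atomic_operations< 4u, false >::load(s, memory_order_acquire)
//
// should fold to __atomic_load_n(&s, __ATOMIC_ACQUIRE), so the intrinsic receives a
// literal ordering constant rather than a runtime value (except in debug builds, as
// noted above, where everything falls back to seq_cst).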

template< std::size_t Size, bool Signed >
struct gcc_atomic_operations
{
    typedef typename make_storage_type< Size >::type storage_type;
    typedef typename make_storage_type< Size >::aligned aligned_storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;

    // Note: In the current implementation, gcc_atomic_operations is only used when the __atomic
    // intrinsics of the particular size are always lock-free (i.e. the corresponding LOCK_FREE
    // macro is 2, which, as with the standard ATOMIC_*_LOCK_FREE macros, means "always lock-free";
    // 1 means "sometimes"). Therefore it is safe to always set is_always_lock_free to true here.
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, false,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, true,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
    }
};

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)

// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16.
// A similar problem exists with gcc 7 as well, as it requires linking with libatomic to use the 16-byte intrinsics:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
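// (A descriptive note: in this case the operations are built by cas_based_operations,
// which implements the read-modify-write operations as compare-and-swap loops on top
// of gcc_dcas_x86_64, the cmpxchg16b-based double-width CAS; see ops_cas_based.hpp
// and ops_gcc_x86_dcas.hpp for the actual implementations.)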
template< bool Signed >
struct operations< 16u, Signed > :
    public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};

#else

template< bool Signed >
struct operations< 16u, Signed > :
    public gcc_atomic_operations< 16u, Signed >
{
};

#endif
#endif


#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)

// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< gcc_dcas_x86< Signed > >
{
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};

#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
      (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
      (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
      (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
      (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED

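// (A descriptive note: extending_cas_based_operations, declared in
// ops_extending_cas_based.hpp, emulates the 8-byte atomic on top of the 16-byte
// intrinsics, using compare-and-swap loops where a result has to be brought back
// to the original width; see that header for details.)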
template< bool Signed >
struct operations< 8u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 8u, Signed >
{
};

#else

template< bool Signed >
struct operations< 8u, Signed > :
    public gcc_atomic_operations< 8u, Signed >
{
};

#endif
#endif

#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 4u, Signed >
{
};

#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 4u, Signed >
{
};

#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

#else

template< bool Signed >
struct operations< 4u, Signed > :
    public gcc_atomic_operations< 4u, Signed >
{
};

#endif
#endif

#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 2u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 2u, Signed >
{
};

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 2u, Signed >
{
};

#endif

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public gcc_atomic_operations< 2u, Signed >
{
};

#endif
#endif

#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\
    (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\
    (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)

#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 2u, Signed >, 1u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 1u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 1u, Signed >
{
};

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 1u, Signed >
{
};

#endif

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public gcc_atomic_operations< 1u, Signed >
{
};

#endif
#endif

#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT32_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED

BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_