1 /*
2 * Distributed under the Boost Software License, Version 1.0.
3 * (See accompanying file LICENSE_1_0.txt or copy at
4 * http://www.boost.org/LICENSE_1_0.txt)
5 *
6 * Copyright (c) 2011 Helge Bahmann
7 * Copyright (c) 2013-2014, 2020 Andrey Semashev
8 */
9 /*!
10 * \file lock_pool.cpp
11 *
12 * This file contains the implementation of the lock pool used to emulate atomic operations.
13 */
14
15 #include <boost/predef/os/windows.h>
16 #if BOOST_OS_WINDOWS
17 // Include boost/winapi/config.hpp first to make sure target Windows version is selected by Boost.WinAPI
18 #include <boost/winapi/config.hpp>
19 #include <boost/predef/platform.h>
20 #endif
21
22 #include <cstddef>
23 #include <cstring>
24 #include <cstdlib>
25 #include <new>
26 #include <limits>
27 #include <boost/config.hpp>
28 #include <boost/assert.hpp>
29 #include <boost/static_assert.hpp>
30 #include <boost/memory_order.hpp>
31 #include <boost/atomic/capabilities.hpp>
32 #include <boost/atomic/detail/config.hpp>
33 #include <boost/atomic/detail/intptr.hpp>
34 #include <boost/atomic/detail/aligned_variable.hpp>
35 #include <boost/atomic/detail/core_operations.hpp>
36 #include <boost/atomic/detail/extra_operations.hpp>
37 #include <boost/atomic/detail/fence_operations.hpp>
38 #include <boost/atomic/detail/lock_pool.hpp>
39 #include <boost/atomic/detail/pause.hpp>
40 #include <boost/atomic/detail/once_flag.hpp>
41 #include <boost/atomic/detail/type_traits/alignment_of.hpp>
42
43 #include <boost/preprocessor/config/limits.hpp>
44 #include <boost/preprocessor/iteration/iterate.hpp>
45
46 #if BOOST_OS_WINDOWS
47 #include <boost/winapi/basic_types.hpp>
48 #include <boost/winapi/thread.hpp>
49 #include <boost/winapi/wait_constants.hpp>
50 #if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
51 #include <boost/winapi/srw_lock.hpp>
52 #include <boost/winapi/condition_variable.hpp>
53 #else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
54 #include <boost/winapi/critical_section.hpp>
55 #include <boost/winapi/semaphore.hpp>
56 #include <boost/winapi/handles.hpp>
57 #include <boost/winapi/wait.hpp>
58 #endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
59 #define BOOST_ATOMIC_USE_WINAPI
60 #else // BOOST_OS_WINDOWS
61 #include <boost/atomic/detail/futex.hpp>
62 #if defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
63 #define BOOST_ATOMIC_USE_FUTEX
64 #else // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
65 #include <pthread.h>
66 #define BOOST_ATOMIC_USE_PTHREAD
67 #endif // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
68 #include <cerrno>
69 #endif // BOOST_OS_WINDOWS
70
71 #include <boost/atomic/detail/header.hpp>
72
73 // Cache line size, in bytes
74 // NOTE: This constant is defined as a macro because some compilers (e.g. gcc 4.4) don't allow enums or namespace-scope constants in alignment attributes
75 #if defined(__s390__) || defined(__s390x__)
76 #define BOOST_ATOMIC_CACHE_LINE_SIZE 256
77 #elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
78 #define BOOST_ATOMIC_CACHE_LINE_SIZE 128
79 #else
80 #define BOOST_ATOMIC_CACHE_LINE_SIZE 64
81 #endif
82
83 namespace boost {
84 namespace atomics {
85 namespace detail {
86 namespace lock_pool {
87
88 namespace {
89
90 struct wait_state;
91 struct lock_state;
92
93 //! Base class for a wait state
94 struct wait_state_base
95 {
96 //! Number of waiters referencing this state
97 std::size_t m_ref_count;
98 //! Index of this wait state in the list
99 std::size_t m_index;
100
101 explicit wait_state_base(std::size_t index) BOOST_NOEXCEPT :
102 m_ref_count(0u),
103 m_index(index)
104 {
105 }
106
107 BOOST_DELETED_FUNCTION(wait_state_base(wait_state_base const&))
108 BOOST_DELETED_FUNCTION(wait_state_base& operator= (wait_state_base const&))
109 };
110
111 //! List of wait states. Must be a POD structure.
112 struct wait_state_list
113 {
114 //! List header
115 struct header
116 {
117 //! List size
118 std::size_t size;
119 //! List capacity
120 std::size_t capacity;
121 };
122
123 /*!
124 * \brief Pointer to the list header
125 *
126 * The list buffer consists of three adjacent areas: header object, array of atomic pointers and array of pointers to the wait_state structures.
127 * Each of the arrays has header.capacity elements, of which the first header.size elements correspond to the currently ongoing wait operations
128 * and the rest are spare elements. Spare wait_state structures may still be allocated (in which case the wait_state pointer is not null) and
129 * can be reused on future requests. Spare atomic pointers are null and unused.
130 *
131 * This memory layout was designed to optimize wait state lookup by atomic address and also support memory pooling to reduce dynamic memory allocations.
132 */
133 header* m_header;
134 //! The flag indicates that memory pooling is disabled. Set on process cleanup.
135 bool m_free_memory;
136
137 //! Alignment of pointer arrays in the buffer
138 static BOOST_CONSTEXPR_OR_CONST std::size_t entries_alignment = atomics::detail::alignment_of< void* >::value < 16u ? atomics::detail::alignment_of< void* >::value : 16u;
139 //! Offset from the list header to the beginning of the array of atomic pointers in the buffer
140 static BOOST_CONSTEXPR_OR_CONST std::size_t entries_offset = (sizeof(header) + entries_alignment - 1u) & ~static_cast< std::size_t >(entries_alignment - 1u);
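// Illustrative layout sketch (an addition, assuming a typical LP64 target where sizeof(header) == 16 and
// alignof(void*) == 8, so entries_alignment == 8 and entries_offset == 16). For a list of capacity C the
// single buffer allocated by allocate_buffer() looks like:
//
//   offset 0:                       header { size, capacity }
//   offset entries_offset:          const volatile void*[C]  - addresses of atomic objects being waited on
//   offset entries_offset + C * 8:  wait_state*[C]           - first `size` entries in use, the rest spare
//
// which matches the allocation size entries_offset + C * sizeof(void*) * 2u used below.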
141
142 //! Returns a pointer to the array of atomic pointers
143 static const volatile void** get_atomic_pointers(header* p) BOOST_NOEXCEPT
144 {
145 BOOST_ASSERT(p != NULL);
146 return reinterpret_cast< const volatile void** >(reinterpret_cast< unsigned char* >(p) + entries_offset);
147 }
148
149 //! Returns a pointer to the array of atomic pointers
150 const volatile void** get_atomic_pointers() const BOOST_NOEXCEPT
151 {
152 return get_atomic_pointers(m_header);
153 }
154
155 //! Returns a pointer to the array of pointers to the wait states
156 static wait_state** get_wait_states(const volatile void** ptrs, std::size_t capacity) BOOST_NOEXCEPT
157 {
158 return reinterpret_cast< wait_state** >(const_cast< void** >(ptrs + capacity));
159 }
160
161 //! Returns a pointer to the array of pointers to the wait states
162 static wait_state** get_wait_states(header* p) BOOST_NOEXCEPT
163 {
164 return get_wait_states(get_atomic_pointers(p), p->capacity);
165 }
166
167 //! Returns a pointer to the array of pointers to the wait states
168 wait_state** get_wait_states() const BOOST_NOEXCEPT
169 {
170 return get_wait_states(m_header);
171 }
172
173 //! Finds an element with the given pointer to the atomic object
174 wait_state* find(const volatile void* addr) const BOOST_NOEXCEPT
175 {
176 wait_state* ws = NULL;
177 if (BOOST_LIKELY(m_header != NULL))
178 {
179 const volatile void** addrs = get_atomic_pointers();
180 for (std::size_t i = 0u, n = m_header->size; i < n; ++i)
181 {
182 if (addrs[i] == addr)
183 {
184 ws = get_wait_states()[i];
185 break;
186 }
187 }
188 }
189
190 return ws;
191 }
192
193 //! Finds an existing element with the given pointer to the atomic object or allocates a new one. Returns NULL in case of failure.
194 wait_state* find_or_create(const volatile void* addr) BOOST_NOEXCEPT;
195 //! Releases the previously created wait state
196 void erase(wait_state* w) BOOST_NOEXCEPT;
197
198 //! Deallocates spare entries and the list buffer if no allocated entries are left
199 void free_spare() BOOST_NOEXCEPT;
200 //! Allocates new buffer for the list entries. Returns NULL in case of failure.
201 static header* allocate_buffer(std::size_t new_capacity, header* old_header = NULL) BOOST_NOEXCEPT;
202 };
203
204 #define BOOST_ATOMIC_WAIT_STATE_LIST_INIT { NULL, false }
205
206 // In the platform-specific definitions below, lock_state must be a POD structure and wait_state must derive from wait_state_base.
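// A short note on why this matters (descriptive addition): the global lock pool below is a statically
// initialized aggregate, so each lock_state and its embedded wait_state_list must be brace-initializable,
// e.g. (illustrative only):
//
//   wait_state_list list = BOOST_ATOMIC_WAIT_STATE_LIST_INIT; // { NULL, false } -> m_header, m_free_memory
//
// No constructors or destructors run for pool entries; cleanup happens explicitly in cleanup_lock_pool().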
207
208 #if defined(BOOST_ATOMIC_USE_PTHREAD)
209
210 //! State of a wait operation associated with an atomic object
211 struct wait_state :
212 public wait_state_base
213 {
214 //! Condition variable
215 pthread_cond_t m_cond;
216
217 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
218 wait_state_base(index)
219 {
220 BOOST_VERIFY(pthread_cond_init(&m_cond, NULL) == 0);
221 }
222
223 ~wait_state() BOOST_NOEXCEPT
224 {
225 pthread_cond_destroy(&m_cond);
226 }
227
228 //! Blocks in the wait operation until notified
229 void wait(lock_state& state) BOOST_NOEXCEPT;
230
231 //! Wakes up one thread blocked in the wait operation
232 void notify_one(lock_state&) BOOST_NOEXCEPT
233 {
234 BOOST_VERIFY(pthread_cond_signal(&m_cond) == 0);
235 }
236 //! Wakes up all threads blocked in the wait operation
237 void notify_all(lock_state&) BOOST_NOEXCEPT
238 {
239 BOOST_VERIFY(pthread_cond_broadcast(&m_cond) == 0);
240 }
241 };
242
243 //! Lock pool entry
244 struct lock_state
245 {
246 //! Mutex
247 pthread_mutex_t m_mutex;
248 //! Wait states
249 wait_state_list m_wait_states;
250
251 //! Locks the mutex for a short duration
252 void short_lock() BOOST_NOEXCEPT
253 {
254 long_lock();
255 }
256
257 //! Locks the mutex for a long duration
258 void long_lock() BOOST_NOEXCEPT
259 {
260 for (unsigned int i = 0u; i < 5u; ++i)
261 {
262 if (BOOST_LIKELY(pthread_mutex_trylock(&m_mutex) == 0))
263 return;
264
265 atomics::detail::pause();
266 }
267
268 BOOST_VERIFY(pthread_mutex_lock(&m_mutex) == 0);
269 }
270
271 //! Unlocks the mutex
272 void unlock() BOOST_NOEXCEPT
273 {
274 BOOST_VERIFY(pthread_mutex_unlock(&m_mutex) == 0);
275 }
276 };
277
278 #define BOOST_ATOMIC_LOCK_STATE_INIT { PTHREAD_MUTEX_INITIALIZER, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
279
280 //! Blocks in the wait operation until notified
281 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
282 {
283 BOOST_VERIFY(pthread_cond_wait(&m_cond, &state.m_mutex) == 0);
284 }
285
286 #elif defined(BOOST_ATOMIC_USE_FUTEX)
287
288 typedef atomics::detail::core_operations< 4u, false, false > futex_operations;
289 // The storage type must be a 32-bit object, as required by the futex API
290 BOOST_STATIC_ASSERT_MSG(futex_operations::is_always_lock_free && sizeof(futex_operations::storage_type) == 4u, "Boost.Atomic unsupported target platform: native atomic operations not implemented for 32-bit integers");
291 typedef atomics::detail::extra_operations< futex_operations, futex_operations::storage_size, futex_operations::is_signed > futex_extra_operations;
292
293 namespace mutex_bits {
294
295 //! The bit indicates a locked mutex
296 BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type locked = 1u;
297 //! The bit indicates that there is at least one thread blocked waiting for the mutex to be released
298 BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type contended = 1u << 1;
299 //! The lowest bit of the counter bits used to mitigate ABA problem. This and any higher bits in the mutex state constitute the counter.
300 BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type counter_one = 1u << 2;
301
302 } // namespace mutex_bits
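// Illustrative summary of the mutex futex encoding defined above:
//
//   bit 0      - locked
//   bit 1      - contended (at least one thread parked in futex_wait on the mutex)
//   bits 2..31 - counter, incremented by counter_one on every unlock to mitigate ABA between
//                sampling the futex value and calling futex_wait
//
// For example, a locked, contended mutex that has been unlocked 5 times so far holds (5 << 2) | 2u | 1u == 23u.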
303
304 //! State of a wait operation associated with an atomic object
305 struct wait_state :
306 public wait_state_base
307 {
308 //! Condition variable futex. Used as the counter of notify calls.
309 BOOST_ATOMIC_DETAIL_ALIGNED_VAR(futex_operations::storage_alignment, futex_operations::storage_type, m_cond);
310 //! Number of currently blocked waiters
311 futex_operations::storage_type m_waiter_count;
312
313 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
314 wait_state_base(index),
315 m_cond(0u),
316 m_waiter_count(0u)
317 {
318 }
319
320 //! Blocks in the wait operation until notified
321 void wait(lock_state& state) BOOST_NOEXCEPT;
322
323 //! Wakes up one thread blocked in the wait operation
324 void notify_one(lock_state& state) BOOST_NOEXCEPT;
325 //! Wakes up all threads blocked in the wait operation
326 void notify_all(lock_state& state) BOOST_NOEXCEPT;
327 };
328
329 //! Lock pool entry
330 struct lock_state
331 {
332 //! Mutex futex
333 BOOST_ATOMIC_DETAIL_ALIGNED_VAR(futex_operations::storage_alignment, futex_operations::storage_type, m_mutex);
334 //! Wait states
335 wait_state_list m_wait_states;
336
337 //! Locks the mutex for a short duration
338 void short_lock() BOOST_NOEXCEPT
339 {
340 long_lock();
341 }
342
343 //! Locks the mutex for a long duration
344 void long_lock() BOOST_NOEXCEPT
345 {
346 for (unsigned int i = 0u; i < 10u; ++i)
347 {
348 futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
349 if (BOOST_LIKELY((prev_state & mutex_bits::locked) == 0u))
350 {
351 futex_operations::storage_type new_state = prev_state | mutex_bits::locked;
352 if (BOOST_LIKELY(futex_operations::compare_exchange_strong(m_mutex, prev_state, new_state, boost::memory_order_acquire, boost::memory_order_relaxed)))
353 return;
354 }
355
356 atomics::detail::pause();
357 }
358
359 lock_slow_path();
360 }
361
362 //! Locks the mutex for a long duration
363 void lock_slow_path() BOOST_NOEXCEPT
364 {
365 futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
366 while (true)
367 {
368 if (BOOST_LIKELY((prev_state & mutex_bits::locked) == 0u))
369 {
370 futex_operations::storage_type new_state = prev_state | mutex_bits::locked;
371 if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_acquire, boost::memory_order_relaxed)))
372 return;
373 }
374 else
375 {
376 futex_operations::storage_type new_state = prev_state | mutex_bits::contended;
377 if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_relaxed, boost::memory_order_relaxed)))
378 {
379 atomics::detail::futex_wait_private(&m_mutex, new_state);
380 prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
381 }
382 }
383 }
384 }
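// Worked example of the slow path above (an illustration, not part of the implementation): thread A holds
// the lock, so the state reads `locked`. Thread B CASes in the contended bit and calls
// futex_wait_private(&m_mutex, locked | contended). When A unlocks, the locked bit is cleared, the counter
// is bumped and, since contended was set, futex_signal_private(&m_mutex) wakes B, which re-reads the state
// and CAS-acquires the lock. If A's unlock slips in between B's CAS and B's futex_wait, the futex word no
// longer equals the value B passed, so the kernel returns immediately and B simply retries - no wake-up is lost.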
385
386 //! Unlocks the mutex
387 void unlock() BOOST_NOEXCEPT
388 {
389 futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
390 futex_operations::storage_type new_state;
391 while (true)
392 {
393 new_state = (prev_state & (~mutex_bits::locked)) + mutex_bits::counter_one;
394 if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_release, boost::memory_order_relaxed)))
395 break;
396 }
397
398 if ((prev_state & mutex_bits::contended) != 0u)
399 {
400 int woken_count = atomics::detail::futex_signal_private(&m_mutex);
401 if (woken_count == 0)
402 {
403 prev_state = new_state;
404 new_state &= ~mutex_bits::contended;
405 futex_operations::compare_exchange_strong(m_mutex, prev_state, new_state, boost::memory_order_relaxed, boost::memory_order_relaxed);
406 }
407 }
408 }
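// Descriptive note on the counter (added comment): bumping the counter bits on every unlock guarantees the
// futex word changes even if the lock is immediately re-acquired, so a waiter that sampled a stale value
// cannot block on it. When futex_signal_private() wakes nobody, the contended bit is cleared opportunistically
// with a CAS; if that CAS loses a race the bit just stays set and the next unlock issues another, harmless wake.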
409 };
410
411 #if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
412 #define BOOST_ATOMIC_LOCK_STATE_INIT { 0u, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
413 #else
414 #define BOOST_ATOMIC_LOCK_STATE_INIT { { 0u }, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
415 #endif
416
417 //! Blocks in the wait operation until notified
418 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
419 {
420 const futex_operations::storage_type prev_cond = m_cond;
421 ++m_waiter_count;
422
423 state.unlock();
424
425 while (true)
426 {
427 int err = atomics::detail::futex_wait_private(&m_cond, prev_cond);
428 if (BOOST_LIKELY(err != EINTR))
429 break;
430 }
431
432 state.long_lock();
433
434 --m_waiter_count;
435 }
436
437 //! Wakes up one thread blocked in the wait operation
438 inline void wait_state::notify_one(lock_state& state) BOOST_NOEXCEPT
439 {
440 ++m_cond;
441
442 if (BOOST_LIKELY(m_waiter_count > 0u))
443 {
444 // Move one blocked thread to the mutex futex and mark the mutex contended so that the thread is unblocked on unlock()
445 atomics::detail::futex_requeue_private(&m_cond, &state.m_mutex, 0u, 1u);
446 futex_extra_operations::opaque_or(state.m_mutex, mutex_bits::contended, boost::memory_order_relaxed);
447 }
448 }
449
450 //! Wakes up all threads blocked in the wait operation
451 inline void wait_state::notify_all(lock_state& state) BOOST_NOEXCEPT
452 {
453 ++m_cond;
454
455 if (BOOST_LIKELY(m_waiter_count > 0u))
456 {
457 // Move blocked threads to the mutex futex and mark the mutex contended so that a thread is unblocked on unlock()
458 atomics::detail::futex_requeue_private(&m_cond, &state.m_mutex, 0u);
459 futex_extra_operations::opaque_or(state.m_mutex, mutex_bits::contended, boost::memory_order_relaxed);
460 }
461 }
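// Descriptive note (added comment): notify_one/notify_all never wake waiters directly. futex_requeue_private
// moves them from the condition futex m_cond onto the mutex futex m_mutex and the contended bit is set, so
// the threads are released one at a time by subsequent unlock() calls instead of stampeding for the mutex.
// Incrementing m_cond before the requeue also makes a concurrent wait() that has not yet reached futex_wait
// return immediately, because the futex value no longer matches its saved prev_cond.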
462
463 #else
464
465 #if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
466
467 //! State of a wait operation associated with an atomic object
468 struct wait_state :
469 public wait_state_base
470 {
471 //! Condition variable
472 boost::winapi::CONDITION_VARIABLE_ m_cond;
473
474 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
475 wait_state_base(index)
476 {
477 boost::winapi::InitializeConditionVariable(&m_cond);
478 }
479
480 //! Blocks in the wait operation until notified
481 void wait(lock_state& state) BOOST_NOEXCEPT;
482
483 //! Wakes up one thread blocked in the wait operation
484 void notify_one(lock_state&) BOOST_NOEXCEPT
485 {
486 boost::winapi::WakeConditionVariable(&m_cond);
487 }
488 //! Wakes up all threads blocked in the wait operation
489 void notify_all(lock_state&) BOOST_NOEXCEPT
490 {
491 boost::winapi::WakeAllConditionVariable(&m_cond);
492 }
493 };
494
495 //! Lock pool entry
496 struct lock_state
497 {
498 //! Mutex
499 boost::winapi::SRWLOCK_ m_mutex;
500 //! Wait states
501 wait_state_list m_wait_states;
502
503 //! Locks the mutex for a short duration
504 void short_lock() BOOST_NOEXCEPT
505 {
506 long_lock();
507 }
508
509 //! Locks the mutex for a long duration
510 void long_lock() BOOST_NOEXCEPT
511 {
512 // Presumably, AcquireSRWLockExclusive already implements spinning internally, so there's no point in doing this ourselves.
513 boost::winapi::AcquireSRWLockExclusive(&m_mutex);
514 }
515
516 //! Unlocks the mutex
517 void unlock() BOOST_NOEXCEPT
518 {
519 boost::winapi::ReleaseSRWLockExclusive(&m_mutex);
520 }
521 };
522
523 #define BOOST_ATOMIC_LOCK_STATE_INIT { BOOST_WINAPI_SRWLOCK_INIT, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
524
525 //! Blocks in the wait operation until notified
526 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
527 {
528 boost::winapi::SleepConditionVariableSRW(&m_cond, &state.m_mutex, boost::winapi::infinite, 0u);
529 }
530
531 #else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
532
533 typedef atomics::detail::core_operations< 4u, false, false > mutex_operations;
534 BOOST_STATIC_ASSERT_MSG(mutex_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for 32-bit integers");
535
536 namespace fallback_mutex_bits {
537
538 //! The bit indicates a locked mutex
539 BOOST_CONSTEXPR_OR_CONST mutex_operations::storage_type locked = 1u;
540 //! The bit indicates that the critical section is initialized and should be used instead of the fallback mutex
541 BOOST_CONSTEXPR_OR_CONST mutex_operations::storage_type critical_section_initialized = 1u << 1;
542
543 } // namespace fallback_mutex_bits
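// Descriptive sketch of the lazily initialized mutex states (added comment, using the bits defined above):
//
//   m_mutex_fallback == 0u                            - nothing initialized yet
//   m_mutex_fallback == locked                        - the critical section is being initialized, or its
//                                                       initialization failed and the word itself acts as a lock
//   m_mutex_fallback == critical_section_initialized  - m_mutex is valid and lock/unlock go through it
//
// long_lock() performs the 0 -> locked transition with a CAS; on successful InitializeCriticalSectionAndSpinCount
// it stores critical_section_initialized, otherwise the thread keeps the fallback word locked and unlock()
// later releases it by storing 0.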
544
545 //! State of a wait operation associated with an atomic object
546 struct wait_state :
547 public wait_state_base
548 {
549 /*!
550 * \brief A semaphore used to block one or more threads
551 *
552 * A semaphore can be used to block a thread if it has no ongoing notifications (i.e. \c m_notify_count is 0).
553 * If there is no such semaphore, the thread has to allocate a new one to block on. This is to guarantee
554 * that a thread that is blocked after a notification is not immediately released by the semaphore while
555 * there are previously blocked threads.
556 *
557 * Semaphores are organized in a circular doubly linked list. A single semaphore object represents a list
558 * of one semaphore and is said to be "singular".
559 */
560 struct semaphore
561 {
562 //! Pointer to the next semaphore in the list
563 semaphore* m_next;
564 //! Pointer to the previous semaphore in the list
565 semaphore* m_prev;
566
567 //! Semaphore handle
568 boost::winapi::HANDLE_ m_semaphore;
569 //! Number of threads blocked on the semaphore
570 boost::winapi::ULONG_ m_waiter_count;
571 //! Number of threads released by notifications
572 boost::winapi::ULONG_ m_notify_count;
573
574 semaphore() BOOST_NOEXCEPT :
575 m_semaphore(boost::winapi::create_anonymous_semaphore(NULL, 0, (std::numeric_limits< boost::winapi::LONG_ >::max)())),
576 m_waiter_count(0u),
577 m_notify_count(0u)
578 {
579 m_next = m_prev = this;
580 }
581
582 ~semaphore() BOOST_NOEXCEPT
583 {
584 BOOST_ASSERT(is_singular());
585
586 if (BOOST_LIKELY(m_semaphore != boost::winapi::invalid_handle_value))
587 boost::winapi::CloseHandle(m_semaphore);
588 }
589
590 //! Creates a new semaphore or returns null in case of failure
591 static semaphore* create() BOOST_NOEXCEPT
592 {
593 semaphore* p = new (std::nothrow) semaphore();
594 if (BOOST_UNLIKELY(p != NULL && p->m_semaphore == boost::winapi::invalid_handle_value))
595 {
596 delete p;
597 p = NULL;
598 }
599 return p;
600 }
601
602 //! Returns \c true if the semaphore is the single element of the list
603 bool is_singular() const BOOST_NOEXCEPT
604 {
605 return m_next == this /* && m_prev == this */;
606 }
607
608 //! Inserts the semaphore list after the specified other semaphore
609 void link_after(semaphore* that) BOOST_NOEXCEPT
610 {
611 link_before(that->m_next);
612 }
613
614 //! Inserts the semaphore list before the specified other semaphore
615 void link_before(semaphore* that) BOOST_NOEXCEPT
616 {
617 semaphore* prev = that->m_prev;
618 that->m_prev = m_prev;
619 m_prev->m_next = that;
620 m_prev = prev;
621 prev->m_next = this;
622 }
623
624 //! Removes the semaphore from the list
625 void unlink() BOOST_NOEXCEPT
626 {
627 // Load pointers beforehand, in case we are the only element in the list
628 semaphore* next = m_next;
629 semaphore* prev = m_prev;
630 prev->m_next = next;
631 next->m_prev = prev;
632 m_next = m_prev = this;
633 }
634
635 BOOST_DELETED_FUNCTION(semaphore(semaphore const&))
636 BOOST_DELETED_FUNCTION(semaphore& operator= (semaphore const&))
637 };
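// Illustrative scenario for the rule documented above (added comment): suppose threads T1 and T2 are blocked
// on semaphore S and notify_one() releases one count on S. If a newly arriving waiter T3 were allowed to
// block on S as well, T3 could consume that count before T1 or T2, stealing a notification that was issued
// before it started waiting. Keeping semaphores with pending notifications out of the wait list (and giving
// new waiters a spare or freshly created semaphore) preserves the expected ordering.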
638
639 //! Doubly linked circular list of semaphores
640 class semaphore_list
641 {
642 private:
643 semaphore* m_head;
644
645 public:
646 semaphore_list() BOOST_NOEXCEPT :
647 m_head(NULL)
648 {
649 }
650
651 //! Returns \c true if the list is empty
652 bool empty() const BOOST_NOEXCEPT
653 {
654 return m_head == NULL;
655 }
656
657 //! Returns the first semaphore in the list
658 semaphore* front() const BOOST_NOEXCEPT
659 {
660 return m_head;
661 }
662
663 //! Returns the first semaphore in the list and leaves the list empty
664 semaphore* eject() BOOST_NOEXCEPT
665 {
666 semaphore* sem = m_head;
667 m_head = NULL;
668 return sem;
669 }
670
671 //! Inserts the semaphore at the beginning of the list
672 void push_front(semaphore* sem) BOOST_NOEXCEPT
673 {
674 if (m_head)
675 sem->link_before(m_head);
676
677 m_head = sem;
678 }
679
680 //! Removes the first semaphore from the beginning of the list
681 semaphore* pop_front() BOOST_NOEXCEPT
682 {
683 BOOST_ASSERT(!empty());
684 semaphore* sem = m_head;
685 erase(sem);
686 return sem;
687 }
688
689 //! Removes the semaphore from the list
690 void erase(semaphore* sem) BOOST_NOEXCEPT
691 {
692 if (sem->is_singular())
693 {
694 BOOST_ASSERT(m_head == sem);
695 m_head = NULL;
696 }
697 else
698 {
699 if (m_head == sem)
700 m_head = sem->m_next;
701 sem->unlink();
702 }
703 }
704
705 BOOST_DELETED_FUNCTION(semaphore_list(semaphore_list const&))
706 BOOST_DELETED_FUNCTION(semaphore_list& operator= (semaphore_list const&))
707 };
708
709 //! List of semaphores used for notifying. Here, every semaphore has m_notify_count > 0 && m_waiter_count > 0.
710 semaphore_list m_notify_semaphores;
711 //! List of semaphores used for waiting. Here, every semaphore has m_notify_count == 0 && m_waiter_count > 0.
712 semaphore_list m_wait_semaphores;
713 //! List of free semaphores. Here, every semaphore has m_notify_count == 0 && m_waiter_count == 0.
714 semaphore_list m_free_semaphores;
715
716 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
717 wait_state_base(index)
718 {
719 }
720
721 ~wait_state() BOOST_NOEXCEPT
722 {
723 // All wait and notification operations must have been completed
724 BOOST_ASSERT(m_notify_semaphores.empty());
725 BOOST_ASSERT(m_wait_semaphores.empty());
726
727 semaphore* sem = m_free_semaphores.eject();
728 if (sem)
729 {
730 while (true)
731 {
732 bool was_last = sem->is_singular();
733 semaphore* next = sem->m_next;
734 sem->unlink();
735
736 delete sem;
737
738 if (was_last)
739 break;
740
741 sem = next;
742 }
743 }
744 }
745
746 //! Blocks in the wait operation until notified
747 void wait(lock_state& state) BOOST_NOEXCEPT;
748 //! Fallback implementation of wait
749 void wait_fallback(lock_state& state) BOOST_NOEXCEPT;
750
751 //! Wakes up one thread blocked in the wait operation
752 void notify_one(lock_state&) BOOST_NOEXCEPT
753 {
754 if (m_notify_semaphores.empty())
755 {
756 if (m_wait_semaphores.empty())
757 return;
758
759 // Move the semaphore with waiters to the notify list
760 m_notify_semaphores.push_front(m_wait_semaphores.pop_front());
761 }
762
763 semaphore* sem = m_notify_semaphores.front();
764 ++sem->m_notify_count;
765
766 if (sem->m_notify_count == sem->m_waiter_count)
767 {
768 // Remove this semaphore from the list. The waiter will re-insert it into the waiter or free list once there are no more pending notifications in it.
769 m_notify_semaphores.erase(sem);
770 }
771
772 boost::winapi::ReleaseSemaphore(sem->m_semaphore, 1, NULL);
773 }
774
775 //! Wakes up all threads blocked in the wait operation
776 void notify_all(lock_state&) BOOST_NOEXCEPT
777 {
778 // Combine all notify and waiter semaphores in one list
779 semaphore* sem = m_notify_semaphores.eject();
780 if (sem)
781 {
782 if (!m_wait_semaphores.empty())
783 {
784 m_wait_semaphores.eject()->link_before(sem);
785 }
786 }
787 else
788 {
789 sem = m_wait_semaphores.eject();
790 }
791
792 if (sem)
793 {
794 while (true)
795 {
796 bool was_last = sem->is_singular();
797 semaphore* next = sem->m_next;
798 sem->unlink();
799
800 boost::winapi::ULONG_ count = sem->m_waiter_count - sem->m_notify_count;
801 sem->m_notify_count += count;
802
803 boost::winapi::ReleaseSemaphore(sem->m_semaphore, count, NULL);
804
805 if (was_last)
806 break;
807
808 sem = next;
809 }
810 }
811 }
812 };
813
814 //! Lock pool entry
815 struct lock_state
816 {
817 //! Mutex
818 boost::winapi::CRITICAL_SECTION_ m_mutex;
819 //! Fallback mutex. Used as indicator of critical section initialization state and a fallback mutex, if critical section cannot be initialized.
820 BOOST_ATOMIC_DETAIL_ALIGNED_VAR(mutex_operations::storage_alignment, mutex_operations::storage_type, m_mutex_fallback);
821 //! Wait states
822 wait_state_list m_wait_states;
823
824 //! Locks the mutex for a short duration
825 void short_lock() BOOST_NOEXCEPT
826 {
827 long_lock();
828 }
829
830 //! Locks the mutex for a long duration
831 void long_lock() BOOST_NOEXCEPT
832 {
833 mutex_operations::storage_type fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
834 while (true)
835 {
836 if (BOOST_LIKELY(fallback_state == fallback_mutex_bits::critical_section_initialized))
837 {
838 lock_cs:
839 boost::winapi::EnterCriticalSection(&m_mutex);
840 return;
841 }
842
843 while (fallback_state == 0u)
844 {
845 if (!mutex_operations::compare_exchange_weak(m_mutex_fallback, fallback_state, fallback_mutex_bits::locked, boost::memory_order_acquire, boost::memory_order_relaxed))
846 continue;
847
848 if (BOOST_LIKELY(!!boost::winapi::InitializeCriticalSectionAndSpinCount(&m_mutex, 100u)))
849 {
850 mutex_operations::store(m_mutex_fallback, fallback_mutex_bits::critical_section_initialized, boost::memory_order_release);
851 goto lock_cs;
852 }
853
854 // We failed to init the critical section, leave the fallback mutex locked and return
855 return;
856 }
857
858 if (fallback_state == fallback_mutex_bits::locked)
859 {
860 // Wait until the fallback mutex is unlocked
861 boost::winapi::SwitchToThread();
862 fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
863 }
864 }
865 }
866
867 //! Unlocks the mutex
868 void unlock() BOOST_NOEXCEPT
869 {
870 mutex_operations::storage_type fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
871 if (BOOST_LIKELY(fallback_state == fallback_mutex_bits::critical_section_initialized))
872 {
873 boost::winapi::LeaveCriticalSection(&m_mutex);
874 return;
875 }
876
877 mutex_operations::store(m_mutex_fallback, 0u, boost::memory_order_release);
878 }
879 };
880
881 #if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
882 #define BOOST_ATOMIC_LOCK_STATE_INIT { {}, 0u, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
883 #else
884 #define BOOST_ATOMIC_LOCK_STATE_INIT { {}, { 0u }, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
885 #endif
886
887 //! Blocks in the wait operation until notified
888 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
889 {
890 // Find a semaphore to block on
891 semaphore* sem = m_wait_semaphores.front();
892 if (sem)
893 {
894 while (sem->m_waiter_count >= static_cast< boost::winapi::ULONG_ >((std::numeric_limits< boost::winapi::LONG_ >::max)()))
895 {
896 if (sem->m_next == m_wait_semaphores.front())
897 {
898 sem = NULL;
899 break;
900 }
901
902 sem = sem->m_next;
903 }
904 }
905
906 if (!sem)
907 {
908 if (BOOST_LIKELY(!m_free_semaphores.empty()))
909 {
910 sem = m_free_semaphores.pop_front();
911 }
912 else
913 {
914 sem = semaphore::create();
915 if (BOOST_UNLIKELY(!sem))
916 {
917 wait_fallback(state);
918 return;
919 }
920 }
921
922 m_wait_semaphores.push_front(sem);
923 }
924
925 ++sem->m_waiter_count;
926
927 state.unlock();
928
929 boost::winapi::WaitForSingleObject(sem->m_semaphore, boost::winapi::infinite);
930
931 state.long_lock();
932
933 --sem->m_waiter_count;
934
935 if (sem->m_notify_count > 0u)
936 {
937 // This semaphore is either in the notify list or not in a list at all
938 if (--sem->m_notify_count == 0u)
939 {
940 if (!sem->is_singular() || sem == m_notify_semaphores.front())
941 m_notify_semaphores.erase(sem);
942
943 semaphore_list* list = sem->m_waiter_count == 0u ? &m_free_semaphores : &m_wait_semaphores;
944 list->push_front(sem);
945 }
946 }
947 else if (sem->m_waiter_count == 0u)
948 {
949 // Move the semaphore to the free list
950 m_wait_semaphores.erase(sem);
951 m_free_semaphores.push_front(sem);
952 }
953 }
954
955 //! Fallback implementation of wait
956 inline void wait_state::wait_fallback(lock_state& state) BOOST_NOEXCEPT
957 {
958 state.unlock();
959
960 boost::winapi::Sleep(0);
961
962 state.long_lock();
963 }
964
965 #endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
966
967 #endif
968
969 enum
970 {
971 tail_size = sizeof(lock_state) % BOOST_ATOMIC_CACHE_LINE_SIZE,
972 padding_size = tail_size > 0 ? BOOST_ATOMIC_CACHE_LINE_SIZE - tail_size : 0u
973 };
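// Worked example of the padding arithmetic (illustrative; actual sizes are platform dependent): if
// sizeof(lock_state) were 40 with a 64-byte cache line, tail_size == 40 and padding_size == 24, so each
// padded entry occupies exactly one cache line and neighbouring pool entries never share one. When
// sizeof(lock_state) is already a multiple of the cache line size, the padding_size == 0 specialization
// below avoids declaring a zero-length array.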
974
975 template< unsigned int PaddingSize >
976 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock_state
977 {
978 lock_state state;
979 // The additional padding is needed to avoid false sharing between locks
980 char padding[PaddingSize];
981 };
982
983 template< >
984 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock_state< 0u >
985 {
986 lock_state state;
987 };
988
989 typedef padded_lock_state< padding_size > padded_lock_state_t;
990
991 #if !defined(BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2)
992 #define BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 8
993 #endif
994 #if (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2) < 0
995 #error "Boost.Atomic: BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 macro value is negative"
996 #endif
997 #define BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE (1ull << (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2))
998
999 //! Lock pool size. Must be a power of two.
1000 BOOST_CONSTEXPR_OR_CONST std::size_t lock_pool_size = static_cast< std::size_t >(1u) << (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2);
1001
1002 static padded_lock_state_t g_lock_pool[lock_pool_size] =
1003 {
1004 #if BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
1005 #if (BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE / 256u) > BOOST_PP_LIMIT_ITERATION
1006 #error "Boost.Atomic: BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 macro value is too large"
1007 #endif
1008 #define BOOST_PP_ITERATION_PARAMS_1 (3, (1, (BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE / 256u), "lock_pool_init256.ipp"))
1009 #else // BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
1010 #define BOOST_PP_ITERATION_PARAMS_1 (3, (1, BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE, "lock_pool_init1.ipp"))
1011 #endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
1012 #include BOOST_PP_ITERATE()
1013 #undef BOOST_PP_ITERATION_PARAMS_1
1014 };
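// Descriptive note (an assumption about the .ipp helpers, not verified here): the BOOST_PP_ITERATE() include
// above presumably expands to lock_pool_size copies of the aggregate initializer, roughly
//   { BOOST_ATOMIC_LOCK_STATE_INIT }, { BOOST_ATOMIC_LOCK_STATE_INIT }, /* ... */
// with lock_pool_init1.ipp emitting one entry per iteration and lock_pool_init256.ipp emitting 256 at a time,
// so that pools larger than BOOST_PP_LIMIT_ITERATION entries can still be initialized.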
1015
1016 //! Pool cleanup function
1017 void cleanup_lock_pool()
1018 {
1019 for (std::size_t i = 0u; i < lock_pool_size; ++i)
1020 {
1021 lock_state& state = g_lock_pool[i].state;
1022 state.long_lock();
1023 state.m_wait_states.m_free_memory = true;
1024 state.m_wait_states.free_spare();
1025 state.unlock();
1026 }
1027 }
1028
1029 BOOST_STATIC_ASSERT_MSG(once_flag_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for bytes");
1030 static once_flag g_pool_cleanup_registered = {};
1031
1032 //! Returns index of the lock pool entry for the given pointer value
1033 BOOST_FORCEINLINE std::size_t get_lock_index(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
1034 {
1035 return h & (lock_pool_size - 1u);
1036 }
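// Example (illustrative): with the default BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 == 8 the pool has 256 entries and
// get_lock_index(h) is simply h & 0xFFu. The hash h is presumably derived from the atomic object's address by
// the callers in the library headers, so the same object always maps to the same lock while distinct objects
// usually map to different ones.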
1037
1038 //! Finds an existing element with the given pointer to the atomic object or allocates a new one
1039 inline wait_state* wait_state_list::find_or_create(const volatile void* addr) BOOST_NOEXCEPT
1040 {
1041 if (BOOST_UNLIKELY(m_header == NULL))
1042 {
1043 BOOST_CONSTEXPR_OR_CONST std::size_t initial_capacity = (16u / sizeof(void*)) < 2u ? 2u : (16u / sizeof(void*));
1044 m_header = allocate_buffer(initial_capacity);
1045 if (BOOST_UNLIKELY(m_header == NULL))
1046 return NULL;
1047 }
1048 else
1049 {
1050 wait_state* ws = this->find(addr);
1051 if (BOOST_LIKELY(ws != NULL))
1052 return ws;
1053
1054 if (BOOST_UNLIKELY(m_header->size == m_header->capacity))
1055 {
1056 header* new_header = allocate_buffer(m_header->capacity * 2u, m_header);
1057 if (BOOST_UNLIKELY(new_header == NULL))
1058 return NULL;
1059 std::free(static_cast< void* >(m_header));
1060 m_header = new_header;
1061 }
1062 }
1063
1064 const std::size_t index = m_header->size;
1065 BOOST_ASSERT(index < m_header->capacity);
1066
1067 wait_state** pw = get_wait_states() + index;
1068 wait_state* w = *pw;
1069 if (BOOST_UNLIKELY(w == NULL))
1070 {
1071 w = new (std::nothrow) wait_state(index);
1072 if (BOOST_UNLIKELY(w == NULL))
1073 return NULL;
1074 *pw = w;
1075 }
1076
1077 get_atomic_pointers()[index] = addr;
1078
1079 ++m_header->size;
1080
1081 return w;
1082 }
1083
1084 //! Releases the previously created wait state
1085 inline void wait_state_list::erase(wait_state* w) BOOST_NOEXCEPT
1086 {
1087 BOOST_ASSERT(m_header != NULL);
1088
1089 const volatile void** pa = get_atomic_pointers();
1090 wait_state** pw = get_wait_states();
1091
1092 std::size_t index = w->m_index;
1093
1094 BOOST_ASSERT(index < m_header->size);
1095 BOOST_ASSERT(pw[index] == w);
1096
1097 std::size_t last_index = m_header->size - 1u;
1098
1099 if (index != last_index)
1100 {
1101 pa[index] = pa[last_index];
1102 pa[last_index] = NULL;
1103
1104 wait_state* last_w = pw[last_index];
1105 pw[index] = last_w;
1106 pw[last_index] = w;
1107
1108 last_w->m_index = index;
1109 w->m_index = last_index;
1110 }
1111 else
1112 {
1113 pa[index] = NULL;
1114 }
1115
1116 --m_header->size;
1117
1118 if (BOOST_UNLIKELY(m_free_memory))
1119 free_spare();
1120 }
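// Worked example of the removal above (illustrative): with size == 3 and entries [A, B, C], erasing B moves the
// last active entry C into B's slot and parks B's wait_state in the now-spare slot:
//
//   atomic pointers: [A, C, NULL]     (size becomes 2)
//   wait states:     [wsA, wsC, wsB]  (wsB stays allocated as a spare for reuse by find_or_create)
//
// These spare wait_state objects are what free_spare() deletes once memory pooling is disabled.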
1121
1122 //! Allocates new buffer for the list entries
1123 wait_state_list::header* wait_state_list::allocate_buffer(std::size_t new_capacity, header* old_header) BOOST_NOEXCEPT
1124 {
1125 if (BOOST_UNLIKELY(once_flag_operations::load(g_pool_cleanup_registered.m_flag, boost::memory_order_relaxed) == 0u))
1126 {
1127 if (once_flag_operations::exchange(g_pool_cleanup_registered.m_flag, 1u, boost::memory_order_relaxed) == 0u)
1128 std::atexit(&cleanup_lock_pool);
1129 }
1130
1131 const std::size_t new_buffer_size = entries_offset + new_capacity * sizeof(void*) * 2u;
1132
1133 void* p = std::malloc(new_buffer_size);
1134 if (BOOST_UNLIKELY(p == NULL))
1135 return NULL;
1136
1137 header* h = new (p) header;
1138 const volatile void** a = new (get_atomic_pointers(h)) const volatile void*[new_capacity];
1139 wait_state** w = new (get_wait_states(a, new_capacity)) wait_state*[new_capacity];
1140
1141 if (BOOST_LIKELY(old_header != NULL))
1142 {
1143 BOOST_ASSERT(new_capacity >= old_header->capacity);
1144
1145 h->size = old_header->size;
1146
1147 const volatile void** old_a = get_atomic_pointers(old_header);
1148 std::memcpy(a, old_a, old_header->size * sizeof(const volatile void*));
1149 std::memset(a + old_header->size, 0, (new_capacity - old_header->size) * sizeof(const volatile void*));
1150
1151 wait_state** old_w = get_wait_states(old_a, old_header->capacity);
1152 std::memcpy(w, old_w, old_header->capacity * sizeof(wait_state*)); // copy spare wait state pointers
1153 std::memset(w + old_header->capacity, 0, (new_capacity - old_header->capacity) * sizeof(wait_state*));
1154 }
1155 else
1156 {
1157 std::memset(p, 0, new_buffer_size);
1158 }
1159
1160 h->capacity = new_capacity;
1161
1162 return h;
1163 }
1164
1165 //! Deallocates spare entries and the list buffer if no allocated entries are left
1166 void wait_state_list::free_spare() BOOST_NOEXCEPT
1167 {
1168 if (BOOST_LIKELY(m_header != NULL))
1169 {
1170 wait_state** ws = get_wait_states();
1171 for (std::size_t i = m_header->size, n = m_header->capacity; i < n; ++i)
1172 {
1173 wait_state* w = ws[i];
1174 if (!w)
1175 break;
1176
1177 delete w;
1178 ws[i] = NULL;
1179 }
1180
1181 if (m_header->size == 0u)
1182 {
1183 std::free(static_cast< void* >(m_header));
1184 m_header = NULL;
1185 }
1186 }
1187 }
1188
1189 } // namespace
1190
1191
1192 BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
1193 {
1194 lock_state& ls = g_lock_pool[get_lock_index(h)].state;
1195 ls.short_lock();
1196 return &ls;
1197 }
1198
1199 BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
1200 {
1201 lock_state& ls = g_lock_pool[get_lock_index(h)].state;
1202 ls.long_lock();
1203 return &ls;
1204 }
1205
1206 BOOST_ATOMIC_DECL void unlock(void* vls) BOOST_NOEXCEPT
1207 {
1208 static_cast< lock_state* >(vls)->unlock();
1209 }
1210
1211
1212 BOOST_ATOMIC_DECL void* allocate_wait_state(void* vls, const volatile void* addr) BOOST_NOEXCEPT
1213 {
1214 BOOST_ASSERT(vls != NULL);
1215
1216 lock_state* ls = static_cast< lock_state* >(vls);
1217
1218 // Note: find_or_create may fail to allocate memory. However, C++20 specifies that wait/notify operations
1219 // are noexcept, so allocate_wait_state must succeed. To implement this we return NULL in case of failure and test for NULL
1220 // in other wait/notify functions so that all of them become a no-op (which is conforming, though inefficient, behavior).
1221 wait_state* ws = ls->m_wait_states.find_or_create(addr);
1222
1223 if (BOOST_LIKELY(ws != NULL))
1224 ++ws->m_ref_count;
1225
1226 return ws;
1227 }
1228
1229 BOOST_ATOMIC_DECL void free_wait_state(void* vls, void* vws) BOOST_NOEXCEPT
1230 {
1231 BOOST_ASSERT(vls != NULL);
1232
1233 wait_state* ws = static_cast< wait_state* >(vws);
1234 if (BOOST_LIKELY(ws != NULL))
1235 {
1236 if (--ws->m_ref_count == 0u)
1237 {
1238 lock_state* ls = static_cast< lock_state* >(vls);
1239 ls->m_wait_states.erase(ws);
1240 }
1241 }
1242 }
1243
1244 BOOST_ATOMIC_DECL void wait(void* vls, void* vws) BOOST_NOEXCEPT
1245 {
1246 BOOST_ASSERT(vls != NULL);
1247
1248 lock_state* ls = static_cast< lock_state* >(vls);
1249 wait_state* ws = static_cast< wait_state* >(vws);
1250 if (BOOST_LIKELY(ws != NULL))
1251 {
1252 ws->wait(*ls);
1253 }
1254 else
1255 {
1256 // A conforming wait operation must unlock and lock the mutex to allow a notify to complete
1257 ls->unlock();
1258 atomics::detail::wait_some();
1259 ls->long_lock();
1260 }
1261 }
1262
1263 BOOST_ATOMIC_DECL void notify_one(void* vls, const volatile void* addr) BOOST_NOEXCEPT
1264 {
1265 BOOST_ASSERT(vls != NULL);
1266
1267 lock_state* ls = static_cast< lock_state* >(vls);
1268 wait_state* ws = ls->m_wait_states.find(addr);
1269 if (BOOST_LIKELY(ws != NULL))
1270 ws->notify_one(*ls);
1271 }
1272
1273 BOOST_ATOMIC_DECL void notify_all(void* vls, const volatile void* addr) BOOST_NOEXCEPT
1274 {
1275 BOOST_ASSERT(vls != NULL);
1276
1277 lock_state* ls = static_cast< lock_state* >(vls);
1278 wait_state* ws = ls->m_wait_states.find(addr);
1279 if (BOOST_LIKELY(ws != NULL))
1280 ws->notify_all(*ls);
1281 }
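// A minimal usage sketch of the exported entry points above (illustrative only; the real callers live in the
// Boost.Atomic headers and may differ in detail):
//
//   void* ls = lock_pool::long_lock(h);                    // h identifies the atomic object
//   void* ws = lock_pool::allocate_wait_state(ls, addr);   // may return NULL on allocation failure
//   while (value_unchanged(addr))                          // hypothetical re-check done by the caller
//       lock_pool::wait(ls, ws);                           // unlocks, blocks, re-locks
//   lock_pool::free_wait_state(ls, ws);
//   lock_pool::unlock(ls);
//
// A notifier would lock the same entry, call notify_one()/notify_all() with the object's address, then unlock.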
1282
1283
1284 BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT
1285 {
1286 #if BOOST_ATOMIC_THREAD_FENCE == 2
1287 atomics::detail::fence_operations::thread_fence(memory_order_seq_cst);
1288 #else
1289 // Emulate full fence by locking/unlocking a mutex
1290 lock_pool::unlock(lock_pool::short_lock(0u));
1291 #endif
1292 }
1293
1294 BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT
1295 {
1296 // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
1297 #if BOOST_ATOMIC_SIGNAL_FENCE == 2
1298 atomics::detail::fence_operations::signal_fence(memory_order_seq_cst);
1299 #endif
1300 }
1301
1302 } // namespace lock_pool
1303 } // namespace detail
1304 } // namespace atomics
1305 } // namespace boost
1306
1307 #include <boost/atomic/detail/footer.hpp>
1308