//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// UNSUPPORTED: c++03
// UNSUPPORTED: libcxxabi-no-threads
// UNSUPPORTED: no-exceptions

#define TESTING_CXA_GUARD
#include "../src/cxa_guard_impl.h"
#include <unordered_map>
#include <thread>
#include <atomic>
#include <array>
#include <cassert>
#include <chrono>  // for std::chrono::nanoseconds in YieldAfterBarrier
#include <cstdint> // for uint8_t/uint32_t/uint64_t used with the guard objects
#include <memory>
#include <vector>

#include "make_test_thread.h"
#include "test_macros.h"

using namespace __cxxabiv1;

// Misc test configuration, used to tune the flakiness of the test.
// ThreadsPerTest - The number of threads used by each test.
constexpr int ThreadsPerTest = 10;
// The number of instances of a test to run concurrently.
constexpr int ConcurrentRunsPerTest = 10;
// The number of times to rerun each test.
constexpr int TestSamples = 50;

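// Scheduling helpers: BusyWait is the spin primitive used by the Barrier
// below, and YieldAfterBarrier briefly sleeps and yields so that threads
// released from a barrier have a chance to reach the guard before the
// initializing thread finishes.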
void BusyWait() {
  std::this_thread::yield();
}

void YieldAfterBarrier() {
  std::this_thread::sleep_for(std::chrono::nanoseconds(10));
  std::this_thread::yield();
}

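// A simple one-shot spin barrier. arrive_and_wait() records the calling
// thread's arrival and spins until every participant has arrived;
// arrive_and_drop() records arrival without waiting; wait_for_threads(n)
// spins until at least n threads have arrived.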
struct Barrier {
  explicit Barrier(int n) : m_threads(n), m_remaining(n) {}
  Barrier(Barrier const&) = delete;
  Barrier& operator=(Barrier const&) = delete;

  void arrive_and_wait() const {
    --m_remaining;
    while (m_remaining.load()) {
      BusyWait();
    }
  }

  void arrive_and_drop() const {
    --m_remaining;
  }

  void wait_for_threads(int n) const {
    while ((m_threads - m_remaining.load()) < n) {
      std::this_thread::yield();
    }
  }

private:
  const int m_threads;
  mutable std::atomic<int> m_remaining;
};

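// The four ways a thread can observe a guarded initialization: it found the
// guard already set (COMPLETE), it ran the initializer itself (PERFORMED),
// it blocked until another thread finished the job (WAITED), or its
// initializer threw and the guard was aborted (ABORTED).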
enum class InitResult {
  COMPLETE,
  PERFORMED,
  WAITED,
  ABORTED
};
constexpr InitResult COMPLETE = InitResult::COMPLETE;
constexpr InitResult PERFORMED = InitResult::PERFORMED;
constexpr InitResult WAITED = InitResult::WAITED;
constexpr InitResult ABORTED = InitResult::ABORTED;

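// Run one guarded initialization through Impl, mirroring the code sequence
// the compiler emits for a function-local static: a fast-path acquire load
// of the guard byte, then cxa_guard_acquire/cxa_guard_release, with
// cxa_guard_abort if the initializer throws.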
template <class Impl, class GuardType, class Init>
InitResult check_guard(GuardType *g, Init init) {
  uint8_t *first_byte = reinterpret_cast<uint8_t*>(g);
  if (std::__libcpp_atomic_load(first_byte, std::_AO_Acquire) == 0) {
    Impl impl(g);
    if (impl.cxa_guard_acquire() == INIT_IS_PENDING) {
#ifndef TEST_HAS_NO_EXCEPTIONS
      try {
#endif
        init();
        impl.cxa_guard_release();
        return PERFORMED;
#ifndef TEST_HAS_NO_EXCEPTIONS
      } catch (...) {
        impl.cxa_guard_abort();
        return ABORTED;
      }
#endif
    }
    return WAITED;
  }
  return COMPLETE;
}

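// Models a function-local static: a guard object of the specified width plus
// per-InitResult counters recording how each accessing thread observed the
// initialization. access_callback() packages an access into a copyable
// callable suitable for use as a thread body.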
template <class GuardType, class Impl>
struct FunctionLocalStatic {
  FunctionLocalStatic() {}
  FunctionLocalStatic(FunctionLocalStatic const&) = delete;

  template <class InitFunc>
  InitResult access(InitFunc&& init) {
    auto res = check_guard<Impl>(&guard_object, init);
    ++result_counts[static_cast<int>(res)];
    return res;
  }

  template <class InitFn>
  struct AccessCallback {
    void operator()() const { this_obj->access(init); }

    FunctionLocalStatic *this_obj;
    InitFn init;
  };

  template <class InitFn, class Callback = AccessCallback<InitFn> >
  Callback access_callback(InitFn init) {
    return Callback{this, init};
  }

  int get_count(InitResult I) const {
    return result_counts[static_cast<int>(I)].load();
  }

  int num_completed() const {
    return get_count(COMPLETE) + get_count(PERFORMED) + get_count(WAITED);
  }

  int num_waiting() const {
    return waiting_threads.load();
  }

private:
  GuardType guard_object = {};
  std::atomic<int> waiting_threads{0};
  std::array<std::atomic<int>, 4> result_counts{};
  static_assert(static_cast<int>(ABORTED) == 3, "only 4 result kinds expected");
};

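// Owns a set of test threads. CreateThreadsWithBarrier starts N threads that
// all block on a shared barrier until the creating thread arrives as the
// (N+1)-th participant, so the callbacks begin at roughly the same time.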
struct ThreadGroup {
  ThreadGroup() = default;
  ThreadGroup(ThreadGroup const&) = delete;

  template <class ...Args>
  void Create(Args&& ...args) {
    threads.emplace_back(std::forward<Args>(args)...);
  }

  template <class Callback>
  void CreateThreadsWithBarrier(int N, Callback cb) {
    auto start = std::make_shared<Barrier>(N + 1);
    for (int I = 0; I < N; ++I) {
      Create([start, cb]() {
        start->arrive_and_wait();
        cb();
      });
    }
    start->arrive_and_wait();
  }

  void JoinAll() {
    for (auto& t : threads) {
      t.join();
    }
  }

private:
  std::vector<std::thread> threads;
};

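// All threads race to perform the initialization at once. Exactly one must
// run the initializer; every other thread must either wait on the guard or
// see it already set.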
template <class GuardType, class Impl>
void test_free_for_all(int num_waiters) {
  FunctionLocalStatic<GuardType, Impl> test_obj;

  ThreadGroup threads;

  bool already_init = false;
  threads.CreateThreadsWithBarrier(num_waiters,
    test_obj.access_callback([&]() {
      assert(!already_init);
      already_init = true;
    })
  );

  // wait for the other threads to finish initialization.
  threads.JoinAll();

  assert(test_obj.get_count(PERFORMED) == 1);
  assert(test_obj.get_count(COMPLETE) + test_obj.get_count(WAITED) == num_waiters - 1);
}

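// One thread starts the initialization and then stalls inside the
// initializer, giving the other threads time to pile up waiting on the
// guard. The waiters' initializer must never run.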
template <class GuardType, class Impl>
void test_waiting_for_init(int num_waiters) {
  FunctionLocalStatic<GuardType, Impl> test_obj;

  ThreadGroup threads;

  Barrier start_init(2);
  threads.Create(test_obj.access_callback(
    [&]() {
      start_init.arrive_and_wait();
      // Take our sweet time completing the initialization...
      //
      // There's a race condition between the other threads reaching the
      // start_init barrier and them actually hitting the cxa guard.
      // Since we're trying to test the waiting logic, we want as many
      // threads as possible to enter the waiting loop.
      YieldAfterBarrier();
    }
  ));
  start_init.wait_for_threads(1);

  threads.CreateThreadsWithBarrier(num_waiters,
      test_obj.access_callback([]() { assert(false); })
  );
  // unblock the initializing thread
  start_init.arrive_and_drop();

  // wait for the other threads to finish initialization.
  threads.JoinAll();

  assert(test_obj.get_count(PERFORMED) == 1);
  assert(test_obj.get_count(ABORTED) == 0);
  assert(test_obj.get_count(COMPLETE) + test_obj.get_count(WAITED) == num_waiters);
}

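// The initializing thread throws, which must abort the guarded
// initialization. Exactly one of the remaining threads should then retry
// and successfully perform it.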
template <class GuardType, class Impl>
void test_aborted_init(int num_waiters) {
  FunctionLocalStatic<GuardType, Impl> test_obj;

  Barrier start_init(2);
  ThreadGroup threads;
  threads.Create(test_obj.access_callback(
    [&]() {
      start_init.arrive_and_wait();
      YieldAfterBarrier();
      throw 42;
    })
  );
  start_init.wait_for_threads(1);

  bool already_init = false;
  threads.CreateThreadsWithBarrier(num_waiters,
      test_obj.access_callback([&]() {
        assert(!already_init);
        already_init = true;
      })
  );
  // unblock the initializing thread
  start_init.arrive_and_drop();

  // wait for the other threads to finish initialization.
  threads.JoinAll();

  assert(test_obj.get_count(ABORTED) == 1);
  assert(test_obj.get_count(PERFORMED) == 1);
  assert(test_obj.get_count(WAITED) + test_obj.get_count(COMPLETE) == num_waiters - 1);
}

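// The guard is initialized up front on the main thread, so every spawned
// thread must observe the completed initialization without waiting and
// without running its initializer.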
template <class GuardType, class Impl>
void test_completed_init(int num_waiters) {
  FunctionLocalStatic<GuardType, Impl> test_obj;

  test_obj.access([]() {}); // initialize the object
  assert(test_obj.num_waiting() == 0);
  assert(test_obj.num_completed() == 1);
  assert(test_obj.get_count(PERFORMED) == 1);

  ThreadGroup threads;
  threads.CreateThreadsWithBarrier(num_waiters,
      test_obj.access_callback([]() { assert(false); })
  );
  // wait for the other threads to finish initialization.
  threads.JoinAll();

  assert(test_obj.get_count(ABORTED) == 0);
  assert(test_obj.get_count(PERFORMED) == 1);
  assert(test_obj.get_count(WAITED) == 0);
  assert(test_obj.get_count(COMPLETE) == num_waiters);
}

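// Run every scenario against both the 32-bit and 64-bit guard object
// layouts. Each scenario is run by ConcurrentRunsPerTest threads at once and
// repeated TestSamples times to shake out races.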
template <class Impl>
void test_impl() {
  using TestFn = void(*)(int);
  TestFn TestList[] = {
    test_free_for_all<uint32_t, Impl>,
    test_free_for_all<uint64_t, Impl>,
    test_waiting_for_init<uint32_t, Impl>,
    test_waiting_for_init<uint64_t, Impl>,
    test_aborted_init<uint32_t, Impl>,
    test_aborted_init<uint64_t, Impl>,
    test_completed_init<uint32_t, Impl>,
    test_completed_init<uint64_t, Impl>
  };

  for (auto test_func : TestList) {
    ThreadGroup test_threads;
    test_threads.CreateThreadsWithBarrier(ConcurrentRunsPerTest, [=]() {
      for (int I = 0; I < TestSamples; ++I) {
        test_func(ThreadsPerTest);
      }
    });
    test_threads.JoinAll();
  }
}

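// Test each selectable implementation. The futex-based implementation is
// only exercised when the platform supports it; the std::conditional keeps
// the FutexImpl alias well-formed even on platforms without futex support by
// falling back to the global-lock implementation.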
void test_all_impls() {
  using MutexImpl = SelectImplementation<Implementation::GlobalLock>::type;

  // Attempt to test the Futex-based implementation if it's supported on the
  // target platform.
  using RealFutexImpl = SelectImplementation<Implementation::Futex>::type;
  using FutexImpl = typename std::conditional<
      PlatformSupportsFutex(),
      RealFutexImpl,
      MutexImpl
  >::type;

  test_impl<MutexImpl>();
  if (PlatformSupportsFutex())
    test_impl<FutexImpl>();
}

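// Sanity-check the raw futex wrappers directly: two waiters block until the
// waker changes their values and wakes them, while a third waiter passes a
// stale expected value and must return without blocking.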
//
// The dummy template parameter keeps this function from being instantiated
// unless it is actually called, since the futex syscall wrappers it uses may
// not be usable on every platform.
template <bool Dummy = true>
void test_futex_syscall() {
  if (!PlatformSupportsFutex())
    return;
  int lock1 = 0;
  int lock2 = 0;
  int lock3 = 0;
  std::thread waiter1 = support::make_test_thread([&]() {
    int expect = 0;
    PlatformFutexWait(&lock1, expect);
    assert(lock1 == 1);
  });
  std::thread waiter2 = support::make_test_thread([&]() {
    int expect = 0;
    PlatformFutexWait(&lock2, expect);
    assert(lock2 == 2);
  });
  std::thread waiter3 = support::make_test_thread([&]() {
    int expect = 42; // not the value stored in lock3
    PlatformFutexWait(&lock3, expect); // should return without blocking
  });
  std::thread waker = support::make_test_thread([&]() {
    lock1 = 1;
    PlatformFutexWake(&lock1);
    lock2 = 2;
    PlatformFutexWake(&lock2);
  });
  waiter1.join();
  waiter2.join();
  waiter3.join();
  waker.join();
}

int main(int, char**) {
  // Test each multi-threaded implementation with real threads.
  test_all_impls();
  // Test the basic sanity of the futex syscall wrappers.
  test_futex_syscall();

  return 0;
}