/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

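// Tests for the C11 <threads.h> API: call_once plus the cnd_*, mtx_*, thrd_*,
// and tss_* families. Every test is skipped when the libc doesn't provide
// <threads.h>.
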
#include <gtest/gtest.h>

#include <stdint.h>
#include <stdlib.h>

#if __has_include(<threads.h>)

#define HAVE_THREADS_H
#include <threads.h>

static int g_call_once_call_count;

static void increment_call_count() {
  ++g_call_once_call_count;
}

static int g_dtor_call_count;

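// Destructor for the tss_* tests: counts how often it runs and frees the
// strdup()ed value stored in the slot.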
static void tss_dtor(void* ptr) {
  ++g_dtor_call_count;
  free(ptr);
}

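// thrd_start_t-compatible helpers that recover an int from the void* argument:
// return_arg returns it from the start routine, exit_arg passes it to
// thrd_exit() instead.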
static int return_arg(void* arg) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(arg));
}

static int exit_arg(void* arg) {
  thrd_exit(static_cast<int>(reinterpret_cast<uintptr_t>(arg)));
}

#endif

#include <signal.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <atomic>
#include <thread>

#include "BionicDeathTest.h"
#include "SignalUtils.h"

TEST(threads, call_once) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
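  // All three call_once calls share the same flag, so the callback must run
  // exactly once, even when one of the calls comes from another thread.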
  once_flag flag = ONCE_FLAG_INIT;
  call_once(&flag, increment_call_count);
  call_once(&flag, increment_call_count);
  std::thread([&flag] {
    call_once(&flag, increment_call_count);
  }).join();
  ASSERT_EQ(1, g_call_once_call_count);
#endif
}

TEST(threads, cnd_broadcast__cnd_wait) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain));

  cnd_t c;
  ASSERT_EQ(thrd_success, cnd_init(&c));

  std::atomic_int i = 0;

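  // Each waiter re-checks the predicate in a loop so that spurious wakeups
  // (which cnd_wait is allowed to produce) don't end the wait early.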
  auto waiter = [&c, &i, &m] {
    ASSERT_EQ(thrd_success, mtx_lock(&m));
    while (i != 1) ASSERT_EQ(thrd_success, cnd_wait(&c, &m));
    ASSERT_EQ(thrd_success, mtx_unlock(&m));
  };
  std::thread t1(waiter);
  std::thread t2(waiter);
  std::thread t3(waiter);

  ASSERT_EQ(thrd_success, mtx_lock(&m));
  i = 1;
  ASSERT_EQ(thrd_success, mtx_unlock(&m));

  ASSERT_EQ(thrd_success, cnd_broadcast(&c));

  t1.join();
  t2.join();
  t3.join();

  mtx_destroy(&m);
  cnd_destroy(&c);
#endif
}

TEST(threads, cnd_init__cnd_destroy) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  cnd_t c;
  ASSERT_EQ(thrd_success, cnd_init(&c));
  cnd_destroy(&c);
#endif
}

TEST(threads, cnd_signal__cnd_wait) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain));
  cnd_t c;
  ASSERT_EQ(thrd_success, cnd_init(&c));

  std::atomic_int count = 0;
  auto waiter = [&c, &m, &count] {
    ASSERT_EQ(thrd_success, mtx_lock(&m));
    ASSERT_EQ(thrd_success, cnd_wait(&c, &m));
    ASSERT_EQ(thrd_success, mtx_unlock(&m));
    ++count;
  };
  std::thread t1(waiter);
  std::thread t2(waiter);
  std::thread t3(waiter);

  // This is inherently racy, but attempts to distinguish between cnd_signal
  // and cnd_broadcast.
  usleep(100000);
  ASSERT_EQ(thrd_success, cnd_signal(&c));
  while (count == 0) {
  }
  usleep(100000);
  ASSERT_EQ(1, count);

  ASSERT_EQ(thrd_success, cnd_signal(&c));
  while (count == 1) {
  }
  usleep(100000);
  ASSERT_EQ(2, count);

  ASSERT_EQ(thrd_success, cnd_signal(&c));
  while (count == 2) {
  }
  usleep(100000);
  ASSERT_EQ(3, count);

  t1.join();
  t2.join();
  t3.join();

  mtx_destroy(&m);
  cnd_destroy(&c);
#endif
}

TEST(threads, cnd_timedwait_timedout) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_timed));
  ASSERT_EQ(thrd_success, mtx_lock(&m));

  cnd_t c;
  ASSERT_EQ(thrd_success, cnd_init(&c));

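  // An all-zero timespec is an absolute time that has already passed, so the
  // wait must time out immediately.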
  timespec ts = {};
  ASSERT_EQ(thrd_timedout, cnd_timedwait(&c, &m, &ts));
#endif
}

TEST(threads, cnd_timedwait) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_timed));

  cnd_t c;
  ASSERT_EQ(thrd_success, cnd_init(&c));

  std::atomic_bool done = false;
  std::thread t([&c, &m, &done] {
    ASSERT_EQ(thrd_success, mtx_lock(&m));

    // cnd_timedwait's time is *absolute*.
    timespec ts;
    ASSERT_EQ(TIME_UTC, timespec_get(&ts, TIME_UTC));
    ts.tv_sec += 666;

    ASSERT_EQ(thrd_success, cnd_timedwait(&c, &m, &ts));
    done = true;
    ASSERT_EQ(thrd_success, mtx_unlock(&m));
  });

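  // Keep signalling until the waiter reports that it woke up: a single signal
  // sent before the waiter has blocked in cnd_timedwait would be lost.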
  while (!done) ASSERT_EQ(thrd_success, cnd_signal(&c));

  t.join();
#endif
}

TEST(threads, mtx_init) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain));
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_timed));
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain | mtx_recursive));
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_timed | mtx_recursive));
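  // mtx_recursive on its own, or an arbitrary type value, is rejected.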
  ASSERT_EQ(thrd_error, mtx_init(&m, 123));
  ASSERT_EQ(thrd_error, mtx_init(&m, mtx_recursive));
#endif
}

TEST(threads, mtx_destroy) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain));
  mtx_destroy(&m);
#endif
}

TEST(threads, mtx_lock_plain) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain));

  ASSERT_EQ(thrd_success, mtx_lock(&m));
  ASSERT_EQ(thrd_busy, mtx_trylock(&m));
  ASSERT_EQ(thrd_success, mtx_unlock(&m));

  mtx_destroy(&m);
#endif
}

TEST(threads, mtx_lock_recursive) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain | mtx_recursive));

  ASSERT_EQ(thrd_success, mtx_lock(&m));
  ASSERT_EQ(thrd_success, mtx_trylock(&m));
  ASSERT_EQ(thrd_success, mtx_unlock(&m));
  ASSERT_EQ(thrd_success, mtx_unlock(&m));

  mtx_destroy(&m);
#endif
}

TEST(threads, mtx_timedlock) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_timed));

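  // The lock is uncontended, so mtx_timedlock succeeds immediately even though
  // the absolute timeout (the epoch) has already passed.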
  timespec ts = {};
  ASSERT_EQ(thrd_success, mtx_timedlock(&m, &ts));

  std::thread([&m] {
    timespec ts = { .tv_nsec = 500000 };
    ASSERT_EQ(thrd_timedout, mtx_timedlock(&m, &ts));
  }).join();

  ASSERT_EQ(thrd_success, mtx_unlock(&m));
  mtx_destroy(&m);
#endif
}

TEST(threads, mtx_unlock) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  mtx_t m;
  ASSERT_EQ(thrd_success, mtx_init(&m, mtx_plain));
  ASSERT_EQ(thrd_success, mtx_lock(&m));
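  // While this thread holds the mutex, mtx_trylock from another thread must
  // report thrd_busy; after the unlock it must succeed.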
  std::thread([&m] {
    ASSERT_EQ(thrd_busy, mtx_trylock(&m));
  }).join();
  ASSERT_EQ(thrd_success, mtx_unlock(&m));
  std::thread([&m] {
    ASSERT_EQ(thrd_success, mtx_trylock(&m));
  }).join();
#endif
}

TEST(threads, thrd_current__thrd_equal) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  thrd_t t1 = thrd_current();
  // (As a side-effect, this demonstrates interoperability with std::thread.)
  std::thread([&t1] {
    thrd_t t2 = thrd_current();
    ASSERT_FALSE(thrd_equal(t1, t2));
    thrd_t t2_2 = thrd_current();
    ASSERT_TRUE(thrd_equal(t2, t2_2));
  }).join();
  thrd_t t1_2 = thrd_current();
  ASSERT_TRUE(thrd_equal(t1, t1_2));
#endif
}

TEST(threads, thrd_create__thrd_detach) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  thrd_t t;
  ASSERT_EQ(thrd_success, thrd_create(&t, exit_arg, reinterpret_cast<void*>(1)));
  ASSERT_EQ(thrd_success, thrd_detach(t));
#endif
}

TEST(threads, thrd_create__thrd_exit) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  // Similar to the thrd_join test, but with a function that calls thrd_exit
  // instead.
  thrd_t t;
  int result;
  ASSERT_EQ(thrd_success, thrd_create(&t, exit_arg, reinterpret_cast<void*>(1)));
  ASSERT_EQ(thrd_success, thrd_join(t, &result));
  ASSERT_EQ(1, result);

  ASSERT_EQ(thrd_success, thrd_create(&t, exit_arg, reinterpret_cast<void*>(2)));
  ASSERT_EQ(thrd_success, thrd_join(t, &result));
  ASSERT_EQ(2, result);

  // The `result` argument can be null if you don't care...
  ASSERT_EQ(thrd_success, thrd_create(&t, exit_arg, reinterpret_cast<void*>(3)));
  ASSERT_EQ(thrd_success, thrd_join(t, nullptr));
#endif
}

class threads_DeathTest : public BionicDeathTest {};

TEST(threads_DeathTest, thrd_exit_main_thread) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  // "The program terminates normally after the last thread has been terminated.
  // The behavior is as if the program called the exit function with the status
  // EXIT_SUCCESS at thread termination time." (ISO/IEC 9899:2018)
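  // The 12 passed to thrd_exit is deliberately ignored: the process is still
  // expected to exit with EXIT_SUCCESS.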
  ASSERT_EXIT(thrd_exit(12), ::testing::ExitedWithCode(EXIT_SUCCESS), "");
#endif
}

TEST(threads, thrd_create__thrd_join) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  // Similar to the thrd_exit test, but with a start function that returns its
  // result instead.
  thrd_t t;
  int result;
  ASSERT_EQ(thrd_success, thrd_create(&t, return_arg, reinterpret_cast<void*>(1)));
  ASSERT_EQ(thrd_success, thrd_join(t, &result));
  ASSERT_EQ(1, result);

  ASSERT_EQ(thrd_success, thrd_create(&t, return_arg, reinterpret_cast<void*>(2)));
  ASSERT_EQ(thrd_success, thrd_join(t, &result));
  ASSERT_EQ(2, result);

  // The `result` argument can be null if you don't care...
  ASSERT_EQ(thrd_success, thrd_create(&t, return_arg, reinterpret_cast<void*>(3)));
  ASSERT_EQ(thrd_success, thrd_join(t, nullptr));
#endif
}

TEST(threads, thrd_sleep_signal) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
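  // Install a no-op SIGALRM handler for the duration of the test so that the
  // signal interrupts thrd_sleep instead of terminating the process.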
  ScopedSignalHandler ssh{SIGALRM, [](int) {}};
  std::thread t([] {
    timespec long_time = { .tv_sec = 666 };
    timespec remaining = {};
    ASSERT_EQ(-1, thrd_sleep(&long_time, &remaining));
    uint64_t t = remaining.tv_sec * 1000000000 + remaining.tv_nsec;
    ASSERT_LE(t, 666ULL * 1000000000);
  });
  usleep(100000); // 0.1s
  pthread_kill(t.native_handle(), SIGALRM);
  t.join();
#endif
}

TEST(threads, thrd_sleep_signal_nullptr) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
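  // Same as thrd_sleep_signal, but passing nullptr for the remaining-time
  // out-parameter, which is explicitly allowed.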
  ScopedSignalHandler ssh{SIGALRM, [](int) {}};
  std::thread t([] {
    timespec long_time = { .tv_sec = 666 };
    ASSERT_EQ(-1, thrd_sleep(&long_time, nullptr));
  });
  usleep(100000); // 0.1s
  pthread_kill(t.native_handle(), SIGALRM);
  t.join();
#endif
}

TEST(threads, thrd_sleep_error) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
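  // thrd_sleep returns -1 if interrupted by a signal and some other negative
  // value on error; bionic uses -2 for the error case.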
  timespec invalid = { .tv_sec = -1 };
  ASSERT_EQ(-2, thrd_sleep(&invalid, nullptr));
#endif
}

TEST(threads, thrd_yield) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  thrd_yield();
#endif
}

TEST(threads, TSS_DTOR_ITERATIONS_macro) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
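  // The C11 and POSIX thread-specific storage destructor limits are expected
  // to agree.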
  ASSERT_EQ(PTHREAD_DESTRUCTOR_ITERATIONS, TSS_DTOR_ITERATIONS);
#endif
}

TEST(threads, tss_create) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  tss_t key;
  ASSERT_EQ(thrd_success, tss_create(&key, nullptr));
  tss_delete(key);
#endif
}

TEST(threads, tss_create_dtor) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  tss_dtor_t dtor = tss_dtor;
  tss_t key;
  ASSERT_EQ(thrd_success, tss_create(&key, dtor));

  ASSERT_EQ(thrd_success, tss_set(key, strdup("hello")));
  std::thread([&key] {
    ASSERT_EQ(thrd_success, tss_set(key, strdup("world")));
  }).join();
  // Thread exit calls the destructor...
  ASSERT_EQ(1, g_dtor_call_count);

  // "[A call to tss_set] will not invoke the destructor associated with the
  // key on the value being replaced" (ISO/IEC 9899:2018).
  g_dtor_call_count = 0;
  ASSERT_EQ(thrd_success, tss_set(key, strdup("hello")));
  ASSERT_EQ(0, g_dtor_call_count);

  // "Calling tss_delete will not result in the invocation of any
  // destructors" (ISO/IEC 9899:2018).
  // The destructor for "hello" won't be called until *this* thread exits.
  g_dtor_call_count = 0;
  tss_delete(key);
  ASSERT_EQ(0, g_dtor_call_count);
#endif
}

TEST(threads, tss_get__tss_set) {
#if !defined(HAVE_THREADS_H)
  GTEST_SKIP() << "<threads.h> unavailable";
#else
  tss_t key;
  ASSERT_EQ(thrd_success, tss_create(&key, nullptr));

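  // Values are per-thread: the new thread starts with a null value, and its
  // tss_set doesn't affect this thread's value.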
  ASSERT_EQ(thrd_success, tss_set(key, const_cast<char*>("hello")));
  ASSERT_STREQ("hello", reinterpret_cast<char*>(tss_get(key)));
  std::thread([&key] {
    ASSERT_EQ(nullptr, tss_get(key));
    ASSERT_EQ(thrd_success, tss_set(key, const_cast<char*>("world")));
    ASSERT_STREQ("world", reinterpret_cast<char*>(tss_get(key)));
  }).join();
  ASSERT_STREQ("hello", reinterpret_cast<char*>(tss_get(key)));

  tss_delete(key);
#endif
}