/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <functional>
#include <future>
#include <string>
#include <vector>

#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/scopeguard.h>
#include <android-base/strings.h>

#include "private/bionic_constants.h"
#include "BionicDeathTest.h"
#include "SignalUtils.h"
#include "utils.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so we won't be able to allocate all
  // PTHREAD_KEYS_MAX keys ourselves and should hit EAGAIN before the loop ends.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, nullptr);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns nullptr.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

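// pthread_getspecific() of a never-set key must return nullptr in a new
// thread even when that thread's stack starts out full of garbage, so the
// test below deliberately dirties the stack it hands to pthread_create.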
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));

  size_t stack_size = 640 * 1024;
  void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So this tests whether the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

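// Keeps a spawned thread spinning until the test is done with it (or the
// helper goes out of scope): handy when a test needs to operate on a thread
// that is guaranteed to still be running.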
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter if spin_flag_ is shared between several tests, because
// it is always set to false at the end of each test. Any thread still
// looping on spin_flag_ will therefore eventually see it become false.
std::atomic<bool> SpinFunctionHelper::spin_flag_;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), nullptr));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

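// Creates and immediately joins a short-lived thread, leaving |t| as a
// pthread_t that refers to a thread that no longer exists. The death tests
// below use this to check how each API reacts to such a stale handle.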
MakeDeadThread(pthread_t & t)245 static void MakeDeadThread(pthread_t& t) {
246   ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
247   ASSERT_EQ(0, pthread_join(t, nullptr));
248 }
249 
TEST(pthread,pthread_create)250 TEST(pthread, pthread_create) {
251   void* expected_result = reinterpret_cast<void*>(123);
252   // Can we create a thread?
253   pthread_t t;
254   ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
255   // If we join, do we get the expected value back?
256   void* result;
257   ASSERT_EQ(0, pthread_join(t, &result));
258   ASSERT_EQ(expected_result, result);
259 }
260 
TEST(pthread,pthread_create_EAGAIN)261 TEST(pthread, pthread_create_EAGAIN) {
262   pthread_attr_t attributes;
263   ASSERT_EQ(0, pthread_attr_init(&attributes));
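  // Ask for the largest page-aligned stack size representable; allocating it
  // is sure to fail, and pthread_create should report that as EAGAIN.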
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(nullptr);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Unlocking data->mutex will cause the main thread to exit, invalidating *data. Save the handle.
    pthread_t main_thread = data->main_thread;

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(main_thread, nullptr);

    return nullptr;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

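// Blocks in sigwait64 on a full signal set, stores the signal received into
// the int pointed to by |arg|, and returns sigwait64's own result as the
// thread's exit value.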
static void* SignalHandlerFn(void* arg) {
  sigset64_t wait_set;
  sigfillset64(&wait_set);
  return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
}

TEST(pthread, pthread_sigmask64_SIGRTMIN) {
  // Check that SIGRTMIN isn't blocked.
  sigset64_t original_set;
  sigemptyset64(&original_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));

  // Block SIGRTMIN.
  sigset64_t set;
  sigemptyset64(&set);
  sigaddset64(&set, SIGRTMIN);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));

  // Check that SIGRTMIN is blocked.
  sigset64_t final_set;
  sigemptyset64(&final_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
  // ...and that sigprocmask64 agrees with pthread_sigmask64.
  sigemptyset64(&final_set);
  ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));

  // Spawn a thread that calls sigwait64 and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGRTMIN.
  pthread_kill(signal_thread, SIGRTMIN);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGRTMIN, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
}

static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}

TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}

TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"),
               "invalid pthread_t (.*) passed to pthread_setname_np");
}

TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)),
               "invalid pthread_t (.*) passed to pthread_getname_np");
}

TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_kill__exited_thread) {
  static std::promise<pid_t> tid_promise;
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              [](void*) -> void* {
                                tid_promise.set_value(gettid());
                                return nullptr;
                              },
                              nullptr));

  pid_t tid = tid_promise.get_future().get();
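  // Probe with tgkill and signal 0 (which only checks for existence) until
  // the kernel no longer recognizes the tid, i.e. the thread has really gone.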
  while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
    continue;
  }
  ASSERT_EQ(ESRCH, errno);

  ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
}

TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread),
               "invalid pthread_t (.*) passed to pthread_detach");
}

TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c),
               "invalid pthread_t (.*) passed to pthread_getcpuclockid");
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param),
               "invalid pthread_t (.*) passed to pthread_getschedparam");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param),
               "invalid pthread_t (.*) passed to pthread_setschedparam");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setschedprio(dead_thread, 123),
               "invalid pthread_t (.*) passed to pthread_setschedprio");
}

TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, nullptr),
               "invalid pthread_t (.*) passed to pthread_join");
}

TEST_F(pthread_DeathTest, pthread_join__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0),
               "invalid pthread_t (.*) passed to pthread_kill");
}

TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

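// pthread_create is allowed to round attribute values up, so the helpers
// below spawn a real thread and read back (via pthread_getattr_np) the
// guard and stack sizes it actually got, rather than trusting the
// requested values.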
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

TEST(pthread, pthread_attr_setguardsize_tiny) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_reasonable) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);
  ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough but not a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
  ASSERT_EQ(36*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_enormous) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Larger than the stack itself. (Historically we mistakenly carved
  // the guard out of the stack itself, rather than adding it after the
  // end.)
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024*1024U, guard_size);
  ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

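// Shared state for the rwlock wakeup tests. The main thread and the helper
// thread hand off via the atomic |progress| field, and |tid| lets the main
// thread wait (via WaitUntilThreadSleep) until the helper is actually
// blocked in the kernel before releasing the lock.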
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  clockid_t clock;
};

static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedwrlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockwrlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, &ts);
  });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedrdlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, &ts);
  });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  // A timeout of "now" has already passed.
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  // tv_nsec must be in [0, NS_PER_S).
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  // A valid timespec in the past times out rather than failing.
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}

static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
}

TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedrdlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockrdlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
}

TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedwrlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockwrlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

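// Helper for the rwlock "kind" tests below: creates reader and writer
// threads that block on |lock| so the tests can observe whether a new
// reader is allowed to overtake a queued writer.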
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // With the reader-preferring kind, a new reader overtakes the queued
  // writer, so we can join it while still holding our own read lock.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
}

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // With the writer-preferring kind, the new reader queues behind the
  // blocked writer, so neither makes progress until we unlock.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

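// Each handler shifts the running total one decimal digit left and appends
// its own number, so a final value of 12 means handler 1 ran before
// handler 2, and 21 means the reverse.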
1376 static int g_atfork_prepare_calls = 0;
AtForkPrepare1()1377 static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
AtForkPrepare2()1378 static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
1379 static int g_atfork_parent_calls = 0;
AtForkParent1()1380 static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
AtForkParent2()1381 static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
1382 static int g_atfork_child_calls = 0;
AtForkChild1()1383 static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
AtForkChild2()1384 static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
1385 
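// Each handler multiplies the running value by 10 and appends its own digit,
// so the final value encodes the call order: 12 means handler 1 ran before
// handler 2, and 21 means the reverse.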
TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

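  // Bionic keeps the condattr flags in the first 32-bit word of the condition
  // variable's private state (an implementation detail this bionic-only test
  // relies on); read them back to check that signal/broadcast didn't clobber them.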
  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_SKIP() << "bionic-only test";
#endif  // !defined(__BIONIC__)
}

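// Fixture for condition-variable wakeup tests: a helper thread blocks in the
// supplied wait function while the main thread tracks it through a small
// Progress state machine and then wakes it with signal/broadcast.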
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;
  std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;

 protected:
  void SetUp() override {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  }

  void InitCond(clockid_t clock=CLOCK_REALTIME) {
    pthread_condattr_t attr;
    ASSERT_EQ(0, pthread_condattr_init(&attr));
    ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
    ASSERT_EQ(0, pthread_condattr_destroy(&attr));
  }

  void StartWaitingThread(
      std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
    progress = INITIALIZED;
    this->wait_function = wait_function;
    ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn),
                                this));
    while (progress != WAITING) {
      usleep(5000);
    }
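    // progress is set to WAITING before the helper thread actually blocks in
    // wait_function, so sleep a little longer to make it (very likely) block.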
    usleep(5000);
  }

  void RunTimedTest(
      clockid_t clock,
      std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* timeout)>
          wait_function) {
    timespec ts;
    ASSERT_EQ(0, clock_gettime(clock, &ts));
    ts.tv_sec += 1;

    StartWaitingThread([&wait_function, &ts](pthread_cond_t* cond, pthread_mutex_t* mutex) {
      return wait_function(cond, mutex, &ts);
    });

    progress = SIGNALED;
    ASSERT_EQ(0, pthread_cond_signal(&cond));
  }

  void RunTimedTest(clockid_t clock, std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex,
                                                       clockid_t clock, const timespec* timeout)>
                                         wait_function) {
    RunTimedTest(clock, [clock, &wait_function](pthread_cond_t* cond, pthread_mutex_t* mutex,
                                                const timespec* timeout) {
      return wait_function(cond, mutex, clock, timeout);
    });
  }

  void TearDown() override {
    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}

TEST_F(pthread_CondWakeupTest, broadcast_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_broadcast(&cond));
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_timedwait);
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait);
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_monotonic) {
#if defined(__BIONIC__)
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_realtime) {
#if defined(__BIONIC__)
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_monotonic) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_realtime) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

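// Checks timeout handling for an absolute-deadline wait: a deadline of "now"
// (or any time in the past, including a negative tv_sec) must fail with
// ETIMEDOUT, and an out-of-range tv_nsec must fail with EINVAL.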
static void pthread_cond_timedwait_timeout_helper(bool init_monotonic, clockid_t clock,
                                                  int (*wait_function)(pthread_cond_t* __cond,
                                                                       pthread_mutex_t* __mutex,
                                                                       const timespec* __timeout)) {
  pthread_mutex_t mutex;
  ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  pthread_cond_t cond;

  if (init_monotonic) {
    pthread_condattr_t attr;
    pthread_condattr_init(&attr);

    ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
    clockid_t clock;
    ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
    ASSERT_EQ(CLOCK_MONOTONIC, clock);

    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
  } else {
    ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
  }
  ASSERT_EQ(0, pthread_mutex_lock(&mutex));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
  ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
}

TEST(pthread, pthread_cond_timedwait_timeout) {
  pthread_cond_timedwait_timeout_helper(false, CLOCK_REALTIME, pthread_cond_timedwait);
}

TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(false, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
  pthread_cond_timedwait_timeout_helper(true, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_cond_clockwait_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(
      false, CLOCK_MONOTONIC,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      true, CLOCK_MONOTONIC,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      false, CLOCK_REALTIME,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      true, CLOCK_REALTIME,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_cond_clockwait_invalid) {
#if defined(__BIONIC__)
  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_cond_clockwait(&cond, &mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));

#else   // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // Find the stack in /proc/self/maps using a pointer to the stack.
  //
  // We do not use the "[stack]" label because in a native-bridge environment it
  // is not guaranteed to point to the right stack. A native bridge implementation
  // may keep a separate stack for the guest code.
  void* maps_stack_hi = nullptr;
  std::vector<map_record> maps;
  ASSERT_TRUE(Maps::parse_maps(&maps));
  uintptr_t stack_address = reinterpret_cast<uintptr_t>(untag_address(&maps_stack_hi));
  for (const auto& map : maps) {
    if (map.addr_start <= stack_address && map.addr_end > stack_address) {
      maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
      break;
    }
  }

  // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}

struct GetStackSignalHandlerArg {
  volatile bool done;
  void* signal_stack_base;
  size_t signal_stack_size;
  void* main_stack_base;
  size_t main_stack_size;
};

static GetStackSignalHandlerArg getstack_signal_handler_arg;

static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to make it likely that the kernel switches the current thread out,
  // provoking the mislabeling problem described before the test below.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));

  // Verify that the stack the signal handler is running on is the alternate stack
  // just registered.
  ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
  ASSERT_LT(static_cast<void*>(untag_address(&attr)),
            static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
                getstack_signal_handler_arg.signal_stack_size);

  // Verify that the main thread's stack obtained in the signal handler is correct.
  ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
  ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);

  getstack_signal_handler_arg.done = true;
}

// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in the task state segment (tss) to label the stack map with [stack]. If the
// kernel switches a process while the main thread is on an alternate stack, then the kernel will
// label the wrong map with [stack]. This test verifies that when the above situation happens, the
// main thread's stack is found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* main_stack_base;
  size_t main_stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));

  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  getstack_signal_handler_arg.signal_stack_base = sig_stack;
  getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
  getstack_signal_handler_arg.main_stack_base = main_stack_base;
  getstack_signal_handler_arg.main_stack_size = main_stack_size;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(untag_address(&local_variable), reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check that something on the stack is in the range
// [stack_base, stack_base + stack_size). See http://b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

#if defined(__BIONIC__)
static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;

static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();

  // Wait for our parent to call pthread_gettid_np on us before exiting.
  pthread_mutex_lock(&pthread_gettid_np_mutex);
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  return nullptr;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  // Ensure the other thread doesn't exit until after we've called
  // pthread_gettid_np on it.
  pthread_mutex_lock(&pthread_gettid_np_mutex);

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  // Release the other thread and wait for it to exit.
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  ASSERT_EQ(0, pthread_join(t, nullptr));

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_SKIP() << "pthread_gettid_np not available";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

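// pthread_cleanup_push() and pthread_cleanup_pop() may be (and in bionic are)
// macros whose expansions contain an unmatched '{' and '}' respectively, so
// every push must be paired with a pop in the same lexical scope.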
static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(AbortCleanupRoutine, nullptr);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(nullptr);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return nullptr;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

TEST(pthread, pthread_mutexattr_protocol) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int protocol;
  ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
  ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
  for (size_t repeat = 0; repeat < 2; ++repeat) {
    for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
      ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
      ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
      ASSERT_EQ(protocol, set_protocol);
    }
  }
}

struct PthreadMutex {
  pthread_mutex_t lock;

  explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
    init(mutex_type, protocol);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type, int protocol) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

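// Tries to unlock |mutex| from a freshly created thread and returns that
// thread's pthread_mutex_unlock() result; used below to check which mutex
// types enforce ownership by failing with EPERM for a non-owner.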
static int UnlockFromAnotherThread(pthread_mutex_t* mutex) {
  pthread_t thread;
  pthread_create(&thread, nullptr, [](void* mutex_voidp) -> void* {
    pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(mutex_voidp);
    intptr_t result = pthread_mutex_unlock(mutex);
    return reinterpret_cast<void*>(result);
  }, mutex);
  void* result;
  EXPECT_EQ(0, pthread_join(thread, &result));
  return reinterpret_cast<intptr_t>(result);
}

static void TestPthreadMutexLockNormal(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  if (protocol == PTHREAD_PRIO_INHERIT) {
    ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

static void TestPthreadMutexLockErrorCheck(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  if (protocol == PTHREAD_PRIO_NONE) {
    ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  } else {
    ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

static void TestPthreadMutexLockRecursive(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_NORMAL) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_pi) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
}

TEST(pthread, pthread_mutex_pi_count_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // Bionic only supports 65536 pi mutexes in 32-bit programs.
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
  std::vector<pthread_mutex_t> mutexes(65536);
  // Test whether we can use 65536 pi mutexes at the same time.
  // Run twice to check whether freed pi mutexes can be recycled.
  for (int repeat = 0; repeat < 2; ++repeat) {
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
    }
    pthread_mutex_t m;
    ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_lock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_unlock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_destroy(&m));
    }
  }
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
#else
  GTEST_SKIP() << "pi mutex count not limited to 64Ki";
#endif
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

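// Checks that a thread blocked waiting for a mutex is woken up and can take
// the lock once the owner unlocks it. WaitUntilThreadSleep() ensures the
// helper thread is actually blocked in the kernel before we unlock.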
class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

static int GetThreadPriority(pid_t tid) {
  // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
  // priority after priority inheritance. So read /proc/<pid>/stat to get the dynamic priority.
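  // In /proc/<pid>/stat the 18th field (strs[17] after splitting on spaces) is the
  // dynamic priority; for normal (SCHED_OTHER) threads the kernel reports it as
  // 20 + nice, which is why the tests below expect 20 for nice 0 and 21 for nice 1.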
  std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
  std::string content;
  int result = INT_MAX;
  if (!android::base::ReadFileToString(filename, &content)) {
    return result;
  }
  std::vector<std::string> strs = android::base::Split(content, " ");
  if (strs.size() < 18) {
    return result;
  }
  if (!android::base::ParseInt(strs[17], &result)) {
    return INT_MAX;
  }
  return result;
}

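// Priority-inheritance wakeup test: a child thread at nice 1 takes the mutex,
// then the nice-0 main thread blocks on it. With PTHREAD_PRIO_INHERIT the
// child's dynamic priority should be boosted to 20 while the main thread
// waits; without it, the child stays at 21.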
class PIMutexWakeupHelper {
 private:
  PthreadMutex m;
  int protocol;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_CHILD_READY,
    LOCK_WAITING,
    LOCK_RELEASED,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> main_tid;
  std::atomic<pid_t> child_tid;
  PthreadMutex start_thread_m;

  static void thread_fn(PIMutexWakeupHelper* helper) {
    helper->child_tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
    ASSERT_EQ(21, GetThreadPriority(gettid()));
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    helper->progress = LOCK_CHILD_READY;
    ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));

    ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
    WaitUntilThreadSleep(helper->main_tid);
    ASSERT_EQ(LOCK_WAITING, helper->progress);

    if (helper->protocol == PTHREAD_PRIO_INHERIT) {
      ASSERT_EQ(20, GetThreadPriority(gettid()));
    } else {
      ASSERT_EQ(21, GetThreadPriority(gettid()));
    }
    helper->progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
  }

 public:
  explicit PIMutexWakeupHelper(int mutex_type, int protocol)
      : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
    main_tid = gettid();
    ASSERT_EQ(20, GetThreadPriority(main_tid));
    progress = LOCK_INITIALIZED;
    child_tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
              reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(child_tid);
    ASSERT_EQ(LOCK_CHILD_READY, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
    progress = LOCK_WAITING;
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));

    ASSERT_EQ(LOCK_RELEASED, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
};

TEST(pthread, pthread_mutex_pi_wakeup) {
  for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
    for (int protocol : {PTHREAD_PRIO_INHERIT}) {
      PIMutexWakeupHelper helper(type, protocol);
      helper.test();
    }
  }
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != nullptr);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_SKIP() << "pthread_mutex supports 32-bit tid";
#endif
}

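// Like the condition-variable timeout helper above: with the mutex already
// held, an expired or past deadline must fail with ETIMEDOUT and an
// out-of-range tv_nsec with EINVAL; once the mutex is free, a deadline in the
// future must let the lock succeed.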
static void pthread_mutex_timedlock_helper(clockid_t clock,
                                           int (*lock_function)(pthread_mutex_t* __mutex,
                                                                const timespec* __timeout)) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, lock_function(&m, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, lock_function(&m, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(0, lock_function(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(
      CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_mutex_timedlock_helper(
      CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif  // __BIONIC__
}

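// Priority-inheritance mutexes presumably take a separate code path, so repeat
// the timeout check for them: the owner keeps the PI mutex locked while a
// second thread's timed lock expires with ETIMEDOUT.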
static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
                                              int (*lock_function)(pthread_mutex_t* __mutex,
                                                                   const timespec* __timeout)) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);

  timespec ts;
  clock_gettime(clock, &ts);
  ts.tv_sec += 1;
  ASSERT_EQ(0, lock_function(&m.lock, &ts));

  struct ThreadArgs {
    clockid_t clock;
    int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
    PthreadMutex& m;
  };

  ThreadArgs thread_args = {
    .clock = clock,
    .lock_function = lock_function,
    .m = m,
  };

  auto ThreadFn = [](void* arg) -> void* {
    auto args = static_cast<ThreadArgs*>(arg);
    timespec ts;
    clock_gettime(args->clock, &ts);
    ts.tv_sec += 1;
    intptr_t result = args->lock_function(&args->m.lock, &ts);
    return reinterpret_cast<void*>(result);
  };

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
  void* result;
  ASSERT_EQ(0, pthread_join(thread, &result));
  ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_timedlock_pi) {
  pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(
      CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_mutex_timedlock_pi_helper(
      CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_invalid) {
#if defined(__BIONIC__)
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_mutex_clocklock(&mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_mutex_using_destroyed_mutex) {
#if defined(__BIONIC__)
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
  ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_lock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_unlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_trylock called on a destroyed mutex");
  timespec ts;
  ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_MONOTONIC, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_REALTIME, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_PROCESS_CPUTIME_ID, &ts),
              ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_destroy called on a destroyed mutex");
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

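// Hands out storage aligned to |alignment| but deliberately misaligned for
// 2 * |alignment| (the returned address is congruent to |alignment| modulo
// 2 * |alignment|), so the test below can check that pthread types don't
// require stricter alignment than they claim.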
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (const auto& p : allocated_array) {
      delete[] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte-aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
#else
  GTEST_SKIP() << "32-bit bionic-only test";
#endif
}

TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
#else
  GTEST_SKIP() << "32-bit bionic-only test";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_SKIP() << "64-bit bionic-only test";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_SKIP() << "64-bit bionic-only test";
#endif
}

extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

static volatile bool signal_handler_on_altstack_done;

__attribute__((__noinline__))
static void signal_handler_backtrace() {
  // Check if we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
}

__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  signal_handler_backtrace();
  signal_handler_logging();
  signal_handler_snprintf();
}

TEST(pthread, big_enough_signal_stack) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}

TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}

struct BarrierTestHelperData {
  size_t thread_count;
  pthread_barrier_t barrier;
  std::atomic<int> finished_mask;
  std::atomic<int> serial_thread_count;
  size_t iteration_count;
  std::atomic<size_t> finished_iteration_count;

  BarrierTestHelperData(size_t thread_count, size_t iteration_count)
      : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
        iteration_count(iteration_count), finished_iteration_count(0) {
  }
};

struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};

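// Each thread sets its bit in finished_mask after returning from the wait;
// whichever thread completes the mask checks that exactly one waiter got
// PTHREAD_BARRIER_SERIAL_THREAD and then resets the bookkeeping for the next
// iteration.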
BarrierTestHelper(BarrierTestHelperArg * arg)2678 static void BarrierTestHelper(BarrierTestHelperArg* arg) {
2679   for (size_t i = 0; i < arg->data->iteration_count; ++i) {
2680     int result = pthread_barrier_wait(&arg->data->barrier);
2681     if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
2682       arg->data->serial_thread_count++;
2683     } else {
2684       ASSERT_EQ(0, result);
2685     }
2686     int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
2687     mask |= 1 << arg->id;
2688     if (mask == ((1 << arg->data->thread_count) - 1)) {
2689       ASSERT_EQ(1, arg->data->serial_thread_count);
2690       arg->data->finished_iteration_count++;
2691       arg->data->finished_mask = 0;
2692       arg->data->serial_thread_count = 0;
2693     }
2694   }
2695 }

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
  ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
  std::vector<pthread_t> threads(data.thread_count);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].id = i;
    args[i].data = &data;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
  ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
}
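
// The smoke test above boils down to this minimal lifecycle; a hedged sketch
// (this test is an addition): init for N waiters, wait, destroy. Exactly one
// waiter per round is told it was the serial thread.
TEST(pthread, pthread_barrier_minimal_usage_sketch) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  auto other = std::async(std::launch::async,
                          [&barrier] { return pthread_barrier_wait(&barrier); });
  int a = pthread_barrier_wait(&barrier);  // Blocks until both threads arrive.
  int b = other.get();
  ASSERT_TRUE(a == 0 || a == PTHREAD_BARRIER_SERIAL_THREAD);
  ASSERT_TRUE(b == 0 || b == PTHREAD_BARRIER_SERIAL_THREAD);
  ASSERT_NE(a, b);  // One thread gets PTHREAD_BARRIER_SERIAL_THREAD, the other 0.
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
}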

struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};

static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed immediately after pthread_barrier_wait() returns.
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}

struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    // The first wait makes every thread's write visible to every other thread...
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    // ...and the second keeps the next iteration's writes from racing these reads.
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}

TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}

TEST(pthread, pthread_barrier_init_zero_count) {
  pthread_barrier_t barrier;
  ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
}

TEST(pthread, pthread_spinlock_smoke) {
  pthread_spinlock_t lock;
  ASSERT_EQ(0, pthread_spin_init(&lock, 0));
  ASSERT_EQ(0, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_lock(&lock));
  ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_destroy(&lock));
}
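
// Hedged usage sketch (an addition, not original): a spinlock guarding a plain
// counter across two threads. Spinlocks busy-wait, so they only make sense for
// very short critical sections.
TEST(pthread, pthread_spinlock_usage_sketch) {
  pthread_spinlock_t lock;
  ASSERT_EQ(0, pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE));
  int counter = 0;
  auto worker = [&lock, &counter] {
    for (int i = 0; i < 10000; ++i) {
      pthread_spin_lock(&lock);
      ++counter;  // The lock serializes this read-modify-write.
      pthread_spin_unlock(&lock);
    }
  };
  auto f = std::async(std::launch::async, worker);
  worker();
  f.get();
  ASSERT_EQ(20000, counter);
  ASSERT_EQ(0, pthread_spin_destroy(&lock));
}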

TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);

  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);

  ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
}
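
// Hedged sketch of the attribute in action (an addition, not original): a
// detached thread's resources are reclaimed automatically when it exits, so it
// must not be joined.
TEST(pthread, pthread_create_detached_sketch) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  // No pthread_join(t, ...): joining a detached thread is undefined behavior.
  ASSERT_EQ(0, pthread_attr_destroy(&attr));
}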

TEST(pthread, pthread_create__mmap_failures) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default the limit is 64Ki mappings per process
  // (though some will already be in use).
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
    // Alternate the protection so adjacent mappings can't be merged into one VMA.
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}
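
// Hedged companion sketch: the "64Ki" limit above is the kernel's
// vm.max_map_count sysctl (commonly 65530). This helper is an assumption, and
// <android-base/file.h> would be needed for ReadFileToString.
static bool ReadMaxMapCountSketch(int* out) {
  std::string s;
  if (!android::base::ReadFileToString("/proc/sys/vm/max_map_count", &s)) {
    return false;
  }
  return android::base::ParseInt(android::base::Trim(s), out);
}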

TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}

TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}

TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);

  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);

  ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
}

TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}
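
// For contrast with the deliberately invalid priority above, a hedged sketch
// (an addition) of a valid explicit-scheduling setup. Requesting SCHED_FIFO
// typically needs privileges, so callers must be prepared for EPERM.
static int CreateFifoThreadSketch(pthread_t* t, void* (*fn)(void*)) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
  pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  pthread_attr_setschedparam(&attr, &param);
  int rc = pthread_create(t, &attr, fn, nullptr);  // EPERM without the right privileges.
  pthread_attr_destroy(&attr);
  return rc;
}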

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_OTHER, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}
