• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <gtest/gtest.h>
18 
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <limits.h>
22 #include <malloc.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <sys/mman.h>
26 #include <sys/syscall.h>
27 #include <time.h>
28 #include <unistd.h>
29 
30 #include "private/ScopeGuard.h"
31 #include "ScopedSignalHandler.h"
32 
// A key can be created and deleted exactly once; deleting the same key a
// second time must fail with EINVAL.
TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}
40 
// Verifies that the full sysconf(_SC_THREAD_KEYS_MAX) quota of TLS keys can
// actually be allocated, and that allocation beyond it fails with EAGAIN.
TEST(pthread, pthread_key_create_lots) {
#if defined(__BIONIC__) // glibc uses keys internally that its sysconf value doesn't account for.
  // POSIX says PTHREAD_KEYS_MAX should be at least 128.
  ASSERT_GE(PTHREAD_KEYS_MAX, 128);

  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);

  // sysconf shouldn't return a smaller value.
  ASSERT_GE(sysconf_max, PTHREAD_KEYS_MAX);

  // We can allocate _SC_THREAD_KEYS_MAX keys.
  sysconf_max -= 2; // (Except that gtest takes two for itself.)
  std::vector<pthread_key_t> keys;
  for (int i = 0; i < sysconf_max; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << sysconf_max;
    keys.push_back(key);
  }

  // ...and that really is the maximum.
  pthread_key_t key;
  ASSERT_EQ(EAGAIN, pthread_key_create(&key, NULL));

  // (Don't leak all those keys!)
  for (size_t i = 0; i < keys.size(); ++i) {
    ASSERT_EQ(0, pthread_key_delete(keys[i]));
  }
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}
73 
// After pthread_key_delete, the key's value is gone (getspecific returns
// NULL) and the key is unusable (setspecific returns EINVAL).
TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}
86 
// TLS values set before fork() must be visible in the child (which inherits
// the forking thread) and must remain intact in the parent afterwards.
TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    // _exit (not exit) so we don't run atexit handlers in the child.
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
}
110 
// Thread start routine: interprets |key| as a pthread_key_t* and returns the
// calling thread's value for that key (NULL for a freshly started thread).
static void* DirtyKeyFn(void* key) {
  pthread_key_t* tls_key = reinterpret_cast<pthread_key_t*>(key);
  return pthread_getspecific(*tls_key);
}
114 
// A new thread must see NULL for an unset key even if its stack memory is
// dirty (pre-filled with 0xff) — i.e. TLS slots must be zero-initialized.
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  // Dirty the whole stack so uninitialized TLS storage would read as ~0.
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
}
137 
// Identity thread routine: returns its argument unchanged.
static void* IdFn(void* arg) {
  void* result = arg;
  return result;
}
141 
// Thread routine that sleeps for the number of seconds encoded in |arg|
// (the pointer value itself is the duration) and returns NULL.
static void* SleepFn(void* arg) {
  const uintptr_t seconds = reinterpret_cast<uintptr_t>(arg);
  sleep(seconds);
  return NULL;
}
146 
// Thread routine that busy-waits until the bool pointed to by |arg| becomes
// true, then returns NULL. The volatile read forces a fresh load each pass.
static void* SpinFn(void* arg) {
  volatile bool* flag = reinterpret_cast<volatile bool*>(arg);
  for (;;) {
    if (*flag) {
      return NULL;
    }
  }
}
153 
// Thread routine that joins the pthread_t encoded in |arg| (discarding that
// thread's return value) and returns pthread_join's error code cast to void*.
static void* JoinFn(void* arg) {
  pthread_t target = reinterpret_cast<pthread_t>(arg);
  int error = pthread_join(target, NULL);
  return reinterpret_cast<void*>(error);
}
157 
// Asserts (via gtest) that thread |t|'s detach state matches |is_detached|,
// as reported by pthread_getattr_np.
static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  // Destroy before asserting so the attr isn't leaked on failure paths.
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}
166 
// Creates a thread that exits immediately and joins it, leaving |t| as a
// pthread_t that refers to a no-longer-existing thread (for ESRCH tests).
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}
172 
// Basic create/join round trip: the joiner receives the value the thread
// routine returned.
TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}
183 
// pthread_create must fail with EAGAIN when the requested stack size is
// unsatisfiable (here: the largest page-aligned size_t value).
TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}
192 
// Joining a thread that has been detached must fail with EINVAL.
TEST(pthread, pthread_no_join_after_detach) {
  pthread_t t1;
  // SleepFn(5) keeps the thread alive long enough for the checks below.
  ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  void* result;
  ASSERT_EQ(EINVAL, pthread_join(t1, &result));
}
205 
// If another thread is already blocked in pthread_join, a later
// pthread_detach "succeeds" but has no effect; the join still completes.
TEST(pthread, pthread_no_op_detach_after_join) {
  // NOTE(review): plain bool shared between threads — relies on the sleep(1)
  // and join ordering rather than atomics; presumably deliberate for a test.
  bool done = false;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SpinFn, &done));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // ...a call to pthread_detach on thread 1 will "succeed" (silently fail)...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, false);

  done = true;

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
229 
// Joining yourself is a guaranteed deadlock, reported as EDEADLK.
TEST(pthread, pthread_join_self) {
  void* result;
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), &result));
}
234 
// Fixture for http://b/37410: a thread joining the (exiting) main thread.
// The mutex is used purely as a started-running handshake between the two.
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  // Entry point: spawns thread_fn, waits until it signals that it is
  // running, then exits this thread via pthread_exit. Note |data| lives on
  // this thread's stack while the child still uses it — safe only because
  // the child joins this very thread before the process ends.
  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  // Child thread: signals the handshake mutex, then joins the main thread.
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};
269 
270 // Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
271 // run this test (which exits normally) in its own process.
// Runs TestBug37410 in a child process (via ASSERT_EXIT) and expects a clean
// exit(0) — the process ends when the main thread exits and t joins it.
TEST(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}
277 
// Thread routine: blocks in sigwait() for any signal, stores the received
// signal number through |arg| (an int*), and returns sigwait's result.
static void* SignalHandlerFn(void* arg) {
  sigset_t all_signals;
  sigfillset(&all_signals);
  int* received = reinterpret_cast<int*>(arg);
  int rc = sigwait(&all_signals, received);
  return reinterpret_cast<void*>(rc);
}
283 
// Exercises pthread_sigmask: block SIGUSR1, confirm both pthread_sigmask and
// sigprocmask agree, then deliver SIGUSR1 to a sigwait()ing thread and check
// it arrives. Restores the original mask at the end.
TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1. (Check the result: a silent failure here would
  // leave the test hanging in pthread_join below.)
  ASSERT_EQ(0, pthread_kill(signal_thread, SIGUSR1));

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}
324 
// Thread names are limited (16 bytes on Linux); an over-long name is ERANGE.
TEST(pthread, pthread_setname_np__too_long) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}
332 
// Setting a short name on the calling thread should succeed.
TEST(pthread, pthread_setname_np__self) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}
340 
// Setting another thread's name should succeed, but only on kernels that
// expose /proc/self/task/<tid>/comm (emulator kernels may not).
TEST(pthread, pthread_setname_np__other) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  // Emulator kernels don't currently support setting the name of other threads.
  char* filename = NULL;
  // Check asprintf's result: on failure |filename| is undefined/NULL and
  // must not be passed to stat().
  ASSERT_NE(-1, asprintf(&filename, "/proc/self/task/%d/comm", gettid()));
  struct stat sb;
  bool has_comm = (stat(filename, &sb) != -1);
  free(filename);

  if (has_comm) {
    pthread_t t1;
    ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
    ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
  } else {
    fprintf(stderr, "skipping test: this kernel doesn't have /proc/self/task/tid/comm files!\n");
  }
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}
361 
// Naming a thread that has already exited must fail with ESRCH.
TEST(pthread, pthread_setname_np__no_such_thread) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ESRCH, pthread_setname_np(dead_thread, "short 3"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}
373 
// pthread_kill with signal 0 performs only existence/permission checking.
TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}
378 
// A negative signal number is rejected with EINVAL.
TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}
382 
// SIGALRM handler that re-raises SIGALRM once via pthread_kill; the static
// counter stops the recursion after the second delivery.
static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}
391 
// Installs the re-raising SIGALRM handler (restored by ScopedSignalHandler's
// destructor) and triggers the first delivery.
TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}
396 
// Detaching an already-exited (joined) thread must fail with ESRCH.
TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}
403 
// Regression test: detaching joinable threads must not leak their
// pthread_internal_t bookkeeping. Compares heap usage (mallinfo) before and
// after several rounds of create+detach.
TEST(pthread, pthread_detach__leak) {
  size_t initial_bytes = 0;
  // Run this loop more than once since the first loop causes some memory
  // to be allocated permanently. Run an extra loop to help catch any subtle
  // memory leaks.
  for (size_t loop = 0; loop < 3; loop++) {
    // Set the initial bytes on the second loop since the memory in use
    // should have stabilized.
    if (loop == 1) {
      initial_bytes = mallinfo().uordblks;
    }

    pthread_attr_t attr;
    ASSERT_EQ(0, pthread_attr_init(&attr));
    ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

    std::vector<pthread_t> threads;
    for (size_t i = 0; i < 32; ++i) {
      pthread_t t;
      ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
      threads.push_back(t);
    }
    // Don't leak the attribute object itself.
    ASSERT_EQ(0, pthread_attr_destroy(&attr));

    sleep(1); // Give the threads a chance to exit.

    // Iterate over exactly the threads we created, not a hard-coded count.
    for (size_t i = 0; i < threads.size(); ++i) {
      ASSERT_EQ(0, pthread_detach(threads[i])) << i;
    }
  }

  size_t final_bytes = mallinfo().uordblks;
  // Explicit narrowing: the unsigned difference may wrap, yielding a
  // negative int, which ASSERT_LT below handles correctly.
  int leaked_bytes = static_cast<int>(final_bytes - initial_bytes);

  // User code (like this test) doesn't know how large pthread_internal_t is.
  // We can be pretty sure it's more than 128 bytes.
  ASSERT_LT(leaked_bytes, 32 /*threads*/ * 128 /*bytes*/);
}
441 
// A live thread's CPU clock id can be obtained and read with clock_gettime.
TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  pthread_t t;
  // SleepFn(5) keeps the thread alive while we query its clock.
  ASSERT_EQ(0, pthread_create(&t, NULL, SleepFn, reinterpret_cast<void*>(5)));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}
451 
// Asking for the CPU clock of an exited thread must fail with ESRCH.
TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}
459 
// pthread_getschedparam on an exited thread must fail with ESRCH.
TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}
468 
// pthread_setschedparam on an exited thread must fail with ESRCH.
TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}
477 
// Joining an already-joined (dead) thread must fail with ESRCH.
TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  void* result;
  ASSERT_EQ(ESRCH, pthread_join(dead_thread, &result));
}
485 
// Even signal 0 (existence check) on a dead thread reports ESRCH.
TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}
492 
// Only one thread may join a given thread: a second simultaneous join fails
// with EINVAL while the first join still completes normally.
TEST(pthread, pthread_join__multijoin) {
  bool done = false;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SpinFn, &done));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  done = true;

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
514 
// Regression test for http://b/11693195: pthread_join could return before the
// thread had fully exited; unmapping the stack then crashed the thread.
// Repeatedly create/join/unmap to give the race a chance to fire.
TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    // Don't hand pthread_create a bogus stack if the mapping failed.
    ASSERT_NE(MAP_FAILED, stack);

    pthread_attr_t a;
    ASSERT_EQ(0, pthread_attr_init(&a));
    ASSERT_EQ(0, pthread_attr_setstack(&a, stack, stack_size));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}
532 
// Thread routine: reports the calling thread's actual guard size (as seen
// via pthread_getattr_np) through |arg|, which must point to a size_t.
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t self_attributes;
  pthread_getattr_np(pthread_self(), &self_attributes);
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_getguardsize(&self_attributes, out);
  return NULL;
}
539 
GetActualGuardSize(const pthread_attr_t & attributes)540 static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
541   size_t result;
542   pthread_t t;
543   pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
544   void* join_result;
545   pthread_join(t, &join_result);
546   return result;
547 }
548 
// Thread routine: reports the calling thread's actual stack size (as seen
// via pthread_getattr_np) through |arg|, which must point to a size_t.
static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t self_attributes;
  pthread_getattr_np(pthread_self(), &self_attributes);
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_getstacksize(&self_attributes, out);
  return NULL;
}
555 
GetActualStackSize(const pthread_attr_t & attributes)556 static size_t GetActualStackSize(const pthread_attr_t& attributes) {
557   size_t result;
558   pthread_t t;
559   pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
560   void* join_result;
561   pthread_join(t, &join_result);
562   return result;
563 }
564 
// pthread_attr_setguardsize stores the requested value verbatim; rounding to
// page granularity happens later, in pthread_create.
TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  // NOTE(review): hard-codes a 4KiB page size — would need updating for
  // 16KiB-page devices.
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}
590 
// pthread_attr_setstacksize rejects undersized requests (EINVAL) and stores
// valid ones verbatim; actual rounding behavior differs between C libraries.
TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  // A failed set must leave the previous (default) value untouched.
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  // Bionic rounds up, which is what POSIX allows.
  ASSERT_EQ(GetActualStackSize(attributes), (32 + 4)*1024U);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
624 
// Single-threaded smoke test of the rwlock state machine: recursive read
// locking, writer exclusion (EBUSY on trylock), and bionic's EDEADLK
// detection for relocking a write-held lock.
TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

#ifdef __BIONIC__
  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
#endif

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
674 
// Counts how many times pthread_once actually runs its init routine.
static int g_once_fn_call_count = 0;
static void OnceFn() {
  g_once_fn_call_count += 1;
}
679 
// pthread_once runs the init routine exactly once, however many times the
// same once_control is passed.
TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}
686 
// Records the execution order of the two init routines below ("1" then "2"
// is expected).
static std::string pthread_once_1934122_result = "";

// Inner init routine invoked (via pthread_once) from within Routine1.
static void Routine2() {
  pthread_once_1934122_result += "2";
}
692 
// Outer init routine: calls pthread_once recursively (on a different,
// function-local once_control) — the exact scenario of http://b/1934122.
static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}
698 
// Nested pthread_once must not deadlock, and both routines must run in order.
TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}
706 
// pthread_atfork handlers that record their call order by shifting their id
// into a global: e.g. 0x12 means handler 1 ran before handler 2.
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }
716 
// Verifies pthread_atfork handler ordering: prepare handlers run in reverse
// registration order; parent and child handlers run in registration order.
TEST(pthread, pthread_atfork) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0x12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, g_atfork_prepare_calls);

  // Reap the child and propagate its verdict: if the child-side ASSERT above
  // failed, _exit would report a non-zero status. (The original never
  // waited, leaving a zombie and silently ignoring child failures.)
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(0, WEXITSTATUS(status));
}
734 
// The default contention scope on Linux is PTHREAD_SCOPE_SYSTEM.
TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}
743 
// Default condattr values: CLOCK_REALTIME and process-private.
TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}
756 
// setclock accepts CLOCK_REALTIME and CLOCK_MONOTONIC, round-trips through
// getclock, and rejects CPU-time clocks with EINVAL.
TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}
772 
// Bionic-internal detail: the condattr flags (clock, pshared) are packed
// into the cond's state word and must survive signal/broadcast operations.
TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__) // This tests a bionic implementation detail.
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  // Reinterpret the cond's internal value as a condattr to read back the
  // packed flags (relies on bionic's representation; see the #if above).
  attr = static_cast<pthread_condattr_t>(cond_var.value);
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}
798 
// pthread_mutex_timedlock: returns ETIMEDOUT when the mutex is held past the
// (absolute CLOCK_REALTIME) deadline, and 0 when the mutex is free.
TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  // Normalize: if clock_gettime returned tv_nsec == 999999999, the increment
  // makes the timespec invalid and timedlock would return EINVAL, not
  // ETIMEDOUT.
  if (ts.tv_nsec >= 1000000000) {
    ts.tv_nsec -= 1000000000;
    ++ts.tv_sec;
  }
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  if (ts.tv_nsec >= 1000000000) { // Same normalization as above.
    ts.tv_nsec -= 1000000000;
    ++ts.tv_sec;
  }
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}
821 
// Checks how pthread_attr_getstack reports the *main* thread's stack: no
// guard page, size derived from RLIMIT_STACK (including non-page-multiple
// and tiny limits), and a top address matching /proc/self/maps' [stack].
TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  // Restore the original limit even if an ASSERT below bails out early.
  auto guard = create_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}
910