/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"

#include <errno.h>
#include <semaphore.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <async_safe/log.h>
#include <bionic/reserved_signals.h>

#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_futex.h"
#include "private/bionic_tls.h"

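// The global thread list: a doubly-linked list of the pthread_internal_t for
// every thread that bionic knows about, guarded by a reader/writer lock.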
static pthread_internal_t* g_thread_list = nullptr;
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;

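// Inserts |thread| at the head of the global list. The returned pthread_t
// handle is simply the address of the pthread_internal_t itself.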
pthread_t __pthread_internal_add(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  // We insert at the head.
  thread->next = g_thread_list;
  thread->prev = nullptr;
  if (thread->next != nullptr) {
    thread->next->prev = thread;
  }
  g_thread_list = thread;
  return reinterpret_cast<pthread_t>(thread);
}

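// Unlinks |thread| from the global list. The thread's memory is not released;
// see __pthread_internal_remove_and_free() below.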
void __pthread_internal_remove(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  if (thread->next != nullptr) {
    thread->next->prev = thread->prev;
  }
  if (thread->prev != nullptr) {
    thread->prev->next = thread->next;
  } else {
    g_thread_list = thread->next;
  }
}

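// Releases the thread's mapping (stack plus pthread_internal_t) if bionic
// allocated it; a caller-provided stack has mmap_size == 0 and is left alone.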
static void __pthread_internal_free(pthread_internal_t* thread) {
  if (thread->mmap_size != 0) {
    // Free mapped space, including thread stack and pthread_internal_t.
    munmap(thread->mmap_base, thread->mmap_size);
  }
}

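// Convenience wrapper: unlink the thread from the global list, then release
// its mapping.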
void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
  __pthread_internal_remove(thread);
  __pthread_internal_free(thread);
}

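// Maps a pthread_t to the corresponding kernel tid, or -1 if |thread_id|
// doesn't name a live thread.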
pid_t __pthread_internal_gettid(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = __pthread_internal_find(thread_id, caller);
  return thread ? thread->tid : -1;
}

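// Validates |thread_id| by checking it against the calling thread and then
// walking the global list. |caller| is used only in diagnostic messages.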
pthread_internal_t* __pthread_internal_find(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    // Make sure to release the lock before the abort below. Otherwise,
    // some apps might deadlock in their own crash handlers (see b/6565627).
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but from API level 26 we catch this error.
  if (android_get_application_target_sdk_version() >= 26) {
    if (thread == nullptr) {
      // This seems to be a common mistake, and it's relatively harmless because
      // there will never be a valid thread at address 0, whereas other invalid
      // addresses might sometimes contain threads or things that look enough like
      // threads for us to do some real damage by continuing.
      // TODO: try getting rid of this when Treble lets us keep vendor blobs on an old API level.
      async_safe_format_log(ANDROID_LOG_WARN, "libc", "invalid pthread_t (0) passed to %s", caller);
    } else {
      async_safe_fatal("invalid pthread_t %p passed to %s", thread, caller);
    }
  }
  return nullptr;
}

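// Runs |func| with |arg| on every live thread in the process. The current
// thread calls |func| directly; every other thread runs it from a signal
// handler, so |func| must be async-signal-safe. Returns false if any
// invocation returns false or if setup, signaling, or waiting fails.
//
// A minimal usage sketch (not part of this file; purge_thread_cache() is a
// hypothetical async-signal-safe per-thread cleanup routine):
//
//   static bool purge(void*) {
//     purge_thread_cache();
//     return true;  // returning false makes the whole call report failure
//   }
//   ...
//   bool ok = android_run_on_all_threads(purge, nullptr);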
bool android_run_on_all_threads(bool (*func)(void*), void* arg) {
  // Take the locks in this order to avoid inversion (pthread_create ->
  // __pthread_internal_add).
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);

  // Call the function directly for the current thread so that we don't need to worry about
  // the consequences of synchronizing with ourselves.
  if (!func(arg)) {
    return false;
  }

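  // The semaphore and the function/argument pair live in statics (and the
  // handler below is a captureless lambda) because sa_sigaction needs a plain
  // function pointer. Concurrent callers are serialized by the locks above.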
  static sem_t g_sem;
  if (sem_init(&g_sem, 0, 0) != 0) {
    return false;
  }

  static bool (*g_func)(void*);
  static void* g_arg;
  g_func = func;
  g_arg = arg;

  static _Atomic(bool) g_retval;
  atomic_init(&g_retval, true);

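  // Runs on each signaled thread: call the function, record any failure, and
  // post the semaphore so the initiating thread can count completions.
  // ErrnoRestorer keeps the interrupted code's errno intact.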
  auto handler = [](int, siginfo_t*, void*) {
    ErrnoRestorer restorer;
    if (!g_func(g_arg)) {
      atomic_store(&g_retval, false);
    }
    sem_post(&g_sem);
  };

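  // Install the handler with every signal blocked while it runs (sigfillset),
  // so |func| isn't itself interrupted by other signals.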
  struct sigaction act = {}, oldact;
  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = handler;
  sigfillset(&act.sa_mask);
  if (sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &act, &oldact) != 0) {
    sem_destroy(&g_sem);
    return false;
  }

  pid_t my_pid = getpid();
  size_t num_tids = 0;
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    // The function is called directly for the current thread above, so no need to send a signal to
    // ourselves to call it here.
    if (t == __get_thread()) continue;

    // If a thread is terminating (has blocked signals) or has already terminated, our signal will
    // never be received, so we need to check for that condition and skip the thread if it is the
    // case.
    if (atomic_load(&t->terminating)) continue;

    if (tgkill(my_pid, t->tid, BIONIC_SIGNAL_RUN_ON_ALL_THREADS) == 0) {
      ++num_tids;
    } else {
      atomic_store(&g_retval, false);
    }
  }

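  // Wait for one sem_post per successfully signaled thread. TEMP_FAILURE_RETRY
  // restarts the wait if it's interrupted by an unrelated signal.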
  for (size_t i = 0; i != num_tids; ++i) {
    if (TEMP_FAILURE_RETRY(sem_wait(&g_sem)) != 0) {
      atomic_store(&g_retval, false);
      break;
    }
  }

  sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &oldact, nullptr);
  sem_destroy(&g_sem);
  return atomic_load(&g_retval);
}