1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "thread_list.h"
18 
19 #include <dirent.h>
20 #include <nativehelper/scoped_local_ref.h>
21 #include <nativehelper/scoped_utf_chars.h>
22 #include <sys/resource.h>  // For getpriority()
23 #include <sys/types.h>
24 #include <unistd.h>
25 
26 #include <map>
27 #include <sstream>
28 #include <tuple>
29 #include <vector>
30 
31 #include "android-base/stringprintf.h"
32 #include "art_field-inl.h"
33 #include "base/aborting.h"
34 #include "base/histogram-inl.h"
35 #include "base/mutex-inl.h"
36 #include "base/systrace.h"
37 #include "base/time_utils.h"
38 #include "base/timing_logger.h"
39 #include "debugger.h"
40 #include "gc/collector/concurrent_copying.h"
41 #include "gc/gc_pause_listener.h"
42 #include "gc/heap.h"
43 #include "gc/reference_processor.h"
44 #include "gc_root.h"
45 #include "jni/jni_internal.h"
46 #include "lock_word.h"
47 #include "mirror/string.h"
48 #include "monitor.h"
49 #include "native_stack_dump.h"
50 #include "obj_ptr-inl.h"
51 #include "scoped_thread_state_change-inl.h"
52 #include "thread.h"
53 #include "trace.h"
54 #include "unwindstack/AndroidUnwinder.h"
55 #include "well_known_classes.h"
56 
57 #if ART_USE_FUTEXES
58 #include <linux/futex.h>
59 #include <sys/syscall.h>
60 #endif  // ART_USE_FUTEXES
61 
62 namespace art HIDDEN {
63 
64 using android::base::StringPrintf;
65 
66 static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
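// Every SuspendAll() pause is recorded in suspend_all_histogram_; pauses longer than this
// threshold (5 ms) additionally produce a warning log.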
67 
68 // Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
69 // some history.
70 static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = true;
71 
72 ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
73     : suspend_all_count_(0),
74       unregistering_count_(0),
75       suspend_all_histogram_("suspend all histogram", 16, 64),
76       long_suspend_(false),
77       shut_down_(false),
78       thread_suspend_timeout_ns_(thread_suspend_timeout_ns),
79       empty_checkpoint_barrier_(new Barrier(0)) {
80   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
81 }
82 
83 ThreadList::~ThreadList() {
84   CHECK(shut_down_);
85 }
86 
87 void ThreadList::ShutDown() {
88   ScopedTrace trace(__PRETTY_FUNCTION__);
89   // Detach the current thread if necessary. If we failed to start, there might not be any threads.
90   // We need to detach the current thread here in case there's another thread waiting to join with
91   // us.
92   bool contains = false;
93   Thread* self = Thread::Current();
94   {
95     MutexLock mu(self, *Locks::thread_list_lock_);
96     contains = Contains(self);
97   }
98   if (contains) {
99     Runtime::Current()->DetachCurrentThread();
100   }
101   WaitForOtherNonDaemonThreadsToExit();
102   // The only caller of this function, ~Runtime, has already disabled GC and
103   // ensured that the last GC is finished.
104   gc::Heap* const heap = Runtime::Current()->GetHeap();
105   CHECK(heap->IsGCDisabledForShutdown());
106 
107   // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
108   //       Thread::Init.
109   SuspendAllDaemonThreadsForShutdown();
110 
111   shut_down_ = true;
112 }
113 
114 bool ThreadList::Contains(Thread* thread) {
115   return find(list_.begin(), list_.end(), thread) != list_.end();
116 }
117 
118 pid_t ThreadList::GetLockOwner() {
119   return Locks::thread_list_lock_->GetExclusiveOwnerTid();
120 }
121 
122 void ThreadList::DumpNativeStacks(std::ostream& os) {
123   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
124   unwindstack::AndroidLocalUnwinder unwinder;
125   for (const auto& thread : list_) {
126     os << "DUMPING THREAD " << thread->GetTid() << "\n";
127     DumpNativeStack(os, unwinder, thread->GetTid(), "\t");
128     os << "\n";
129   }
130 }
131 
132 void ThreadList::DumpForSigQuit(std::ostream& os) {
133   {
134     ScopedObjectAccess soa(Thread::Current());
135     // Only print if we have samples.
136     if (suspend_all_histogram_.SampleSize() > 0) {
137       Histogram<uint64_t>::CumulativeData data;
138       suspend_all_histogram_.CreateHistogram(&data);
139       suspend_all_histogram_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
140     }
141   }
142   bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
143   Dump(os, dump_native_stack);
144   DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
145 }
146 
147 static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
148     NO_THREAD_SAFETY_ANALYSIS {
149   // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
150   // refactor DumpState to avoid skipping analysis.
151   Thread::DumpState(os, nullptr, tid);
152   if (dump_native_stack) {
153     DumpNativeStack(os, tid, "  native: ");
154   }
155   os << std::endl;
156 }
157 
158 void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
159   DIR* d = opendir("/proc/self/task");
160   if (!d) {
161     return;
162   }
163 
164   Thread* self = Thread::Current();
165   dirent* e;
166   while ((e = readdir(d)) != nullptr) {
167     char* end;
168     pid_t tid = strtol(e->d_name, &end, 10);
169     if (!*end) {
170       Thread* thread;
171       {
172         MutexLock mu(self, *Locks::thread_list_lock_);
173         thread = FindThreadByTid(tid);
174       }
175       if (thread == nullptr) {
176         DumpUnattachedThread(os, tid, dump_native_stack);
177       }
178     }
179   }
180   closedir(d);
181 }
182 
183 // Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
184 // overloaded with ANR dumps.
185 static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
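// That is 100 s on target builds and 20 s on host; used by
// DumpCheckpoint::WaitForThreadsToRunThroughCheckpoint() below.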
186 
187 // A closure used by Thread::Dump.
188 class DumpCheckpoint final : public Closure {
189  public:
190   DumpCheckpoint(bool dump_native_stack)
191       : lock_("Dump checkpoint lock", kGenericBottomLock),
192         os_(),
193         // Avoid verifying count in case a thread doesn't end up passing through the barrier.
194         // This avoids a SIGABRT that would otherwise happen in the destructor.
195         barrier_(0, /*verify_count_on_shutdown=*/false),
196         unwinder_(std::vector<std::string>{}, std::vector<std::string> {"oat", "odex"}),
197         dump_native_stack_(dump_native_stack) {
198   }
199 
200   void Run(Thread* thread) override {
201     // Note thread and self may not be equal if thread was already suspended at the point of the
202     // request.
203     Thread* self = Thread::Current();
204     CHECK(self != nullptr);
205     std::ostringstream local_os;
206     Thread::DumpOrder dump_order;
207     {
208       ScopedObjectAccess soa(self);
209       dump_order = thread->Dump(local_os, unwinder_, dump_native_stack_);
210     }
211     {
212       MutexLock mu(self, lock_);
213       // Sort, so that the most interesting threads for ANR are printed first (ANRs can be trimmed).
214       std::pair<Thread::DumpOrder, uint32_t> sort_key(dump_order, thread->GetThreadId());
215       os_.emplace(sort_key, std::move(local_os));
216     }
217     barrier_.Pass(self);
218   }
219 
220   // Called at the end to print all the dumps in sequential prioritized order.
221   void Dump(Thread* self, std::ostream& os) {
222     MutexLock mu(self, lock_);
223     for (const auto& it : os_) {
224       os << it.second.str() << std::endl;
225     }
226   }
227 
228   void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
229     Thread* self = Thread::Current();
230     ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
231     bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
232     if (timed_out) {
233       // Avoid a recursive abort.
234       LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
235           << "Unexpected time out during dump checkpoint.";
236     }
237   }
238 
239  private:
240   // Storage for the per-thread dumps (guarded by lock since they are generated in parallel).
241   // Map is used to obtain sorted order. The key is unique, but use multimap just in case.
242   Mutex lock_;
243   std::multimap<std::pair<Thread::DumpOrder, uint32_t>, std::ostringstream> os_ GUARDED_BY(lock_);
244   // The barrier to be passed through and for the requestor to wait upon.
245   Barrier barrier_;
246   // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
247   unwindstack::AndroidLocalUnwinder unwinder_;
248   // Whether we should dump the native stack.
249   const bool dump_native_stack_;
250 };
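// Rough flow of a SIGQUIT/ANR dump: ThreadList::Dump() below runs a DumpCheckpoint on every
// registered thread. Each thread writes its dump into a private ostringstream, files it in os_
// under lock_ keyed by (DumpOrder, thread id), and passes barrier_. The requester waits on the
// barrier (bounded by kDumpWaitTimeout) and then prints the dumps in key order, so the threads
// most relevant to an ANR come first even if the output is later truncated.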
251 
252 void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
253   Thread* self = Thread::Current();
254   {
255     MutexLock mu(self, *Locks::thread_list_lock_);
256     os << "DALVIK THREADS (" << list_.size() << "):\n";
257   }
258   if (self != nullptr) {
259     DumpCheckpoint checkpoint(dump_native_stack);
260     size_t threads_running_checkpoint;
261     {
262       // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
263       ScopedObjectAccess soa(self);
264       threads_running_checkpoint = RunCheckpoint(&checkpoint);
265     }
266     if (threads_running_checkpoint != 0) {
267       checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
268     }
269     checkpoint.Dump(self, os);
270   } else {
271     DumpUnattachedThreads(os, dump_native_stack);
272   }
273 }
274 
275 void ThreadList::AssertOtherThreadsAreSuspended(Thread* self) {
276   MutexLock mu(self, *Locks::thread_list_lock_);
277   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
278   for (const auto& thread : list_) {
279     if (thread != self) {
280       CHECK(thread->IsSuspended())
281             << "\nUnsuspended thread: <<" << *thread << "\n"
282             << "self: <<" << *Thread::Current();
283     }
284   }
285 }
286 
287 #if HAVE_TIMED_RWLOCK
288 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
289 NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
290   // Increment gAborting before doing the thread list dump since we don't want any failures from
291   // AssertThreadSuspensionIsAllowable in cases where thread suspension is not allowed.
292   // See b/69044468.
293   ++gAborting;
294   Runtime* runtime = Runtime::Current();
295   std::ostringstream ss;
296   ss << "Thread suspend timeout\n";
297   Locks::mutator_lock_->Dump(ss);
298   ss << "\n";
299   runtime->GetThreadList()->Dump(ss);
300   --gAborting;
301   LOG(FATAL) << ss.str();
302   exit(0);
303 }
304 #endif
305 
306 size_t ThreadList::RunCheckpoint(Closure* checkpoint_function,
307                                  Closure* callback,
308                                  bool allow_lock_checking) {
309   Thread* self = Thread::Current();
310   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
311   Locks::thread_list_lock_->AssertNotHeld(self);
312   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
313 
314   std::vector<Thread*> suspended_count_modified_threads;
315   size_t count = 0;
316   {
317     // Call a checkpoint function for each thread. We directly invoke the function on behalf of
318     // suspended threads.
319     MutexLock mu(self, *Locks::thread_list_lock_);
320     if (kIsDebugBuild && allow_lock_checking) {
321       self->DisallowPreMonitorMutexes();
322     }
323     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
324     count = list_.size();
325     for (const auto& thread : list_) {
326       if (thread != self) {
327         bool requested_suspend = false;
328         while (true) {
329           if (thread->RequestCheckpoint(checkpoint_function)) {
330             // This thread will run its checkpoint some time in the near future.
331             if (requested_suspend) {
332               // The suspend request is now unnecessary.
333               thread->DecrementSuspendCount(self);
334               Thread::resume_cond_->Broadcast(self);
335               requested_suspend = false;
336             }
337             break;
338           } else {
339             // The thread was, and probably still is, suspended.
340             if (!requested_suspend) {
341               // This does not risk suspension cycles: We may have a pending suspension request,
342               // but it cannot block us: Checkpoint Run() functions may not suspend, thus we cannot
343               // be blocked from decrementing the count again.
344               thread->IncrementSuspendCount(self);
345               requested_suspend = true;
346             }
347             if (thread->IsSuspended()) {
348               // We saw it suspended after incrementing suspend count, so it will stay that way.
349               break;
350             }
351           }
352           // We only get here if the thread entered kRunnable again. Retry immediately.
353         }
354         // At this point, either the thread was runnable, and will run the checkpoint itself,
355         // or requested_suspend is true, and the thread is safely suspended.
356         if (requested_suspend) {
357           DCHECK(thread->IsSuspended());
358           suspended_count_modified_threads.push_back(thread);
359         }
360       }
361       // Thread either has honored or will honor the checkpoint, or it has been added to
362       // suspended_count_modified_threads.
363     }
364     // Run the callback to be called inside this critical section.
365     if (callback != nullptr) {
366       callback->Run(self);
367     }
368   }
369 
370   // Run the checkpoint on ourself while we wait for threads to suspend.
371   checkpoint_function->Run(self);
372 
373   bool mutator_lock_held = Locks::mutator_lock_->IsSharedHeld(self);
374   bool repeat = true;
375   // Run the checkpoint on the suspended threads.
376   while (repeat) {
377     repeat = false;
378     for (auto& thread : suspended_count_modified_threads) {
379       if (thread != nullptr) {
380         // We know for sure that the thread is suspended at this point.
381         DCHECK(thread->IsSuspended());
382         if (mutator_lock_held) {
383           // Make sure there is no pending flip function before running Java-heap-accessing
384           // checkpoint on behalf of thread.
385           Thread::EnsureFlipFunctionStarted(self, thread);
386           if (thread->GetStateAndFlags(std::memory_order_acquire)
387                   .IsAnyOfFlagsSet(Thread::FlipFunctionFlags())) {
388             // There is another thread running the flip function for 'thread'.
389             // Instead of waiting for it to complete, move to the next thread.
390             repeat = true;
391             continue;
392           }
393         }  // Otherwise the checkpoint will not access Java data structures, and doesn't care
394            // whether the flip function has been called.
395         checkpoint_function->Run(thread);
396         {
397           MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
398           thread->DecrementSuspendCount(self);
399         }
400         // We are done with 'thread' so set it to nullptr so that next outer
401         // loop iteration, if any, skips 'thread'.
402         thread = nullptr;
403       }
404     }
405   }
406   DCHECK(std::all_of(suspended_count_modified_threads.cbegin(),
407                      suspended_count_modified_threads.cend(),
408                      [](Thread* thread) { return thread == nullptr; }));
409 
410   {
411     // Imitate ResumeAll, threads may be waiting on Thread::resume_cond_ since we raised their
412     // suspend count. Now the suspend_count_ is lowered so we must do the broadcast.
413     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
414     Thread::resume_cond_->Broadcast(self);
415   }
416 
417   if (kIsDebugBuild && allow_lock_checking) {
418     self->AllowPreMonitorMutexes();
419   }
420   return count;
421 }
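// RunCheckpoint() in short: for each other thread we either (a) get RequestCheckpoint() to
// succeed, so the target runs checkpoint_function itself at its next suspend check, or (b) raise
// its suspend count, observe it suspended, and run the closure on its behalf (first ensuring any
// pending flip function has run when we hold the mutator lock). All suspend counts raised in (b)
// are dropped again and resume_cond_ is broadcast before returning.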
422 
423 void ThreadList::RunEmptyCheckpoint() {
424   Thread* self = Thread::Current();
425   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
426   Locks::thread_list_lock_->AssertNotHeld(self);
427   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
428   std::vector<uint32_t> runnable_thread_ids;
429   size_t count = 0;
430   Barrier* barrier = empty_checkpoint_barrier_.get();
431   barrier->Init(self, 0);
432   {
433     MutexLock mu(self, *Locks::thread_list_lock_);
434     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
435     for (Thread* thread : list_) {
436       if (thread != self) {
437         while (true) {
438           if (thread->RequestEmptyCheckpoint()) {
439             // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
440             // some time in the near future.
441             ++count;
442             if (kIsDebugBuild) {
443               runnable_thread_ids.push_back(thread->GetThreadId());
444             }
445             break;
446           }
447           if (thread->GetState() != ThreadState::kRunnable) {
448             // It's seen suspended, we are done because it must not be in the middle of a mutator
449             // heap access.
450             break;
451           }
452         }
453       }
454     }
455   }
456 
457   // Wake up the threads blocking for weak ref access so that they will respond to the empty
458   // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
459   Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
460   Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true);
461   {
462     ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
463     uint64_t total_wait_time = 0;
464     bool first_iter = true;
465     while (true) {
466       // Wake up the runnable threads blocked on the mutexes that another thread, which is blocked
467       // on a weak ref access, holds (indirectly blocking for weak ref access through another thread
468       // and a mutex.) This needs to be done periodically because the thread may be preempted
469       // between the CheckEmptyCheckpointFromMutex call and the subsequent futex wait in
470       // Mutex::ExclusiveLock, etc. when the wakeup via WakeupToRespondToEmptyCheckpoint
471       // arrives. This could cause a *very rare* deadlock, if not repeated. Most of the cases are
472       // handled in the first iteration.
473       for (BaseMutex* mutex : Locks::expected_mutexes_on_weak_ref_access_) {
474         mutex->WakeupToRespondToEmptyCheckpoint();
475       }
476       static constexpr uint64_t kEmptyCheckpointPeriodicTimeoutMs = 100;  // 100ms
477       static constexpr uint64_t kEmptyCheckpointTotalTimeoutMs = 600 * 1000;  // 10 minutes.
478       size_t barrier_count = first_iter ? count : 0;
479       first_iter = false;  // Don't add to the barrier count from the second iteration on.
480       bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointPeriodicTimeoutMs);
481       if (!timed_out) {
482         break;  // Success
483       }
484       // This is a very rare case.
485       total_wait_time += kEmptyCheckpointPeriodicTimeoutMs;
486       if (kIsDebugBuild && total_wait_time > kEmptyCheckpointTotalTimeoutMs) {
487         std::ostringstream ss;
488         ss << "Empty checkpoint timeout\n";
489         ss << "Barrier count " << barrier->GetCount(self) << "\n";
490         ss << "Runnable thread IDs";
491         for (uint32_t tid : runnable_thread_ids) {
492           ss << " " << tid;
493         }
494         ss << "\n";
495         Locks::mutator_lock_->Dump(ss);
496         ss << "\n";
497         LOG(FATAL_WITHOUT_ABORT) << ss.str();
498         // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
499         // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
500         {
501           ScopedObjectAccess soa(self);
502           MutexLock mu1(self, *Locks::thread_list_lock_);
503           for (Thread* thread : GetList()) {
504             uint32_t tid = thread->GetThreadId();
505             bool is_in_runnable_thread_ids =
506                 std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
507                 runnable_thread_ids.end();
508             if (is_in_runnable_thread_ids &&
509                 thread->ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
510               // Found a runnable thread that hasn't responded to the empty checkpoint request.
511               // Assume it's stuck and safe to dump its stack.
512               thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
513                            /*dump_native_stack=*/ true,
514                            /*force_dump_stack=*/ true);
515             }
516           }
517         }
518         LOG(FATAL_WITHOUT_ABORT)
519             << "Dumped runnable threads that haven't responded to empty checkpoint.";
520         // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
521         Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
522         LOG(FATAL) << "Dumped all threads.";
523       }
524     }
525   }
526 }
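// RunEmptyCheckpoint() only needs threads that were runnable at request time to decrement
// empty_checkpoint_barrier_; a thread seen suspended cannot be in the middle of a mutator heap
// access and is skipped. The weak-ref broadcasts and periodic WakeupToRespondToEmptyCheckpoint()
// calls above are there to unblock runnable threads stuck on such mutexes so they can notice the
// request.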
527 
528 // Separate function to disable just the right amount of thread-safety analysis.
529 ALWAYS_INLINE void AcquireMutatorLockSharedUncontended(Thread* self)
530     ACQUIRE_SHARED(*Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
531   bool success = Locks::mutator_lock_->SharedTryLock(self, /*check=*/false);
532   CHECK(success);
533 }
534 
535 // A checkpoint/suspend-all hybrid to switch thread roots from
536 // from-space to to-space refs. Used to synchronize threads at a point
537 // to mark the initiation of marking while maintaining the to-space
538 // invariant.
539 void ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
540                                  Closure* flip_callback,
541                                  gc::collector::GarbageCollector* collector,
542                                  gc::GcPauseListener* pause_listener) {
543   TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
544   Thread* self = Thread::Current();
545   Locks::mutator_lock_->AssertNotHeld(self);
546   Locks::thread_list_lock_->AssertNotHeld(self);
547   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
548   CHECK_NE(self->GetState(), ThreadState::kRunnable);
549 
550   collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.
551 
552   // ThreadFlipBegin happens before we suspend all the threads, so it does not
553   // count towards the pause.
554   const uint64_t suspend_start_time = NanoTime();
555   VLOG(threads) << "Suspending all for thread flip";
556   {
557     ScopedTrace trace("ThreadFlipSuspendAll");
558     SuspendAllInternal(self);
559   }
560 
561   std::vector<Thread*> flipping_threads;  // All suspended threads. Includes us.
562   int thread_count;
563   // Flipping threads might exit between the time we resume them and try to run the flip function.
564   // Track that in a parallel vector.
565   std::unique_ptr<ThreadExitFlag[]> exit_flags;
566 
567   {
568     TimingLogger::ScopedTiming t("FlipThreadSuspension", collector->GetTimings());
569     if (pause_listener != nullptr) {
570       pause_listener->StartPause();
571     }
572 
573     // Run the flip callback for the collector.
574     Locks::mutator_lock_->ExclusiveLock(self);
575     suspend_all_histogram_.AdjustAndAddValue(NanoTime() - suspend_start_time);
576     flip_callback->Run(self);
577 
578     {
579       MutexLock mu(self, *Locks::thread_list_lock_);
580       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
581       thread_count = list_.size();
582       exit_flags.reset(new ThreadExitFlag[thread_count]);
583       flipping_threads.resize(thread_count, nullptr);
584       int i = 1;
585       for (Thread* thread : list_) {
586         // Set the flip function for all threads because once we start resuming any threads,
587         // they may need to run the flip function on behalf of other threads, even this one.
588         DCHECK(thread == self || thread->IsSuspended());
589         thread->SetFlipFunction(thread_flip_visitor);
590         // Put ourselves first, so other threads are more likely to have finished before we get
591         // there.
592         int thread_index = thread == self ? 0 : i++;
593         flipping_threads[thread_index] = thread;
594         thread->NotifyOnThreadExit(&exit_flags[thread_index]);
595       }
596       DCHECK(i == thread_count);
597     }
598 
599     if (pause_listener != nullptr) {
600       pause_listener->EndPause();
601     }
602   }
603   // Any new threads created after this will be created by threads that already ran their flip
604   // functions. In the normal GC use case in which the flip function converts all local references
605   // to to-space references, these newly created threads will also see only to-space references.
606 
607   // Resume threads, making sure that we do not release suspend_count_lock_ until we've reacquired
608   // the mutator_lock_ in shared mode, and decremented suspend_all_count_.  This avoids a
609   // concurrent SuspendAll, and ensures that newly started threads see a correct value of
610   // suspend_all_count.
611   {
612     MutexLock mu(self, *Locks::thread_list_lock_);
613     Locks::thread_suspend_count_lock_->Lock(self);
614     ResumeAllInternal(self);
615   }
616   collector->RegisterPause(NanoTime() - suspend_start_time);
617 
618   // Since all threads were suspended, they will attempt to run the flip function before
619   // reentering a runnable state. We will also attempt to run the flip functions ourselves.  Any
620   // intervening checkpoint request will do the same.  Exactly one of those flip function attempts
621   // will succeed, and the target thread will not be able to reenter a runnable state until one of
622   // them does.
623 
624   // Try to run the closure on the other threads.
625   TimingLogger::ScopedTiming split3("RunningThreadFlips", collector->GetTimings());
626   // Reacquire the mutator lock while holding suspend_count_lock. This cannot fail, since we
627   // do not acquire the mutator lock unless suspend_all_count was read as 0 while holding
628   // suspend_count_lock. We did not release suspend_count_lock since releasing the mutator
629   // lock.
630   AcquireMutatorLockSharedUncontended(self);
631 
632   Locks::thread_suspend_count_lock_->Unlock(self);
633   // Concurrent SuspendAll may now see zero suspend_all_count_, but block on mutator_lock_.
634 
635   collector->GetHeap()->ThreadFlipEnd(self);
636 
637   for (int i = 0; i < thread_count; ++i) {
638     bool finished;
639     Thread::EnsureFlipFunctionStarted(
640         self, flipping_threads[i], Thread::StateAndFlags(0), &exit_flags[i], &finished);
641     if (finished) {
642       MutexLock mu2(self, *Locks::thread_list_lock_);
643       flipping_threads[i]->UnregisterThreadExitFlag(&exit_flags[i]);
644       flipping_threads[i] = nullptr;
645     }
646   }
647   // Make sure all flips complete before we return.
648   for (int i = 0; i < thread_count; ++i) {
649     if (UNLIKELY(flipping_threads[i] != nullptr)) {
650       flipping_threads[i]->WaitForFlipFunctionTestingExited(self, &exit_flags[i]);
651       MutexLock mu2(self, *Locks::thread_list_lock_);
652       flipping_threads[i]->UnregisterThreadExitFlag(&exit_flags[i]);
653     }
654   }
655 
656   Thread::DCheckUnregisteredEverywhere(&exit_flags[0], &exit_flags[thread_count - 1]);
657 
658   Locks::mutator_lock_->SharedUnlock(self);
659 }
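// Thread-flip protocol, roughly: suspend everything, run flip_callback while holding the mutator
// lock exclusively, install thread_flip_visitor as every thread's flip function, then resume.
// Each suspended thread must run its flip function before becoming runnable again; this function
// also races to run it on their behalf via EnsureFlipFunctionStarted() and finally waits for all
// flips to complete, using the ThreadExitFlag array to tolerate threads exiting in the meantime.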
660 
661 // True only for debugging suspend timeout code. The resulting timeouts are short enough that
662 // failures are expected.
663 static constexpr bool kShortSuspendTimeouts = false;
664 
665 static constexpr unsigned kSuspendBarrierIters = kShortSuspendTimeouts ? 5 : 20;
666 
667 #if ART_USE_FUTEXES
668 
669 // Returns true if it timed out.
670 static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
671                                       int32_t cur_val,
672                                       uint64_t timeout_ns) {
673   timespec wait_timeout;
674   if (kShortSuspendTimeouts) {
675     timeout_ns = MsToNs(kSuspendBarrierIters);
676     CHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 1ul);
677   } else {
678     DCHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 10ul);
679   }
680   InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(timeout_ns / kSuspendBarrierIters), 0, &wait_timeout);
681   if (futex(barrier->Address(), FUTEX_WAIT_PRIVATE, cur_val, &wait_timeout, nullptr, 0) != 0) {
682     if (errno == ETIMEDOUT) {
683       return true;
684     } else if (errno != EAGAIN && errno != EINTR) {
685       PLOG(FATAL) << "futex wait for suspend barrier failed";
686     }
687   }
688   return false;
689 }
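// Each call above blocks for at most timeout_ns / kSuspendBarrierIters and reports a timeout only
// on ETIMEDOUT; EAGAIN (the barrier value already changed) and EINTR return false so the caller
// re-reads the barrier and retries.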
690 
691 #else
692 
693 static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
694                                       int32_t cur_val,
695                                       uint64_t timeout_ns) {
696   // In the normal case, aim for a couple of hundred milliseconds.
697   const unsigned kInnerIters =
698       kShortSuspendTimeouts ? 1'000 : (timeout_ns / 1000) / kSuspendBarrierIters;
699   DCHECK_GE(kInnerIters, 1'000u);
700   for (unsigned i = 0; i < kInnerIters; ++i) {
701     sched_yield();
702     if (barrier->load(std::memory_order_acquire) == 0) {
703       return false;
704     }
705   }
706   return true;
707 }
708 
709 #endif  // ART_USE_FUTEXES
710 
711 std::optional<std::string> ThreadList::WaitForSuspendBarrier(AtomicInteger* barrier,
712                                                              pid_t t,
713                                                              int attempt_of_4) {
714   // Only fail after kIter timeouts, to make us robust against app freezing.
715 #if ART_USE_FUTEXES
716   const uint64_t start_time = NanoTime();
717 #endif
718   uint64_t timeout_ns =
719       attempt_of_4 == 0 ? thread_suspend_timeout_ns_ : thread_suspend_timeout_ns_ / 4;
720   if (attempt_of_4 != 1 && getpriority(PRIO_PROCESS, 0 /* this thread */) > 0) {
721     // We're a low priority thread, and thus have a longer ANR timeout. Double the suspend
722     // timeout. To avoid the getpriority system call in the common case, we do not double the
723     // first of the 4 waits, but then triple the third one to compensate.
724     if (attempt_of_4 == 3) {
725       timeout_ns *= 3;
726     } else {
727       timeout_ns *= 2;
728     }
729   }
730   bool collect_state = (t != 0 && (attempt_of_4 == 0 || attempt_of_4 == 4));
731   int32_t cur_val = barrier->load(std::memory_order_acquire);
732   if (cur_val <= 0) {
733     DCHECK_EQ(cur_val, 0);
734     return std::nullopt;
735   }
736   unsigned i = 0;
737   if (WaitOnceForSuspendBarrier(barrier, cur_val, timeout_ns)) {
738     i = 1;
739   }
740   cur_val = barrier->load(std::memory_order_acquire);
741   if (cur_val <= 0) {
742     DCHECK_EQ(cur_val, 0);
743     return std::nullopt;
744   }
745 
746   // Long wait; gather information in case of timeout.
747   std::string sampled_state = collect_state ? GetOsThreadStatQuick(t) : "";
748   while (i < kSuspendBarrierIters) {
749     if (WaitOnceForSuspendBarrier(barrier, cur_val, timeout_ns)) {
750       ++i;
751 #if ART_USE_FUTEXES
752       if (!kShortSuspendTimeouts) {
753         CHECK_GE(NanoTime() - start_time, i * timeout_ns / kSuspendBarrierIters - 1'000'000);
754       }
755 #endif
756     }
757     cur_val = barrier->load(std::memory_order_acquire);
758     if (cur_val <= 0) {
759       DCHECK_EQ(cur_val, 0);
760       return std::nullopt;
761     }
762   }
763   return collect_state ? "Target states: [" + sampled_state + ", " + GetOsThreadStatQuick(t) + "]" +
764                              std::to_string(cur_val) + "@" + std::to_string((uintptr_t)barrier) +
765                              " Final wait time: " + PrettyDuration(NanoTime() - start_time) :
766                          "";
767 }
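// The total budget is therefore roughly thread_suspend_timeout_ns_ (a quarter of it per attempt
// when attempt_of_4 is nonzero, scaled up for low-priority callers), split over
// kSuspendBarrierIters individual waits. Success yields std::nullopt; a timeout yields a
// diagnostic string (with before/after samples of the target's OS thread state when a tid was
// given, and empty otherwise).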
768 
769 void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
770   Thread* self = Thread::Current();
771 
772   if (self != nullptr) {
773     VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
774   } else {
775     VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
776   }
777   {
778     ScopedTrace trace("Suspending mutator threads");
779     const uint64_t start_time = NanoTime();
780 
781     SuspendAllInternal(self);
782     // All threads are known to have suspended (but a thread may still own the mutator lock)
783     // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
784 #if HAVE_TIMED_RWLOCK
785     while (true) {
786       if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self,
787                                                          NsToMs(thread_suspend_timeout_ns_),
788                                                          0)) {
789         break;
790       } else if (!long_suspend_) {
791         // Reading long_suspend without the mutator lock is slightly racy, in some rare cases, this
792         // could result in a thread suspend timeout.
793         // Timeout if we wait more than thread_suspend_timeout_ns_ nanoseconds.
794         UnsafeLogFatalForThreadSuspendAllTimeout();
795       }
796     }
797 #else
798     Locks::mutator_lock_->ExclusiveLock(self);
799 #endif
800 
801     long_suspend_ = long_suspend;
802 
803     const uint64_t end_time = NanoTime();
804     const uint64_t suspend_time = end_time - start_time;
805     suspend_all_histogram_.AdjustAndAddValue(suspend_time);
806     if (suspend_time > kLongThreadSuspendThreshold) {
807       LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
808     }
809 
810     if (kDebugLocking) {
811       // Debug check that all threads are suspended.
812       AssertOtherThreadsAreSuspended(self);
813     }
814   }
815 
816   // SuspendAllInternal blocks if we are in the middle of a flip.
817   DCHECK(!self->ReadFlag(ThreadFlag::kPendingFlipFunction));
818   DCHECK(!self->ReadFlag(ThreadFlag::kRunningFlipFunction));
819 
820   ATraceBegin((std::string("Mutator threads suspended for ") + cause).c_str());
821 
822   if (self != nullptr) {
823     VLOG(threads) << *self << " SuspendAll complete";
824   } else {
825     VLOG(threads) << "Thread[null] SuspendAll complete";
826   }
827 }
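// SuspendAll() and ResumeAll() must be paired, and the caller holds the mutator lock exclusively
// in between. A minimal usage sketch, assuming the ScopedSuspendAll RAII helper declared in
// thread_list.h:
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__);
//     // All other mutator threads are suspended here.
//   }  // ~ScopedSuspendAll() resumes them via ResumeAll().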
828 
829 // Ensures all threads running Java suspend and that those not running Java don't start.
830 void ThreadList::SuspendAllInternal(Thread* self, SuspendReason reason) {
831   // self can be nullptr if this is an unregistered thread.
832   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
833   Locks::thread_list_lock_->AssertNotHeld(self);
834   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
835   if (kDebugLocking && self != nullptr) {
836     CHECK_NE(self->GetState(), ThreadState::kRunnable);
837   }
838 
839   // First request that all threads suspend, then wait for them to suspend before
840   // returning. This suspension scheme also relies on other behaviour:
841   // 1. Threads cannot be deleted while they are suspended or have a suspend-
842   //    request flag set - (see Unregister() below).
843   // 2. When threads are created, they are created in a suspended state (actually
844   //    kNative) and will never begin executing Java code without first checking
845   //    the suspend-request flag.
846 
847   // The atomic counter for number of threads that need to pass the barrier.
848   AtomicInteger pending_threads;
849 
850   for (int iter_count = 1;; ++iter_count) {
851     {
852       MutexLock mu(self, *Locks::thread_list_lock_);
853       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
854       if (suspend_all_count_ == 0) {
855         // Never run multiple SuspendAlls concurrently.
856         // If we are asked to suspend ourselves, we proceed anyway, but must ignore suspend
857         // request from other threads until we resume them.
858         bool found_myself = false;
859         // Update global suspend all state for attaching threads.
860         ++suspend_all_count_;
861         pending_threads.store(list_.size() - (self == nullptr ? 0 : 1), std::memory_order_relaxed);
862         // Increment everybody else's suspend count.
863         for (const auto& thread : list_) {
864           if (thread == self) {
865             found_myself = true;
866           } else {
867             VLOG(threads) << "requesting thread suspend: " << *thread;
868             DCHECK_EQ(suspend_all_count_, 1);
869             thread->IncrementSuspendCount(self, &pending_threads, nullptr, reason);
870             if (thread->IsSuspended()) {
871               // Effectively pass the barrier on behalf of the already suspended thread.
872               // The thread itself cannot yet have acted on our request since we still hold the
873               // suspend_count_lock_, and it will notice that kActiveSuspendBarrier has already
874               // been cleared if and when it acquires the lock in PassActiveSuspendBarriers().
875               DCHECK_EQ(thread->tlsPtr_.active_suspendall_barrier, &pending_threads);
876               pending_threads.fetch_sub(1, std::memory_order_seq_cst);
877               thread->tlsPtr_.active_suspendall_barrier = nullptr;
878               if (!thread->HasActiveSuspendBarrier()) {
879                 thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
880               }
881             }
882             // else:
883             // The target thread was not yet suspended, and hence will be forced to execute
884             // TransitionFromRunnableToSuspended shortly. Since we set the kSuspendRequest flag
885             // before checking, and it checks kActiveSuspendBarrier after noticing kSuspendRequest,
886             // it must notice kActiveSuspendBarrier when it does. Thus it is guaranteed to
887             // decrement the suspend barrier. We're relying on store; load ordering here, but
888             // that's not a problem, since state and flags all reside in the same atomic, and
889             // are thus properly ordered, even for relaxed accesses.
890           }
891         }
892         self->AtomicSetFlag(ThreadFlag::kSuspensionImmune, std::memory_order_relaxed);
893         DCHECK(self == nullptr || found_myself);
894         break;
895       }
896     }
897     if (iter_count >= kMaxSuspendRetries) {
898       LOG(FATAL) << "Too many SuspendAll retries: " << iter_count;
899     } else {
900       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
901       DCHECK_LE(suspend_all_count_, 1);
902       if (suspend_all_count_ != 0) {
903         // This may take a while, and we're not runnable, and thus would otherwise not block.
904         Thread::resume_cond_->WaitHoldingLocks(self);
905         continue;
906       }
907     }
908     // We're already not runnable, so an attempt to suspend us should succeed.
909   }
910 
911   Thread* culprit = nullptr;
912   pid_t tid = 0;
913   std::ostringstream oss;
914   for (int attempt_of_4 = 1; attempt_of_4 <= 4; ++attempt_of_4) {
915     auto result = WaitForSuspendBarrier(&pending_threads, tid, attempt_of_4);
916     if (!result.has_value()) {
917       // Wait succeeded.
918       break;
919     }
920     if (attempt_of_4 == 3) {
921       // Second to the last attempt; Try to gather more information in case we time out.
922       MutexLock mu(self, *Locks::thread_list_lock_);
923       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
924       oss << "Unsuspended threads: ";
925       for (const auto& thread : list_) {
926         if (thread != self && !thread->IsSuspended()) {
927           culprit = thread;
928           oss << *thread << ", ";
929         }
930       }
931       if (culprit != nullptr) {
932         tid = culprit->GetTid();
933       }
934     } else if (attempt_of_4 == 4) {
935       // Final attempt still timed out.
936       if (culprit == nullptr) {
937         LOG(FATAL) << "SuspendAll timeout. Couldn't find holdouts.";
938       } else {
939         std::string name;
940         culprit->GetThreadName(name);
941         oss << "Info for " << *culprit << ":";
942         std::string thr_descr =
943             StringPrintf("%s tid: %d, state&flags: 0x%x, priority: %d,  barrier value: %d, ",
944                          name.c_str(),
945                          tid,
946                          culprit->GetStateAndFlags(std::memory_order_relaxed).GetValue(),
947                          culprit->GetNativePriority(),
948                          pending_threads.load());
949         oss << thr_descr << result.value();
950         culprit->AbortInThis("SuspendAll timeout: " + oss.str());
951       }
952     }
953   }
954 }
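// SuspendAllInternal() in short: with thread_list_lock_ and thread_suspend_count_lock_ held, it
// raises every other thread's suspend count with pending_threads acting as a shared suspend
// barrier. Threads already suspended get the barrier decremented on their behalf right away; the
// rest decrement it themselves on the way to suspension (see PassActiveSuspendBarriers()). The
// caller then waits on the barrier in up to four attempts, collecting the unsuspended culprit on
// the third and aborting in that thread if the fourth also times out.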
955 
956 void ThreadList::ResumeAll() {
957   Thread* self = Thread::Current();
958   if (kDebugLocking) {
959     // Debug check that all threads are suspended.
960     AssertOtherThreadsAreSuspended(self);
961   }
962   MutexLock mu(self, *Locks::thread_list_lock_);
963   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
964   ATraceEnd();  // Matching "Mutator threads suspended ..." in SuspendAll.
965   ResumeAllInternal(self);
966 }
967 
968 // Holds thread_list_lock_ and suspend_count_lock_
969 void ThreadList::ResumeAllInternal(Thread* self) {
970   DCHECK_NE(self->GetState(), ThreadState::kRunnable);
971   if (self != nullptr) {
972     VLOG(threads) << *self << " ResumeAll starting";
973   } else {
974     VLOG(threads) << "Thread[null] ResumeAll starting";
975   }
976 
977   ScopedTrace trace("Resuming mutator threads");
978 
979   long_suspend_ = false;
980 
981   Locks::mutator_lock_->ExclusiveUnlock(self);
982 
983   // Decrement the suspend counts for all threads.
984   for (const auto& thread : list_) {
985     if (thread != self) {
986       thread->DecrementSuspendCount(self);
987     }
988   }
989 
990   // Update global suspend all state for attaching threads. Unblocks other SuspendAlls once
991   // suspend_count_lock_ is released.
992   --suspend_all_count_;
993   self->AtomicClearFlag(ThreadFlag::kSuspensionImmune, std::memory_order_relaxed);
994   // Pending suspend requests for us will be handled when we become Runnable again.
995 
996   // Broadcast a notification to all suspended threads, some or all of
997   // which may choose to wake up.  No need to wait for them.
998   if (self != nullptr) {
999     VLOG(threads) << *self << " ResumeAll waking others";
1000   } else {
1001     VLOG(threads) << "Thread[null] ResumeAll waking others";
1002   }
1003   Thread::resume_cond_->Broadcast(self);
1004 
1005   if (self != nullptr) {
1006     VLOG(threads) << *self << " ResumeAll complete";
1007   } else {
1008     VLOG(threads) << "Thread[null] ResumeAll complete";
1009   }
1010 }
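// Note the ordering: the exclusive mutator lock is released before the suspend counts and
// suspend_all_count_ are decremented, and resume_cond_ is broadcast last, all while the callers
// still hold thread_list_lock_ and thread_suspend_count_lock_.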
1011 
1012 bool ThreadList::Resume(Thread* thread, SuspendReason reason) {
1013   // This assumes there was an ATraceBegin when we suspended the thread.
1014   ATraceEnd();
1015 
1016   Thread* self = Thread::Current();
1017   DCHECK_NE(thread, self);
1018   VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..." << reason;
1019 
1020   {
1021     // To check Contains.
1022     MutexLock mu(self, *Locks::thread_list_lock_);
1023     // To check IsSuspended.
1024     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1025     if (UNLIKELY(!thread->IsSuspended())) {
1026       LOG(reason == SuspendReason::kForUserCode ? ERROR : FATAL)
1027           << "Resume(" << reinterpret_cast<void*>(thread) << ") thread not suspended";
1028       return false;
1029     }
1030     if (!Contains(thread)) {
1031       // We only expect threads within the thread-list to have been suspended otherwise we can't
1032       // stop such threads from delete-ing themselves.
1033       LOG(reason == SuspendReason::kForUserCode ? ERROR : FATAL)
1034           << "Resume(" << reinterpret_cast<void*>(thread) << ") thread not within thread list";
1035       return false;
1036     }
1037     thread->DecrementSuspendCount(self, /*for_user_code=*/(reason == SuspendReason::kForUserCode));
1038     Thread::resume_cond_->Broadcast(self);
1039   }
1040 
1041   VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") finished waking others";
1042   return true;
1043 }
1044 
1045 bool ThreadList::SuspendThread(Thread* self,
1046                                Thread* thread,
1047                                SuspendReason reason,
1048                                ThreadState self_state,
1049                                const char* func_name,
1050                                int attempt_of_4) {
1051   bool is_suspended = false;
1052   VLOG(threads) << func_name << " starting";
1053   pid_t tid = thread->GetTid();
1054   uint8_t suspended_count;
1055   uint8_t checkpoint_count;
1056   WrappedSuspend1Barrier wrapped_barrier{};
1057   static_assert(sizeof wrapped_barrier.barrier_ == sizeof(uint32_t));
1058   ThreadExitFlag tef;
1059   bool exited = false;
1060   thread->NotifyOnThreadExit(&tef);
1061   int iter_count = 1;
1062   do {
1063     {
1064       Locks::mutator_lock_->AssertSharedHeld(self);
1065       Locks::thread_list_lock_->AssertHeld(self);
1066       // Note: this will transition to runnable and potentially suspend.
1067       DCHECK(Contains(thread));
1068       // This implementation fails if thread == self. Let the clients handle that case
1069       // appropriately.
1070       CHECK_NE(thread, self) << func_name << "(self)";
1071       VLOG(threads) << func_name << " suspending: " << *thread;
1072       {
1073         MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1074         if (LIKELY(self->GetSuspendCount() == 0)) {
1075           suspended_count = thread->suspended_count_;
1076           checkpoint_count = thread->checkpoint_count_;
1077           thread->IncrementSuspendCount(self, nullptr, &wrapped_barrier, reason);
1078           if (thread->IsSuspended()) {
1079             // See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
1080             thread->RemoveFirstSuspend1Barrier(&wrapped_barrier);
1081             // PassActiveSuspendBarriers couldn't have seen our barrier, since it also acquires
1082             // 'thread_suspend_count_lock_'. `wrapped_barrier` will not be accessed.
1083             if (!thread->HasActiveSuspendBarrier()) {
1084               thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1085             }
1086             is_suspended = true;
1087           }
1088           DCHECK_GT(thread->GetSuspendCount(), 0);
1089           break;
1090         }
1091         // Else we hold the suspend count lock but another thread is trying to suspend us,
1092         // making it unsafe to try to suspend another thread in case we get a cycle.
1093         // Start the loop again, which will allow this thread to be suspended.
1094       }
1095     }
1096     // All locks are released, and we should quickly exit the suspend-unfriendly state. Retry.
1097     if (iter_count >= kMaxSuspendRetries) {
1098       LOG(FATAL) << "Too many suspend retries";
1099     }
1100     Locks::thread_list_lock_->ExclusiveUnlock(self);
1101     {
1102       ScopedThreadSuspension sts(self, ThreadState::kSuspended);
1103       usleep(kThreadSuspendSleepUs);
1104       ++iter_count;
1105     }
1106     Locks::thread_list_lock_->ExclusiveLock(self);
1107     exited = tef.HasExited();
1108   } while (!exited);
1109   thread->UnregisterThreadExitFlag(&tef);
1110   Locks::thread_list_lock_->ExclusiveUnlock(self);
1111   self->TransitionFromRunnableToSuspended(self_state);
1112   if (exited) {
1113     // This is OK: There's a race in inflating a lock and the owner giving up ownership and then
1114     // dying.
1115     LOG(WARNING) << StringPrintf("Thread with tid %d exited before suspending", tid);
1116     return false;
1117   }
1118   // Now wait for target to decrement suspend barrier.
1119   std::optional<std::string> failure_info;
1120   if (!is_suspended) {
1121     failure_info = WaitForSuspendBarrier(&wrapped_barrier.barrier_, tid, attempt_of_4);
1122     if (!failure_info.has_value()) {
1123       is_suspended = true;
1124     }
1125   }
1126   while (!is_suspended) {
1127     if (attempt_of_4 > 0 && attempt_of_4 < 4) {
1128       // Caller will try again. Give up and resume the thread for now.  We need to make sure
1129       // that wrapped_barrier is removed from the list before we deallocate it.
1130       MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1131       if (wrapped_barrier.barrier_.load() == 0) {
1132         // Succeeded in the meantime.
1133         is_suspended = true;
1134         continue;
1135       }
1136       thread->RemoveSuspend1Barrier(&wrapped_barrier);
1137       if (!thread->HasActiveSuspendBarrier()) {
1138         thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
1139       }
1140       // Do not call Resume(), since we are probably not fully suspended.
1141       thread->DecrementSuspendCount(self,
1142                                     /*for_user_code=*/(reason == SuspendReason::kForUserCode));
1143       Thread::resume_cond_->Broadcast(self);
1144       return false;
1145     }
1146     std::string name;
1147     thread->GetThreadName(name);
1148     WrappedSuspend1Barrier* first_barrier;
1149     {
1150       MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1151       first_barrier = thread->tlsPtr_.active_suspend1_barriers;
1152     }
1153     // 'thread' should still have a suspend request pending, and hence stick around. Try to abort
1154     // there, since its stack trace is much more interesting than ours.
1155     std::string message = StringPrintf(
1156         "%s timed out: %d (%s), state&flags: 0x%x, priority: %d,"
1157         " barriers: %p, ours: %p, barrier value: %d, nsusps: %d, ncheckpts: %d, thread_info: %s",
1158         func_name,
1159         thread->GetTid(),
1160         name.c_str(),
1161         thread->GetStateAndFlags(std::memory_order_relaxed).GetValue(),
1162         thread->GetNativePriority(),
1163         first_barrier,
1164         &wrapped_barrier,
1165         wrapped_barrier.barrier_.load(),
1166         thread->suspended_count_ - suspended_count,
1167         thread->checkpoint_count_ - checkpoint_count,
1168         failure_info.value().c_str());
1169     // Check one last time whether thread passed the suspend barrier. Empirically this seems to
1170     // happen maybe between 1 and 5% of the time.
1171     if (wrapped_barrier.barrier_.load() != 0) {
1172       // thread still has a pointer to wrapped_barrier. Returning and continuing would be unsafe
1173       // without additional cleanup.
1174       thread->AbortInThis(message);
1175       UNREACHABLE();
1176     }
1177     is_suspended = true;
1178   }
1179   // wrapped_barrier.barrier_ will no longer be accessed.
1180   VLOG(threads) << func_name << " suspended: " << *thread;
1181   if (ATraceEnabled()) {
1182     std::string name;
1183     thread->GetThreadName(name);
1184     ATraceBegin(
1185         StringPrintf("%s suspended %s for tid=%d", func_name, name.c_str(), thread->GetTid())
1186             .c_str());
1187   }
1188   if (kIsDebugBuild) {
1189     CHECK(thread->IsSuspended());
1190     MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1191     thread->CheckBarrierInactive(&wrapped_barrier);
1192   }
1193   return true;
1194 }
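// SuspendThread() is the single-thread analogue of SuspendAllInternal(): it raises the target's
// suspend count together with a stack-allocated WrappedSuspend1Barrier. If the target is already
// suspended the barrier is detached immediately; otherwise we wait for the target to clear it via
// WaitForSuspendBarrier(), backing out on retryable attempts or aborting in the target thread if
// the final wait times out. The ThreadExitFlag protects against the target exiting mid-suspend.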
1195 
1196 Thread* ThreadList::SuspendThreadByPeer(jobject peer, SuspendReason reason) {
1197   Thread* const self = Thread::Current();
1198   ThreadState old_self_state = self->GetState();
1199   self->TransitionFromSuspendedToRunnable();
1200   Locks::thread_list_lock_->ExclusiveLock(self);
1201   ObjPtr<mirror::Object> thread_ptr = self->DecodeJObject(peer);
1202   Thread* thread = Thread::FromManagedThread(self, thread_ptr);
1203   if (thread == nullptr || !Contains(thread)) {
1204     if (thread == nullptr) {
1205       ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(thread_ptr);
1206       std::string thr_name = (name == nullptr ? "<unknown>" : name->AsString()->ToModifiedUtf8());
1207       LOG(WARNING) << "No such thread for suspend"
1208                    << ": " << peer << ":" << thr_name;
1209     } else {
1210       LOG(WARNING) << "SuspendThreadByPeer failed for unattached thread: "
1211                    << reinterpret_cast<void*>(thread);
1212     }
1213     Locks::thread_list_lock_->ExclusiveUnlock(self);
1214     self->TransitionFromRunnableToSuspended(old_self_state);
1215     return nullptr;
1216   }
1217   VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
1218   // Releases thread_list_lock_ and mutator lock.
1219   bool success = SuspendThread(self, thread, reason, old_self_state, __func__, 0);
1220   Locks::thread_list_lock_->AssertNotHeld(self);
1221   return success ? thread : nullptr;
1222 }
1223 
1224 Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
1225                                             SuspendReason reason,
1226                                             int attempt_of_4) {
1227   Thread* const self = Thread::Current();
1228   ThreadState old_self_state = self->GetState();
1229   CHECK_NE(thread_id, kInvalidThreadId);
1230   VLOG(threads) << "SuspendThreadByThreadId starting";
1231   self->TransitionFromSuspendedToRunnable();
1232   Locks::thread_list_lock_->ExclusiveLock(self);
1233   Thread* thread = FindThreadByThreadId(thread_id);
1234   if (thread == nullptr) {
1235     // There's a race in inflating a lock and the owner giving up ownership and then dying.
1236     LOG(WARNING) << StringPrintf("No such thread id %d for suspend", thread_id);
1237     Locks::thread_list_lock_->ExclusiveUnlock(self);
1238     self->TransitionFromRunnableToSuspended(old_self_state);
1239     return nullptr;
1240   }
1241   DCHECK(Contains(thread));
1242   VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
1243   // Releases thread_list_lock_ and mutator lock.
1244   bool success = SuspendThread(self, thread, reason, old_self_state, __func__, attempt_of_4);
1245   Locks::thread_list_lock_->AssertNotHeld(self);
1246   return success ? thread : nullptr;
1247 }
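// Usage sketch (editorial, hypothetical caller-side policy): attempt_of_4 is simply forwarded to
// SuspendThread() above, so a caller that tolerates transient failure (e.g. the target exiting or
// the suspend timing out) might retry a bounded number of times:
//
//   Thread* target = nullptr;
//   for (int attempt = 1; attempt <= 4 && target == nullptr; ++attempt) {
//     target = thread_list->SuspendThreadByThreadId(tid, SuspendReason::kInternal, attempt);
//   }
//   if (target != nullptr) {
//     // ... use the suspended thread ...
//     CHECK(thread_list->Resume(target, SuspendReason::kInternal));
//   }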
1248 
1249 Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
1250   for (const auto& thread : list_) {
1251     if (thread->GetThreadId() == thread_id) {
1252       return thread;
1253     }
1254   }
1255   return nullptr;
1256 }
1257 
1258 Thread* ThreadList::FindThreadByTid(int tid) {
1259   for (const auto& thread : list_) {
1260     if (thread->GetTid() == tid) {
1261       return thread;
1262     }
1263   }
1264   return nullptr;
1265 }
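// Note (editorial): both Find* helpers walk list_ without taking any lock, so the caller must
// already hold Locks::thread_list_lock_. A minimal sketch of the expected calling pattern:
//
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);
//     Thread* t = thread_list->FindThreadByTid(tid);
//     if (t != nullptr) {
//       // Use t only while the lock is held; it may be unregistered once the lock is released.
//     }
//   }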
1266 
1267 void ThreadList::WaitForOtherNonDaemonThreadsToExit(bool check_no_birth) {
1268   ScopedTrace trace(__PRETTY_FUNCTION__);
1269   Thread* self = Thread::Current();
1270   Locks::mutator_lock_->AssertNotHeld(self);
1271   while (true) {
1272     Locks::runtime_shutdown_lock_->Lock(self);
1273     if (check_no_birth) {
1274       // No more threads can be born after we start to shutdown.
1275       CHECK(Runtime::Current()->IsShuttingDownLocked());
1276       CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
1277     } else {
1278       if (Runtime::Current()->NumberOfThreadsBeingBorn() != 0U) {
1279         // Awkward. Shutdown_cond_ is private, but the only live thread may not be registered yet.
1280         // Fortunately, this is used mostly for testing, and not performance-critical.
1281         Locks::runtime_shutdown_lock_->Unlock(self);
1282         usleep(1000);
1283         continue;
1284       }
1285     }
1286     MutexLock mu(self, *Locks::thread_list_lock_);
1287     Locks::runtime_shutdown_lock_->Unlock(self);
1288     // Also wait for any threads that are unregistering to finish. This is required so that no
1289     // threads access the thread list after it is deleted. TODO: This may not work for user daemon
1290     // threads since they could unregister at the wrong time.
1291     bool done = unregistering_count_ == 0;
1292     if (done) {
1293       for (const auto& thread : list_) {
1294         if (thread != self && !thread->IsDaemon()) {
1295           done = false;
1296           break;
1297         }
1298       }
1299     }
1300     if (done) {
1301       break;
1302     }
1303     // Wait for another thread to exit before re-checking.
1304     Locks::thread_exit_cond_->Wait(self);
1305   }
1306 }
1307 
1308 void ThreadList::SuspendAllDaemonThreadsForShutdown() {
1309   ScopedTrace trace(__PRETTY_FUNCTION__);
1310   Thread* self = Thread::Current();
1311   size_t daemons_left = 0;
1312   {
1313     // Tell all the daemons it's time to suspend.
1314     MutexLock mu(self, *Locks::thread_list_lock_);
1315     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1316     for (const auto& thread : list_) {
1317       // This is only run after all non-daemon threads have exited, so the remainder should all be
1318       // daemons.
1319       CHECK(thread->IsDaemon()) << *thread;
1320       if (thread != self) {
1321         thread->IncrementSuspendCount(self);
1322         ++daemons_left;
1323       }
1324       // We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be
1325       // the sleep forever one.
1326       thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
1327     }
1328   }
1329   if (daemons_left == 0) {
1330     // No threads left; safe to shut down.
1331     return;
1332   }
1333   // There is not a clean way to shut down if we have daemons left. We have no mechanism for
1334   // killing them and reclaiming thread stacks. We also have no mechanism for waiting until they
1335   // have truly finished touching the memory we are about to deallocate. We do the best we can with
1336   // timeouts.
1337   //
1338   // If we have any daemons left, wait until they are (a) suspended and (b) no longer paused at a
1339   // point where, although not runnable, they are about to access runtime state. We attempt to
1340   // ensure the latter by simply waiting long enough for things to quiesce. Examples: monitor
1341   // code, or waking up from a condition variable.
1342   //
1343   // Give the threads a chance to suspend, complaining if they're slow. (a)
1344   bool have_complained = false;
1345   static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
1346   static constexpr size_t kSleepMicroseconds = 1000;
1347   bool all_suspended = false;
1348   for (size_t i = 0; !all_suspended && i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
1349     bool found_running = false;
1350     {
1351       MutexLock mu(self, *Locks::thread_list_lock_);
1352       for (const auto& thread : list_) {
1353         if (thread != self && thread->GetState() == ThreadState::kRunnable) {
1354           if (!have_complained) {
1355             LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
1356             have_complained = true;
1357           }
1358           found_running = true;
1359         }
1360       }
1361     }
1362     if (found_running) {
1363       // Sleep briefly before checking again. Max total sleep time is kTimeoutMicroseconds.
1364       usleep(kSleepMicroseconds);
1365     } else {
1366       all_suspended = true;
1367     }
1368   }
1369   if (!all_suspended) {
1370     // We can get here if a daemon thread executed a fastnative native call, so that it
1371     // remained in runnable state, and then made a JNI call after we called
1372     // SetFunctionsToRuntimeShutdownFunctions(), causing it to permanently stay in a harmless
1373     // but runnable state. See b/147804269 .
1374     LOG(WARNING) << "timed out suspending all daemon threads";
1375   }
1376   // Assume all threads are either suspended or somehow wedged.
1377   // Wait again for all the now "suspended" threads to actually quiesce. (b)
1378   static constexpr size_t kDaemonSleepTime = 400'000;
1379   usleep(kDaemonSleepTime);
1380   std::list<Thread*> list_copy;
1381   {
1382     MutexLock mu(self, *Locks::thread_list_lock_);
1383     // Half-way through the wait, set the "runtime deleted" flag, causing any newly awoken
1384     // threads to immediately go back to sleep without touching memory. This prevents us from
1385     // touching deallocated memory, but it also prevents mutexes from getting released. Thus we
1386     // only do this once we're reasonably sure that no system mutexes are still held.
1387     for (const auto& thread : list_) {
1388       DCHECK(thread == self || !all_suspended || thread->GetState() != ThreadState::kRunnable);
1389       // In the !all_suspended case, the target is probably sleeping.
1390       thread->GetJniEnv()->SetRuntimeDeleted();
1391       // Possibly contended Mutex acquisitions are unsafe after this.
1392       // Releasing thread_list_lock_ is OK, since it can't block.
1393     }
1394   }
1395   // Finally wait for any threads woken before we set the "runtime deleted" flags to finish
1396   // touching memory.
1397   usleep(kDaemonSleepTime);
1398 #if defined(__has_feature)
1399 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
1400   // Sleep a bit longer with -fsanitize=address, since everything is slower.
1401   usleep(2 * kDaemonSleepTime);
1402 #endif
1403 #endif
1404   // At this point no threads should be touching our data structures anymore.
1405 }
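// Timeline recap (editorial): with the constants above, shutdown polls for up to
// kTimeoutMicroseconds = 2 s (every kSleepMicroseconds = 1 ms) for daemons to leave the runnable
// state, sleeps kDaemonSleepTime = 0.4 s, marks the runtime deleted in every JNIEnv, then sleeps
// another 0.4 s (plus 0.8 s more under ASan/HWASan) before the caller tears down runtime state.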
1406 
1407 void ThreadList::Register(Thread* self) {
1408   DCHECK_EQ(self, Thread::Current());
1409   CHECK(!shut_down_);
1410 
1411   if (VLOG_IS_ON(threads)) {
1412     std::ostringstream oss;
1413     self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
1414     LOG(INFO) << "ThreadList::Register() " << *self  << "\n" << oss.str();
1415   }
1416 
1417   // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
1418   // SuspendAll requests.
1419   MutexLock mu(self, *Locks::thread_list_lock_);
1420   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1421   if (suspend_all_count_ == 1) {
1422     self->IncrementSuspendCount(self);
1423   } else {
1424     DCHECK_EQ(suspend_all_count_, 0);
1425   }
1426   CHECK(!Contains(self));
1427   list_.push_back(self);
1428   if (gUseReadBarrier) {
1429     gc::collector::ConcurrentCopying* const cc =
1430         Runtime::Current()->GetHeap()->ConcurrentCopyingCollector();
1431     // Initialize according to the state of the CC collector.
1432     self->SetIsGcMarkingAndUpdateEntrypoints(cc->IsMarking());
1433     if (cc->IsUsingReadBarrierEntrypoints()) {
1434       self->SetReadBarrierEntrypoints();
1435     }
1436     self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
1437   }
1438 }
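// Note (editorial): the suspend_all_count_ handling above keeps a newly registered thread
// consistent with an in-progress SuspendAll. Sketch of the interleaving it guards against:
//
//   T1: SuspendAll()    // suspend_all_count_ becomes 1; all registered threads get suspended.
//   T2: Register(self)  // self joins list_ with a suspend count of 1, so it cannot run yet.
//   T1: ResumeAll()     // decrements every thread's count, including the newly registered one.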
1439 
1440 void ThreadList::Unregister(Thread* self, bool should_run_callbacks) {
1441   DCHECK_EQ(self, Thread::Current());
1442   CHECK_NE(self->GetState(), ThreadState::kRunnable);
1443   Locks::mutator_lock_->AssertNotHeld(self);
1444   if (self->tls32_.disable_thread_flip_count != 0) {
1445     LOG(FATAL) << "Incomplete PrimitiveArrayCritical section at exit: " << *self << "count = "
1446                << self->tls32_.disable_thread_flip_count;
1447   }
1448 
1449   VLOG(threads) << "ThreadList::Unregister() " << *self;
1450 
1451   {
1452     MutexLock mu(self, *Locks::thread_list_lock_);
1453     ++unregistering_count_;
1454   }
1455 
1456   // Any time-consuming destruction, plus anything that can call back into managed code or
1457   // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
1458   // causes the threads to join. It is important to do this after incrementing unregistering_count_
1459   // since we want the runtime to wait for the daemon threads to exit before deleting the thread
1460   // list.
1461   self->Destroy(should_run_callbacks);
1462 
1463   uint32_t thin_lock_id = self->GetThreadId();
1464   while (true) {
1465     // Remove and delete the Thread* while holding the thread_list_lock_ and
1466     // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
1467     // Note: deliberately not using MutexLock that could hold a stale self pointer.
1468     {
1469       MutexLock mu(self, *Locks::thread_list_lock_);
1470       if (!Contains(self)) {
1471         std::string thread_name;
1472         self->GetThreadName(thread_name);
1473         std::ostringstream os;
1474         DumpNativeStack(os, GetTid(), "  native: ", nullptr);
1475         LOG(FATAL) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
1476         UNREACHABLE();
1477       } else {
1478         MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1479         Thread::StateAndFlags state_and_flags = self->GetStateAndFlags(std::memory_order_acquire);
1480         if (!state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction) &&
1481             !state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
1482           list_.remove(self);
1483           self->SignalExitFlags();
1484           break;
1485         }
1486       }
1487     }
1488     // In the case where we are not suspended yet, sleep to leave other threads time to execute.
1489     // This is important if there are realtime threads. b/111277984
1490     usleep(1);
1491     // We failed to remove the thread due to a suspend request or the like, loop and try again.
1492   }
1493 
1494   // We flush the trace buffer in Thread::Destroy. We have to check again here because once the
1495   // Thread::Destroy finishes we wait for any active suspend requests to finish before deleting
1496   // the thread. If a new trace was started during the wait period we may allocate the trace buffer
1497   // again. The trace buffer would only contain the method entry events for the methods on the stack
1498   // of an exiting thread. It is not required to flush these entries but we need to release the
1499   // buffer. Ideally we should either not generate trace events for a thread that is exiting or use
1500   // a different mechanism for reporting the initial events on a trace start that doesn't rely on
1501   // per-thread buffers. Neither approach is trivial to implement, so we settle for just releasing
1502   // the buffer here.
1503   if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
1504     Trace::ReleaseThreadBuffer(self);
1505   }
1506   delete self;
1507 
1508   // Release the thread ID after the thread is finished and deleted to avoid cases where we can
1509   // temporarily have multiple threads with the same thread id. When this occurs, it causes
1510   // problems in FindThreadByThreadId / SuspendThreadByThreadId.
1511   ReleaseThreadId(nullptr, thin_lock_id);
1512 
1513   // Clear the TLS data, so that the underlying native thread is recognizably detached.
1514   // (It may wish to reattach later.)
1515 #ifdef __BIONIC__
1516   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
1517 #else
1518   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
1519   Thread::self_tls_ = nullptr;
1520 #endif
1521 
1522   // Signal that a thread just detached.
1523   MutexLock mu(nullptr, *Locks::thread_list_lock_);
1524   --unregistering_count_;
1525   Locks::thread_exit_cond_->Broadcast(nullptr);
1526 }
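// Flow sketch (editorial): Unregister() is normally reached via Runtime::Current()->
// DetachCurrentThread() when an attached native thread detaches, or from the thread exit path.
// After it returns, `self` has been deleted and the native thread no longer has an art::Thread:
//
//   Runtime::Current()->DetachCurrentThread();  // Eventually calls ThreadList::Unregister(self).
//   // Thread::Current() now returns nullptr on this native thread; it may re-attach later.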
1527 
1528 void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
1529   for (const auto& thread : list_) {
1530     callback(thread, context);
1531   }
1532 }
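// Usage sketch (editorial): ForEach() iterates list_ directly, so the caller is expected to hold
// Locks::thread_list_lock_. A hypothetical callback that counts the registered threads:
//
//   static void CountThread([[maybe_unused]] Thread* thread, void* arg) {
//     ++*static_cast<size_t*>(arg);
//   }
//   ...
//   size_t count = 0;
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);
//     thread_list->ForEach(CountThread, &count);
//   }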
1533 
1534 void ThreadList::WaitForUnregisterToComplete(Thread* self) {
1535   // We hold thread_list_lock_.
1536   while (unregistering_count_ != 0) {
1537     LOG(WARNING) << "Waiting for a thread to finish unregistering";
1538     Locks::thread_exit_cond_->Wait(self);
1539   }
1540 }
1541 
1542 void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
1543   Thread* const self = Thread::Current();
1544   std::vector<Thread*> threads_to_visit;
1545 
1546   // Tell threads to suspend and copy them into list.
1547   {
1548     MutexLock mu(self, *Locks::thread_list_lock_);
1549     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1550     for (Thread* thread : list_) {
1551       thread->IncrementSuspendCount(self);
1552       if (thread == self || thread->IsSuspended()) {
1553         threads_to_visit.push_back(thread);
1554       } else {
1555         thread->DecrementSuspendCount(self);
1556       }
1557     }
1558   }
1559 
1560   // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
1561   // order violations.
1562   for (Thread* thread : threads_to_visit) {
1563     thread->VisitRoots(visitor, kVisitRootFlagAllRoots);
1564   }
1565 
1566   // Restore suspend counts.
1567   {
1568     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1569     for (Thread* thread : threads_to_visit) {
1570       thread->DecrementSuspendCount(self);
1571     }
1572     Thread::resume_cond_->Broadcast(self);
1573   }
1574 }
1575 
1576 void ThreadList::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const {
1577   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1578   for (const auto& thread : list_) {
1579     thread->VisitRoots(visitor, flags);
1580   }
1581 }
1582 
1583 void ThreadList::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const {
1584   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1585   for (const auto& thread : list_) {
1586     thread->VisitReflectiveTargets(visitor);
1587   }
1588 }
1589 
1590 void ThreadList::SweepInterpreterCaches(IsMarkedVisitor* visitor) const {
1591   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1592   for (const auto& thread : list_) {
1593     thread->SweepInterpreterCache(visitor);
1594   }
1595 }
1596 
1597 uint32_t ThreadList::AllocThreadId(Thread* self) {
1598   MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
1599   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
1600     if (!allocated_ids_[i]) {
1601       allocated_ids_.set(i);
1602       return i + 1;  // Zero is reserved to mean "invalid".
1603     }
1604   }
1605   LOG(FATAL) << "Out of internal thread ids";
1606   UNREACHABLE();
1607 }
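// Worked example (editorial): ids are bitmap indices shifted by one because 0 is reserved as
// kInvalidThreadId. If bits 0 and 1 are already set, AllocThreadId() sets bit 2 and returns 3;
// ReleaseThreadId(self, 3) later clears bit 2 again.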
1608 
1609 void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
1610   MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
1611   --id;  // Zero is reserved to mean "invalid".
1612   DCHECK(allocated_ids_[id]) << id;
1613   allocated_ids_.reset(id);
1614 }
1615 
1616 ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
1617   Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
1618 }
1619 
1620 ScopedSuspendAll::~ScopedSuspendAll() {
1621   Runtime::Current()->GetThreadList()->ResumeAll();
1622 }
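// Usage sketch (editorial): ScopedSuspendAll is the RAII wrapper around SuspendAll()/ResumeAll();
// all other threads remain suspended for the lifetime of the object:
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__);
//     // All other threads are suspended here; safe to inspect runtime-wide state.
//   }  // Destructor resumes all threads.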
1623 
1624 }  // namespace art
1625