• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define ATRACE_TAG ATRACE_TAG_DALVIK
18 
19 #include "thread.h"
20 
21 #include <cutils/trace.h>
22 #include <pthread.h>
23 #include <signal.h>
24 #include <sys/resource.h>
25 #include <sys/time.h>
26 
27 #include <algorithm>
28 #include <bitset>
29 #include <cerrno>
30 #include <iostream>
31 #include <list>
32 
33 #include "arch/context.h"
34 #include "base/mutex.h"
35 #include "class_linker.h"
36 #include "class_linker-inl.h"
37 #include "cutils/atomic.h"
38 #include "cutils/atomic-inline.h"
39 #include "debugger.h"
40 #include "dex_file-inl.h"
41 #include "entrypoints/entrypoint_utils.h"
42 #include "gc_map.h"
43 #include "gc/accounting/card_table-inl.h"
44 #include "gc/heap.h"
45 #include "gc/space/space.h"
46 #include "invoke_arg_array_builder.h"
47 #include "jni_internal.h"
48 #include "mirror/art_field-inl.h"
49 #include "mirror/art_method-inl.h"
50 #include "mirror/class-inl.h"
51 #include "mirror/class_loader.h"
52 #include "mirror/object_array-inl.h"
53 #include "mirror/stack_trace_element.h"
54 #include "monitor.h"
55 #include "object_utils.h"
56 #include "reflection.h"
57 #include "runtime.h"
58 #include "scoped_thread_state_change.h"
59 #include "ScopedLocalRef.h"
60 #include "ScopedUtfChars.h"
61 #include "sirt_ref.h"
62 #include "stack.h"
63 #include "stack_indirect_reference_table.h"
64 #include "thread-inl.h"
65 #include "thread_list.h"
66 #include "utils.h"
67 #include "verifier/dex_gc_map.h"
68 #include "verifier/method_verifier.h"
69 #include "vmap_table.h"
70 #include "well_known_classes.h"
71 
72 namespace art {
73 
74 bool Thread::is_started_ = false;
75 pthread_key_t Thread::pthread_key_self_;
76 ConditionVariable* Thread::resume_cond_ = NULL;
77 
78 static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
79 
InitCardTable()80 void Thread::InitCardTable() {
81   card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
82 }
83 
84 #if !defined(__APPLE__)
UnimplementedEntryPoint()85 static void UnimplementedEntryPoint() {
86   UNIMPLEMENTED(FATAL);
87 }
88 #endif
89 
90 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
91                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);
92 
InitTlsEntryPoints()93 void Thread::InitTlsEntryPoints() {
94 #if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
95   // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
96   uintptr_t* begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
97   uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
98   for (uintptr_t* it = begin; it != end; ++it) {
99     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
100   }
101   begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
102   end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
103   for (uintptr_t* it = begin; it != end; ++it) {
104     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
105   }
106 #endif
107   InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_,
108                   &quick_entrypoints_);
109 }
110 
SetDeoptimizationShadowFrame(ShadowFrame * sf)111 void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
112   deoptimization_shadow_frame_ = sf;
113 }
114 
SetDeoptimizationReturnValue(const JValue & ret_val)115 void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
116   deoptimization_return_value_.SetJ(ret_val.GetJ());
117 }
118 
GetAndClearDeoptimizationShadowFrame(JValue * ret_val)119 ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
120   ShadowFrame* sf = deoptimization_shadow_frame_;
121   deoptimization_shadow_frame_ = NULL;
122   ret_val->SetJ(deoptimization_return_value_.GetJ());
123   return sf;
124 }
125 
InitTid()126 void Thread::InitTid() {
127   tid_ = ::art::GetTid();
128 }
129 
InitAfterFork()130 void Thread::InitAfterFork() {
131   // One thread (us) survived the fork, but we have a new tid so we need to
132   // update the value stashed in this Thread*.
133   InitTid();
134 }
135 
CreateCallback(void * arg)136 void* Thread::CreateCallback(void* arg) {
137   Thread* self = reinterpret_cast<Thread*>(arg);
138   Runtime* runtime = Runtime::Current();
139   if (runtime == NULL) {
140     LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
141     return NULL;
142   }
143   {
144     // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
145     //       after self->Init().
146     MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
147     // Check that if we got here we cannot be shutting down (as shutdown should never have started
148     // while threads are being born).
149     CHECK(!runtime->IsShuttingDown());
150     self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
151     Runtime::Current()->EndThreadBirth();
152   }
153   {
154     ScopedObjectAccess soa(self);
155 
156     // Copy peer into self, deleting global reference when done.
157     CHECK(self->jpeer_ != NULL);
158     self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
159     self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
160     self->jpeer_ = NULL;
161 
162     {
163       SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
164       self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
165     }
166     Dbg::PostThreadStart(self);
167 
168     // Invoke the 'run' method of our java.lang.Thread.
169     mirror::Object* receiver = self->opeer_;
170     jmethodID mid = WellKnownClasses::java_lang_Thread_run;
171     mirror::ArtMethod* m =
172         receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
173     JValue result;
174     ArgArray arg_array(NULL, 0);
175     arg_array.Append(reinterpret_cast<uint32_t>(receiver));
176     m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
177   }
178   // Detach and delete self.
179   Runtime::Current()->GetThreadList()->Unregister(self);
180 
181   return NULL;
182 }
183 
FromManagedThread(const ScopedObjectAccessUnchecked & soa,mirror::Object * thread_peer)184 Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
185                                   mirror::Object* thread_peer) {
186   mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
187   Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
188   // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
189   // to stop it from going away.
190   if (kIsDebugBuild) {
191     MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
192     if (result != NULL && !result->IsSuspended()) {
193       Locks::thread_list_lock_->AssertHeld(soa.Self());
194     }
195   }
196   return result;
197 }
198 
FromManagedThread(const ScopedObjectAccessUnchecked & soa,jobject java_thread)199 Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
200   return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
201 }
202 
FixStackSize(size_t stack_size)203 static size_t FixStackSize(size_t stack_size) {
204   // A stack size of zero means "use the default".
205   if (stack_size == 0) {
206     stack_size = Runtime::Current()->GetDefaultStackSize();
207   }
208 
209   // Dalvik used the bionic pthread default stack size for native threads,
210   // so include that here to support apps that expect large native stacks.
211   stack_size += 1 * MB;
212 
213   // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
214   if (stack_size < PTHREAD_STACK_MIN) {
215     stack_size = PTHREAD_STACK_MIN;
216   }
217 
218   // It's likely that callers are trying to ensure they have at least a certain amount of
219   // stack space, so we should add our reserved space on top of what they requested, rather
220   // than implicitly take it away from them.
221   stack_size += Thread::kStackOverflowReservedBytes;
222 
223   // Some systems require the stack size to be a multiple of the system page size, so round up.
224   stack_size = RoundUp(stack_size, kPageSize);
225 
226   return stack_size;
227 }
228 
CreateNativeThread(JNIEnv * env,jobject java_peer,size_t stack_size,bool is_daemon)229 void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
230   CHECK(java_peer != NULL);
231   Thread* self = static_cast<JNIEnvExt*>(env)->self;
232   Runtime* runtime = Runtime::Current();
233 
234   // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
235   bool thread_start_during_shutdown = false;
236   {
237     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
238     if (runtime->IsShuttingDown()) {
239       thread_start_during_shutdown = true;
240     } else {
241       runtime->StartThreadBirth();
242     }
243   }
244   if (thread_start_during_shutdown) {
245     ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
246     env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
247     return;
248   }
249 
250   Thread* child_thread = new Thread(is_daemon);
251   // Use global JNI ref to hold peer live while child thread starts.
252   child_thread->jpeer_ = env->NewGlobalRef(java_peer);
253   stack_size = FixStackSize(stack_size);
254 
255   // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
256   // assign it.
257   env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
258                    reinterpret_cast<jint>(child_thread));
259 
260   pthread_t new_pthread;
261   pthread_attr_t attr;
262   CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
263   CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
264   CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
265   int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
266   CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
267 
268   if (pthread_create_result != 0) {
269     // pthread_create(3) failed, so clean up.
270     {
271       MutexLock mu(self, *Locks::runtime_shutdown_lock_);
272       runtime->EndThreadBirth();
273     }
274     // Manually delete the global reference since Thread::Init will not have been run.
275     env->DeleteGlobalRef(child_thread->jpeer_);
276     child_thread->jpeer_ = NULL;
277     delete child_thread;
278     child_thread = NULL;
279     // TODO: remove from thread group?
280     env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
281     {
282       std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
283                                    PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
284       ScopedObjectAccess soa(env);
285       soa.Self()->ThrowOutOfMemoryError(msg.c_str());
286     }
287   }
288 }
289 
Init(ThreadList * thread_list,JavaVMExt * java_vm)290 void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
291   // This function does all the initialization that must be run by the native thread it applies to.
292   // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
293   // we can handshake with the corresponding native thread when it's ready.) Check this native
294   // thread hasn't been through here already...
295   CHECK(Thread::Current() == NULL);
296   SetUpAlternateSignalStack();
297   InitCpu();
298   InitTlsEntryPoints();
299   InitCardTable();
300   InitTid();
301   // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
302   // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
303   pthread_self_ = pthread_self();
304   CHECK(is_started_);
305   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
306   DCHECK_EQ(Thread::Current(), this);
307 
308   thin_lock_id_ = thread_list->AllocThreadId(this);
309   InitStackHwm();
310 
311   jni_env_ = new JNIEnvExt(this, java_vm);
312   thread_list->Register(this);
313 }
314 
Attach(const char * thread_name,bool as_daemon,jobject thread_group,bool create_peer)315 Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
316                        bool create_peer) {
317   Thread* self;
318   Runtime* runtime = Runtime::Current();
319   if (runtime == NULL) {
320     LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
321     return NULL;
322   }
323   {
324     MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
325     if (runtime->IsShuttingDown()) {
326       LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
327       return NULL;
328     } else {
329       Runtime::Current()->StartThreadBirth();
330       self = new Thread(as_daemon);
331       self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
332       Runtime::Current()->EndThreadBirth();
333     }
334   }
335 
336   CHECK_NE(self->GetState(), kRunnable);
337   self->SetState(kNative);
338 
339   // If we're the main thread, ClassLinker won't be created until after we're attached,
340   // so that thread needs a two-stage attach. Regular threads don't need this hack.
341   // In the compiler, all threads need this hack, because no-one's going to be getting
342   // a native peer!
343   if (create_peer) {
344     self->CreatePeer(thread_name, as_daemon, thread_group);
345   } else {
346     // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
347     if (thread_name != NULL) {
348       self->name_->assign(thread_name);
349       ::art::SetThreadName(thread_name);
350     }
351   }
352 
353   return self;
354 }
355 
CreatePeer(const char * name,bool as_daemon,jobject thread_group)356 void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
357   Runtime* runtime = Runtime::Current();
358   CHECK(runtime->IsStarted());
359   JNIEnv* env = jni_env_;
360 
361   if (thread_group == NULL) {
362     thread_group = runtime->GetMainThreadGroup();
363   }
364   ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
365   jint thread_priority = GetNativePriority();
366   jboolean thread_is_daemon = as_daemon;
367 
368   ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
369   if (peer.get() == NULL) {
370     CHECK(IsExceptionPending());
371     return;
372   }
373   {
374     ScopedObjectAccess soa(this);
375     opeer_ = soa.Decode<mirror::Object*>(peer.get());
376   }
377   env->CallNonvirtualVoidMethod(peer.get(),
378                                 WellKnownClasses::java_lang_Thread,
379                                 WellKnownClasses::java_lang_Thread_init,
380                                 thread_group, thread_name.get(), thread_priority, thread_is_daemon);
381   AssertNoPendingException();
382 
383   Thread* self = this;
384   DCHECK_EQ(self, Thread::Current());
385   jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
386                         reinterpret_cast<jint>(self));
387 
388   ScopedObjectAccess soa(self);
389   SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
390   if (peer_thread_name.get() == NULL) {
391     // The Thread constructor should have set the Thread.name to a
392     // non-null value. However, because we can run without code
393     // available (in the compiler, in tests), we manually assign the
394     // fields the constructor should have set.
395     soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
396         SetBoolean(opeer_, thread_is_daemon);
397     soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
398         SetObject(opeer_, soa.Decode<mirror::Object*>(thread_group));
399     soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
400         SetObject(opeer_, soa.Decode<mirror::Object*>(thread_name.get()));
401     soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
402         SetInt(opeer_, thread_priority);
403     peer_thread_name.reset(GetThreadName(soa));
404   }
405   // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
406   if (peer_thread_name.get() != NULL) {
407     SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
408   }
409 }
410 
SetThreadName(const char * name)411 void Thread::SetThreadName(const char* name) {
412   name_->assign(name);
413   ::art::SetThreadName(name);
414   Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
415 }
416 
InitStackHwm()417 void Thread::InitStackHwm() {
418   void* stack_base;
419   size_t stack_size;
420   GetThreadStack(pthread_self_, stack_base, stack_size);
421 
422   // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
423   VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());
424 
425   stack_begin_ = reinterpret_cast<byte*>(stack_base);
426   stack_size_ = stack_size;
427 
428   if (stack_size_ <= kStackOverflowReservedBytes) {
429     LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
430   }
431 
432   // TODO: move this into the Linux GetThreadStack implementation.
433 #if !defined(__APPLE__)
434   // If we're the main thread, check whether we were run with an unlimited stack. In that case,
435   // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
436   // will be broken because we'll die long before we get close to 2GB.
437   bool is_main_thread = (::art::GetTid() == getpid());
438   if (is_main_thread) {
439     rlimit stack_limit;
440     if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
441       PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
442     }
443     if (stack_limit.rlim_cur == RLIM_INFINITY) {
444       // Find the default stack size for new threads...
445       pthread_attr_t default_attributes;
446       size_t default_stack_size;
447       CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
448       CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
449                          "default stack size query");
450       CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");
451 
452       // ...and use that as our limit.
453       size_t old_stack_size = stack_size_;
454       stack_size_ = default_stack_size;
455       stack_begin_ += (old_stack_size - stack_size_);
456       VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
457                     << " to " << PrettySize(stack_size_)
458                     << " with base " << reinterpret_cast<void*>(stack_begin_);
459     }
460   }
461 #endif
462 
463   // Set stack_end_ to the bottom of the stack saving space of stack overflows
464   ResetDefaultStackEnd();
465 
466   // Sanity check.
467   int stack_variable;
468   CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
469 }
470 
ShortDump(std::ostream & os) const471 void Thread::ShortDump(std::ostream& os) const {
472   os << "Thread[";
473   if (GetThinLockId() != 0) {
474     // If we're in kStarting, we won't have a thin lock id or tid yet.
475     os << GetThinLockId()
476              << ",tid=" << GetTid() << ',';
477   }
478   os << GetState()
479            << ",Thread*=" << this
480            << ",peer=" << opeer_
481            << ",\"" << *name_ << "\""
482            << "]";
483 }
484 
Dump(std::ostream & os) const485 void Thread::Dump(std::ostream& os) const {
486   DumpState(os);
487   DumpStack(os);
488 }
489 
GetThreadName(const ScopedObjectAccessUnchecked & soa) const490 mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
491   mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
492   return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL;
493 }
494 
GetThreadName(std::string & name) const495 void Thread::GetThreadName(std::string& name) const {
496   name.assign(*name_);
497 }
498 
GetCpuMicroTime() const499 uint64_t Thread::GetCpuMicroTime() const {
500 #if defined(HAVE_POSIX_CLOCKS)
501   clockid_t cpu_clock_id;
502   pthread_getcpuclockid(pthread_self_, &cpu_clock_id);
503   timespec now;
504   clock_gettime(cpu_clock_id, &now);
505   return static_cast<uint64_t>(now.tv_sec) * 1000000LL + now.tv_nsec / 1000LL;
506 #else
507   UNIMPLEMENTED(WARNING);
508   return -1;
509 #endif
510 }
511 
AtomicSetFlag(ThreadFlag flag)512 void Thread::AtomicSetFlag(ThreadFlag flag) {
513   android_atomic_or(flag, &state_and_flags_.as_int);
514 }
515 
AtomicClearFlag(ThreadFlag flag)516 void Thread::AtomicClearFlag(ThreadFlag flag) {
517   android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
518 }
519 
520 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
UnsafeLogFatalForSuspendCount(Thread * self,Thread * thread)521 static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
522   LOG(ERROR) << *thread << " suspend count already zero.";
523   Locks::thread_suspend_count_lock_->Unlock(self);
524   if (!Locks::mutator_lock_->IsSharedHeld(self)) {
525     Locks::mutator_lock_->SharedTryLock(self);
526     if (!Locks::mutator_lock_->IsSharedHeld(self)) {
527       LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
528     }
529   }
530   if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
531     Locks::thread_list_lock_->TryLock(self);
532     if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
533       LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
534     }
535   }
536   std::ostringstream ss;
537   Runtime::Current()->GetThreadList()->DumpLocked(ss);
538   LOG(FATAL) << ss.str();
539 }
540 
ModifySuspendCount(Thread * self,int delta,bool for_debugger)541 void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
542   DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
543       << delta << " " << debug_suspend_count_ << " " << this;
544   DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
545   Locks::thread_suspend_count_lock_->AssertHeld(self);
546   if (this != self && !IsSuspended()) {
547     Locks::thread_list_lock_->AssertHeld(self);
548   }
549   if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
550     UnsafeLogFatalForSuspendCount(self, this);
551     return;
552   }
553 
554   suspend_count_ += delta;
555   if (for_debugger) {
556     debug_suspend_count_ += delta;
557   }
558 
559   if (suspend_count_ == 0) {
560     AtomicClearFlag(kSuspendRequest);
561   } else {
562     AtomicSetFlag(kSuspendRequest);
563   }
564 }
565 
RunCheckpointFunction()566 void Thread::RunCheckpointFunction() {
567   CHECK(checkpoint_function_ != NULL);
568   ATRACE_BEGIN("Checkpoint function");
569   checkpoint_function_->Run(this);
570   ATRACE_END();
571 }
572 
RequestCheckpoint(Closure * function)573 bool Thread::RequestCheckpoint(Closure* function) {
574   CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
575   checkpoint_function_ = function;
576   union StateAndFlags old_state_and_flags = state_and_flags_;
577   // We must be runnable to request a checkpoint.
578   old_state_and_flags.as_struct.state = kRunnable;
579   union StateAndFlags new_state_and_flags = old_state_and_flags;
580   new_state_and_flags.as_struct.flags |= kCheckpointRequest;
581   int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
582                                          &state_and_flags_.as_int);
583   return succeeded == 0;
584 }
585 
FullSuspendCheck()586 void Thread::FullSuspendCheck() {
587   VLOG(threads) << this << " self-suspending";
588   ATRACE_BEGIN("Full suspend check");
589   // Make thread appear suspended to other threads, release mutator_lock_.
590   TransitionFromRunnableToSuspended(kSuspended);
591   // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
592   TransitionFromSuspendedToRunnable();
593   ATRACE_END();
594   VLOG(threads) << this << " self-reviving";
595 }
596 
SuspendForDebugger(jobject peer,bool request_suspension,bool * timed_out)597 Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) {
598   static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
599   useconds_t total_delay_us = 0;
600   useconds_t delay_us = 0;
601   bool did_suspend_request = false;
602   *timed_out = false;
603   while (true) {
604     Thread* thread;
605     {
606       ScopedObjectAccess soa(Thread::Current());
607       Thread* self = soa.Self();
608       MutexLock mu(self, *Locks::thread_list_lock_);
609       thread = Thread::FromManagedThread(soa, peer);
610       if (thread == NULL) {
611         JNIEnv* env = self->GetJniEnv();
612         ScopedLocalRef<jstring> scoped_name_string(env,
613                                                    (jstring)env->GetObjectField(peer,
614                                                               WellKnownClasses::java_lang_Thread_name));
615         ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
616         if (scoped_name_chars.c_str() == NULL) {
617             LOG(WARNING) << "No such thread for suspend: " << peer;
618             env->ExceptionClear();
619         } else {
620             LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str();
621         }
622 
623         return NULL;
624       }
625       {
626         MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
627         if (request_suspension) {
628           thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
629           request_suspension = false;
630           did_suspend_request = true;
631         }
632         // IsSuspended on the current thread will fail as the current thread is changed into
633         // Runnable above. As the suspend count is now raised if this is the current thread
634         // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
635         // to just explicitly handle the current thread in the callers to this code.
636         CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger";
637         // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
638         // count, or else we've waited and it has self suspended) or is the current thread, we're
639         // done.
640         if (thread->IsSuspended()) {
641           return thread;
642         }
643         if (total_delay_us >= kTimeoutUs) {
644           LOG(ERROR) << "Thread suspension timed out: " << peer;
645           if (did_suspend_request) {
646             thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
647           }
648           *timed_out = true;
649           return NULL;
650         }
651       }
652       // Release locks and come out of runnable state.
653     }
654     for (int i = kLockLevelCount - 1; i >= 0; --i) {
655       BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
656       if (held_mutex != NULL) {
657         LOG(FATAL) << "Holding " << held_mutex->GetName()
658             << " while sleeping for thread suspension";
659       }
660     }
661     {
662       useconds_t new_delay_us = delay_us * 2;
663       CHECK_GE(new_delay_us, delay_us);
664       if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
665         delay_us = new_delay_us;
666       }
667     }
668     if (delay_us == 0) {
669       sched_yield();
670       // Default to 1 milliseconds (note that this gets multiplied by 2 before the first sleep).
671       delay_us = 500;
672     } else {
673       usleep(delay_us);
674       total_delay_us += delay_us;
675     }
676   }
677 }
678 
DumpState(std::ostream & os,const Thread * thread,pid_t tid)679 void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
680   std::string group_name;
681   int priority;
682   bool is_daemon = false;
683   Thread* self = Thread::Current();
684 
685   if (self != NULL && thread != NULL && thread->opeer_ != NULL) {
686     ScopedObjectAccessUnchecked soa(self);
687     priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
688     is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);
689 
690     mirror::Object* thread_group =
691         soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);
692 
693     if (thread_group != NULL) {
694       mirror::ArtField* group_name_field =
695           soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
696       mirror::String* group_name_string =
697           reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
698       group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
699     }
700   } else {
701     priority = GetNativePriority();
702   }
703 
704   std::string scheduler_group_name(GetSchedulerGroupName(tid));
705   if (scheduler_group_name.empty()) {
706     scheduler_group_name = "default";
707   }
708 
709   if (thread != NULL) {
710     os << '"' << *thread->name_ << '"';
711     if (is_daemon) {
712       os << " daemon";
713     }
714     os << " prio=" << priority
715        << " tid=" << thread->GetThinLockId()
716        << " " << thread->GetState();
717     if (thread->IsStillStarting()) {
718       os << " (still starting up)";
719     }
720     os << "\n";
721   } else {
722     os << '"' << ::art::GetThreadName(tid) << '"'
723        << " prio=" << priority
724        << " (not attached)\n";
725   }
726 
727   if (thread != NULL) {
728     MutexLock mu(self, *Locks::thread_suspend_count_lock_);
729     os << "  | group=\"" << group_name << "\""
730        << " sCount=" << thread->suspend_count_
731        << " dsCount=" << thread->debug_suspend_count_
732        << " obj=" << reinterpret_cast<void*>(thread->opeer_)
733        << " self=" << reinterpret_cast<const void*>(thread) << "\n";
734   }
735 
736   os << "  | sysTid=" << tid
737      << " nice=" << getpriority(PRIO_PROCESS, tid)
738      << " cgrp=" << scheduler_group_name;
739   if (thread != NULL) {
740     int policy;
741     sched_param sp;
742     CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
743     os << " sched=" << policy << "/" << sp.sched_priority
744        << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
745   }
746   os << "\n";
747 
748   // Grab the scheduler stats for this thread.
749   std::string scheduler_stats;
750   if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
751     scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
752   } else {
753     scheduler_stats = "0 0 0";
754   }
755 
756   char native_thread_state = '?';
757   int utime = 0;
758   int stime = 0;
759   int task_cpu = 0;
760   GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);
761 
762   os << "  | state=" << native_thread_state
763      << " schedstat=( " << scheduler_stats << " )"
764      << " utm=" << utime
765      << " stm=" << stime
766      << " core=" << task_cpu
767      << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
768   if (thread != NULL) {
769     os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
770        << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
771   }
772 }
773 
DumpState(std::ostream & os) const774 void Thread::DumpState(std::ostream& os) const {
775   Thread::DumpState(os, this, GetTid());
776 }
777 
778 struct StackDumpVisitor : public StackVisitor {
StackDumpVisitorart::StackDumpVisitor779   StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
780       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
781       : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
782         last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
783   }
784 
~StackDumpVisitorart::StackDumpVisitor785   virtual ~StackDumpVisitor() {
786     if (frame_count == 0) {
787       os << "  (no managed stack frames)\n";
788     }
789   }
790 
VisitFrameart::StackDumpVisitor791   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
792     mirror::ArtMethod* m = GetMethod();
793     if (m->IsRuntimeMethod()) {
794       return true;
795     }
796     const int kMaxRepetition = 3;
797     mirror::Class* c = m->GetDeclaringClass();
798     const mirror::DexCache* dex_cache = c->GetDexCache();
799     int line_number = -1;
800     if (dex_cache != NULL) {  // be tolerant of bad input
801       const DexFile& dex_file = *dex_cache->GetDexFile();
802       line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
803     }
804     if (line_number == last_line_number && last_method == m) {
805       repetition_count++;
806     } else {
807       if (repetition_count >= kMaxRepetition) {
808         os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
809       }
810       repetition_count = 0;
811       last_line_number = line_number;
812       last_method = m;
813     }
814     if (repetition_count < kMaxRepetition) {
815       os << "  at " << PrettyMethod(m, false);
816       if (m->IsNative()) {
817         os << "(Native method)";
818       } else {
819         mh.ChangeMethod(m);
820         const char* source_file(mh.GetDeclaringClassSourceFile());
821         os << "(" << (source_file != NULL ? source_file : "unavailable")
822            << ":" << line_number << ")";
823       }
824       os << "\n";
825       if (frame_count == 0) {
826         Monitor::DescribeWait(os, thread);
827       }
828       if (can_allocate) {
829         Monitor::VisitLocks(this, DumpLockedObject, &os);
830       }
831     }
832 
833     ++frame_count;
834     return true;
835   }
836 
DumpLockedObjectart::StackDumpVisitor837   static void DumpLockedObject(mirror::Object* o, void* context)
838       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
839     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
840     os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
841   }
842 
843   std::ostream& os;
844   const Thread* thread;
845   const bool can_allocate;
846   MethodHelper mh;
847   mirror::ArtMethod* last_method;
848   int last_line_number;
849   int repetition_count;
850   int frame_count;
851 };
852 
ShouldShowNativeStack(const Thread * thread)853 static bool ShouldShowNativeStack(const Thread* thread)
854     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
855   ThreadState state = thread->GetState();
856 
857   // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
858   if (state > kWaiting && state < kStarting) {
859     return true;
860   }
861 
862   // In an Object.wait variant or Thread.sleep? That's not interesting.
863   if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
864     return false;
865   }
866 
867   // In some other native method? That's interesting.
868   // We don't just check kNative because native methods will be in state kSuspended if they're
869   // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
870   // thread-startup states if it's early enough in their life cycle (http://b/7432159).
871   mirror::ArtMethod* current_method = thread->GetCurrentMethod(NULL);
872   return current_method != NULL && current_method->IsNative();
873 }
874 
DumpStack(std::ostream & os) const875 void Thread::DumpStack(std::ostream& os) const {
876   // TODO: we call this code when dying but may not have suspended the thread ourself. The
877   //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
878   //       the race with the thread_suspend_count_lock_).
879   // No point dumping for an abort in debug builds where we'll hit the not suspended check in stack.
880   bool dump_for_abort = (gAborting > 0) && !kIsDebugBuild;
881   if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
882     // If we're currently in native code, dump that stack before dumping the managed stack.
883     if (dump_for_abort || ShouldShowNativeStack(this)) {
884       DumpKernelStack(os, GetTid(), "  kernel: ", false);
885       DumpNativeStack(os, GetTid(), "  native: ", false);
886     }
887     UniquePtr<Context> context(Context::Create());
888     StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_);
889     dumper.WalkStack();
890   } else {
891     os << "Not able to dump stack of thread that isn't suspended";
892   }
893 }
894 
ThreadExitCallback(void * arg)895 void Thread::ThreadExitCallback(void* arg) {
896   Thread* self = reinterpret_cast<Thread*>(arg);
897   if (self->thread_exit_check_count_ == 0) {
898     LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
899     CHECK(is_started_);
900     CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
901     self->thread_exit_check_count_ = 1;
902   } else {
903     LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
904   }
905 }
906 
Startup()907 void Thread::Startup() {
908   CHECK(!is_started_);
909   is_started_ = true;
910   {
911     // MutexLock to keep annotalysis happy.
912     //
913     // Note we use NULL for the thread because Thread::Current can
914     // return garbage since (is_started_ == true) and
915     // Thread::pthread_key_self_ is not yet initialized.
916     // This was seen on glibc.
917     MutexLock mu(NULL, *Locks::thread_suspend_count_lock_);
918     resume_cond_ = new ConditionVariable("Thread resumption condition variable",
919                                          *Locks::thread_suspend_count_lock_);
920   }
921 
922   // Allocate a TLS slot.
923   CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
924 
925   // Double-check the TLS slot allocation.
926   if (pthread_getspecific(pthread_key_self_) != NULL) {
927     LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
928   }
929 }
930 
FinishStartup()931 void Thread::FinishStartup() {
932   Runtime* runtime = Runtime::Current();
933   CHECK(runtime->IsStarted());
934 
935   // Finish attaching the main thread.
936   ScopedObjectAccess soa(Thread::Current());
937   Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
938 
939   Runtime::Current()->GetClassLinker()->RunRootClinits();
940 }
941 
Shutdown()942 void Thread::Shutdown() {
943   CHECK(is_started_);
944   is_started_ = false;
945   CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
946   MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
947   if (resume_cond_ != NULL) {
948     delete resume_cond_;
949     resume_cond_ = NULL;
950   }
951 }
952 
Thread(bool daemon)953 Thread::Thread(bool daemon)
954     : suspend_count_(0),
955       card_table_(NULL),
956       exception_(NULL),
957       stack_end_(NULL),
958       managed_stack_(),
959       jni_env_(NULL),
960       self_(NULL),
961       opeer_(NULL),
962       jpeer_(NULL),
963       stack_begin_(NULL),
964       stack_size_(0),
965       stack_trace_sample_(NULL),
966       trace_clock_base_(0),
967       thin_lock_id_(0),
968       tid_(0),
969       wait_mutex_(new Mutex("a thread wait mutex")),
970       wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
971       wait_monitor_(NULL),
972       interrupted_(false),
973       wait_next_(NULL),
974       monitor_enter_object_(NULL),
975       top_sirt_(NULL),
976       runtime_(NULL),
977       class_loader_override_(NULL),
978       long_jump_context_(NULL),
979       throwing_OutOfMemoryError_(false),
980       debug_suspend_count_(0),
981       debug_invoke_req_(new DebugInvokeReq),
982       deoptimization_shadow_frame_(NULL),
983       instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
984       name_(new std::string(kThreadNameDuringStartup)),
985       daemon_(daemon),
986       pthread_self_(0),
987       no_thread_suspension_(0),
988       last_no_thread_suspension_cause_(NULL),
989       checkpoint_function_(0),
990       thread_exit_check_count_(0) {
991   CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
992   state_and_flags_.as_struct.flags = 0;
993   state_and_flags_.as_struct.state = kNative;
994   memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
995 }
996 
IsStillStarting() const997 bool Thread::IsStillStarting() const {
998   // You might think you can check whether the state is kStarting, but for much of thread startup,
999   // the thread is in kNative; it might also be in kVmWait.
1000   // You might think you can check whether the peer is NULL, but the peer is actually created and
1001   // assigned fairly early on, and needs to be.
1002   // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1003   // this thread _ever_ entered kRunnable".
1004   return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
1005 }
1006 
AssertNoPendingException() const1007 void Thread::AssertNoPendingException() const {
1008   if (UNLIKELY(IsExceptionPending())) {
1009     ScopedObjectAccess soa(Thread::Current());
1010     mirror::Throwable* exception = GetException(NULL);
1011     LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1012   }
1013 }
1014 
MonitorExitVisitor(const mirror::Object * object,void * arg)1015 static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
1016   Thread* self = reinterpret_cast<Thread*>(arg);
1017   mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
1018   if (self->HoldsLock(entered_monitor)) {
1019     LOG(WARNING) << "Calling MonitorExit on object "
1020                  << object << " (" << PrettyTypeOf(object) << ")"
1021                  << " left locked by native thread "
1022                  << *Thread::Current() << " which is detaching";
1023     entered_monitor->MonitorExit(self);
1024   }
1025 }
1026 
Destroy()1027 void Thread::Destroy() {
1028   Thread* self = this;
1029   DCHECK_EQ(self, Thread::Current());
1030 
1031   if (opeer_ != NULL) {
1032     ScopedObjectAccess soa(self);
1033     // We may need to call user-supplied managed code, do this before final clean-up.
1034     HandleUncaughtExceptions(soa);
1035     RemoveFromThreadGroup(soa);
1036 
1037     // this.nativePeer = 0;
1038     soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetInt(opeer_, 0);
1039     Dbg::PostThreadDeath(self);
1040 
1041     // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1042     // who is waiting.
1043     mirror::Object* lock =
1044         soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
1045     // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1046     if (lock != NULL) {
1047       ObjectLock locker(self, lock);
1048       locker.Notify();
1049     }
1050   }
1051 
1052   // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1053   if (jni_env_ != NULL) {
1054     jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
1055   }
1056 }
1057 
~Thread()1058 Thread::~Thread() {
1059   if (jni_env_ != NULL && jpeer_ != NULL) {
1060     // If pthread_create fails we don't have a jni env here.
1061     jni_env_->DeleteGlobalRef(jpeer_);
1062     jpeer_ = NULL;
1063   }
1064   opeer_ = NULL;
1065 
1066   delete jni_env_;
1067   jni_env_ = NULL;
1068 
1069   CHECK_NE(GetState(), kRunnable);
1070   // We may be deleting a still born thread.
1071   SetStateUnsafe(kTerminated);
1072 
1073   delete wait_cond_;
1074   delete wait_mutex_;
1075 
1076   if (long_jump_context_ != NULL) {
1077     delete long_jump_context_;
1078   }
1079 
1080   delete debug_invoke_req_;
1081   delete instrumentation_stack_;
1082   delete name_;
1083   delete stack_trace_sample_;
1084 
1085   TearDownAlternateSignalStack();
1086 }
1087 
HandleUncaughtExceptions(ScopedObjectAccess & soa)1088 void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1089   if (!IsExceptionPending()) {
1090     return;
1091   }
1092   ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
1093   ScopedThreadStateChange tsc(this, kNative);
1094 
1095   // Get and clear the exception.
1096   ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
1097   jni_env_->ExceptionClear();
1098 
1099   // If the thread has its own handler, use that.
1100   ScopedLocalRef<jobject> handler(jni_env_,
1101                                   jni_env_->GetObjectField(peer.get(),
1102                                                            WellKnownClasses::java_lang_Thread_uncaughtHandler));
1103   if (handler.get() == NULL) {
1104     // Otherwise use the thread group's default handler.
1105     handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
1106   }
1107 
1108   // Call the handler.
1109   jni_env_->CallVoidMethod(handler.get(),
1110                            WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
1111                            peer.get(), exception.get());
1112 
1113   // If the handler threw, clear that exception too.
1114   jni_env_->ExceptionClear();
1115 }
1116 
RemoveFromThreadGroup(ScopedObjectAccess & soa)1117 void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1118   // this.group.removeThread(this);
1119   // group can be null if we're in the compiler or a test.
1120   mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
1121   if (ogroup != NULL) {
1122     ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1123     ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
1124     ScopedThreadStateChange tsc(soa.Self(), kNative);
1125     jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
1126                              peer.get());
1127   }
1128 }
1129 
NumSirtReferences()1130 size_t Thread::NumSirtReferences() {
1131   size_t count = 0;
1132   for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1133     count += cur->NumberOfReferences();
1134   }
1135   return count;
1136 }
1137 
SirtContains(jobject obj) const1138 bool Thread::SirtContains(jobject obj) const {
1139   mirror::Object** sirt_entry = reinterpret_cast<mirror::Object**>(obj);
1140   for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1141     if (cur->Contains(sirt_entry)) {
1142       return true;
1143     }
1144   }
1145   // JNI code invoked from portable code uses shadow frames rather than the SIRT.
1146   return managed_stack_.ShadowFramesContain(sirt_entry);
1147 }
1148 
SirtVisitRoots(RootVisitor * visitor,void * arg)1149 void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
1150   for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1151     size_t num_refs = cur->NumberOfReferences();
1152     for (size_t j = 0; j < num_refs; j++) {
1153       mirror::Object* object = cur->GetReference(j);
1154       if (object != NULL) {
1155         visitor(object, arg);
1156       }
1157     }
1158   }
1159 }
1160 
DecodeJObject(jobject obj) const1161 mirror::Object* Thread::DecodeJObject(jobject obj) const {
1162   Locks::mutator_lock_->AssertSharedHeld(this);
1163   if (obj == NULL) {
1164     return NULL;
1165   }
1166   IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1167   IndirectRefKind kind = GetIndirectRefKind(ref);
1168   mirror::Object* result;
1169   // The "kinds" below are sorted by the frequency we expect to encounter them.
1170   if (kind == kLocal) {
1171     IndirectReferenceTable& locals = jni_env_->locals;
1172     result = const_cast<mirror::Object*>(locals.Get(ref));
1173   } else if (kind == kSirtOrInvalid) {
1174     // TODO: make stack indirect reference table lookup more efficient
1175     // Check if this is a local reference in the SIRT
1176     if (LIKELY(SirtContains(obj))) {
1177       result = *reinterpret_cast<mirror::Object**>(obj);  // Read from SIRT
1178     } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
1179       // Assume an invalid local reference is actually a direct pointer.
1180       result = reinterpret_cast<mirror::Object*>(obj);
1181     } else {
1182       result = kInvalidIndirectRefObject;
1183     }
1184   } else if (kind == kGlobal) {
1185     JavaVMExt* vm = Runtime::Current()->GetJavaVM();
1186     IndirectReferenceTable& globals = vm->globals;
1187     ReaderMutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
1188     result = const_cast<mirror::Object*>(globals.Get(ref));
1189   } else {
1190     DCHECK_EQ(kind, kWeakGlobal);
1191     result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1192     if (result == kClearedJniWeakGlobal) {
1193       // This is a special case where it's okay to return NULL.
1194       return nullptr;
1195     }
1196   }
1197 
1198   if (UNLIKELY(result == NULL)) {
1199     JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
1200   } else {
1201     if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
1202       Runtime::Current()->GetHeap()->VerifyObject(result);
1203     }
1204   }
1205   return result;
1206 }
1207 
1208 // Implements java.lang.Thread.interrupted.
Interrupted()1209 bool Thread::Interrupted() {
1210   MutexLock mu(Thread::Current(), *wait_mutex_);
1211   bool interrupted = interrupted_;
1212   interrupted_ = false;
1213   return interrupted;
1214 }
1215 
1216 // Implements java.lang.Thread.isInterrupted.
IsInterrupted()1217 bool Thread::IsInterrupted() {
1218   MutexLock mu(Thread::Current(), *wait_mutex_);
1219   return interrupted_;
1220 }
1221 
Interrupt()1222 void Thread::Interrupt() {
1223   Thread* self = Thread::Current();
1224   MutexLock mu(self, *wait_mutex_);
1225   if (interrupted_) {
1226     return;
1227   }
1228   interrupted_ = true;
1229   NotifyLocked(self);
1230 }
1231 
Notify()1232 void Thread::Notify() {
1233   Thread* self = Thread::Current();
1234   MutexLock mu(self, *wait_mutex_);
1235   NotifyLocked(self);
1236 }
1237 
NotifyLocked(Thread * self)1238 void Thread::NotifyLocked(Thread* self) {
1239   if (wait_monitor_ != NULL) {
1240     wait_cond_->Signal(self);
1241   }
1242 }
1243 
1244 class CountStackDepthVisitor : public StackVisitor {
1245  public:
1246   explicit CountStackDepthVisitor(Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)1247       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1248       : StackVisitor(thread, NULL),
1249         depth_(0), skip_depth_(0), skipping_(true) {}
1250 
VisitFrame()1251   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1252     // We want to skip frames up to and including the exception's constructor.
1253     // Note we also skip the frame if it doesn't have a method (namely the callee
1254     // save frame)
1255     mirror::ArtMethod* m = GetMethod();
1256     if (skipping_ && !m->IsRuntimeMethod() &&
1257         !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1258       skipping_ = false;
1259     }
1260     if (!skipping_) {
1261       if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1262         ++depth_;
1263       }
1264     } else {
1265       ++skip_depth_;
1266     }
1267     return true;
1268   }
1269 
GetDepth() const1270   int GetDepth() const {
1271     return depth_;
1272   }
1273 
GetSkipDepth() const1274   int GetSkipDepth() const {
1275     return skip_depth_;
1276   }
1277 
1278  private:
1279   uint32_t depth_;
1280   uint32_t skip_depth_;
1281   bool skipping_;
1282 };
1283 
1284 class BuildInternalStackTraceVisitor : public StackVisitor {
1285  public:
BuildInternalStackTraceVisitor(Thread * self,Thread * thread,int skip_depth)1286   explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1287       : StackVisitor(thread, NULL), self_(self),
1288         skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}
1289 
Init(int depth)1290   bool Init(int depth)
1291       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1292     // Allocate method trace with an extra slot that will hold the PC trace
1293     SirtRef<mirror::ObjectArray<mirror::Object> >
1294         method_trace(self_,
1295                      Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
1296                                                                                             depth + 1));
1297     if (method_trace.get() == NULL) {
1298       return false;
1299     }
1300     mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
1301     if (dex_pc_trace == NULL) {
1302       return false;
1303     }
1304     // Save PC trace in last element of method trace, also places it into the
1305     // object graph.
1306     method_trace->Set(depth, dex_pc_trace);
1307     // Set the Object*s and assert that no thread suspension is now possible.
1308     const char* last_no_suspend_cause =
1309         self_->StartAssertNoThreadSuspension("Building internal stack trace");
1310     CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
1311     method_trace_ = method_trace.get();
1312     dex_pc_trace_ = dex_pc_trace;
1313     return true;
1314   }
1315 
~BuildInternalStackTraceVisitor()1316   virtual ~BuildInternalStackTraceVisitor() {
1317     if (method_trace_ != NULL) {
1318       self_->EndAssertNoThreadSuspension(NULL);
1319     }
1320   }
1321 
VisitFrame()1322   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1323     if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
1324       return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1325     }
1326     if (skip_depth_ > 0) {
1327       skip_depth_--;
1328       return true;
1329     }
1330     mirror::ArtMethod* m = GetMethod();
1331     if (m->IsRuntimeMethod()) {
1332       return true;  // Ignore runtime frames (in particular callee save).
1333     }
1334     method_trace_->Set(count_, m);
1335     dex_pc_trace_->Set(count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
1336     ++count_;
1337     return true;
1338   }
1339 
GetInternalStackTrace() const1340   mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
1341     return method_trace_;
1342   }
1343 
1344  private:
1345   Thread* const self_;
1346   // How many more frames to skip.
1347   int32_t skip_depth_;
1348   // Current position down stack trace.
1349   uint32_t count_;
1350   // Array of dex PC values.
1351   mirror::IntArray* dex_pc_trace_;
1352   // An array of the methods on the stack, the last entry is a reference to the PC trace.
1353   mirror::ObjectArray<mirror::Object>* method_trace_;
1354 };
1355 
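// The internal stack trace is built in two passes: first count the interesting frames, then
// allocate the trace arrays and fill them in.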
1356 jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
1357   // Compute depth of stack
1358   CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1359   count_visitor.WalkStack();
1360   int32_t depth = count_visitor.GetDepth();
1361   int32_t skip_depth = count_visitor.GetSkipDepth();
1362 
1363   // Build internal stack trace.
1364   BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
1365                                                      skip_depth);
1366   if (!build_trace_visitor.Init(depth)) {
1367     return NULL;  // Allocation failed.
1368   }
1369   build_trace_visitor.WalkStack();
1370   mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
1371   if (kIsDebugBuild) {
1372     for (int32_t i = 0; i < trace->GetLength(); ++i) {
1373       CHECK(trace->Get(i) != NULL);
1374     }
1375   }
1376   return soa.AddLocalReference<jobjectArray>(trace);
1377 }
1378 
1379 jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
1380     jobjectArray output_array, int* stack_depth) {
1381   // Transition into runnable state to work on Object*/Array*
1382   ScopedObjectAccess soa(env);
1383   // Decode the internal stack trace into the depth, method trace and PC trace
1384   mirror::ObjectArray<mirror::Object>* method_trace =
1385       soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
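  // The last slot of the method trace holds the dex PC array (see BuildInternalStackTraceVisitor::Init),
  // so the number of frames is one less than the array length.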
1386   int32_t depth = method_trace->GetLength() - 1;
1387 
1388   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1389 
1390   jobjectArray result;
1391   mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
1392   if (output_array != NULL) {
1393     // Reuse the array we were given.
1394     result = output_array;
1395     java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
1396     // ...adjusting the number of frames we'll write to not exceed the array length.
1397     depth = std::min(depth, java_traces->GetLength());
1398   } else {
1399     // Create java_trace array and place in local reference table
1400     java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1401     if (java_traces == NULL) {
1402       return NULL;
1403     }
1404     result = soa.AddLocalReference<jobjectArray>(java_traces);
1405   }
1406 
1407   if (stack_depth != NULL) {
1408     *stack_depth = depth;
1409   }
1410 
1411   MethodHelper mh;
1412   for (int32_t i = 0; i < depth; ++i) {
1413     // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1414     mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
1415     mh.ChangeMethod(method);
1416     int32_t line_number;
1417     SirtRef<mirror::String> class_name_object(soa.Self(), nullptr);
1418     SirtRef<mirror::String> source_name_object(soa.Self(), nullptr);
1419     if (method->IsProxyMethod()) {
1420       line_number = -1;
1421       class_name_object.reset(method->GetDeclaringClass()->GetName());
1422       // source_name_object intentionally left null for proxy methods
1423     } else {
1424       mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1425       uint32_t dex_pc = pc_trace->Get(i);
1426       line_number = mh.GetLineNumFromDexPC(dex_pc);
1427       // Allocate element, potentially triggering GC
1428       // TODO: reuse class_name_object via Class::name_?
1429       const char* descriptor = mh.GetDeclaringClassDescriptor();
1430       CHECK(descriptor != nullptr);
1431       std::string class_name(PrettyDescriptor(descriptor));
1432       class_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1433       if (class_name_object.get() == nullptr) {
1434         return nullptr;
1435       }
1436       const char* source_file = mh.GetDeclaringClassSourceFile();
1437       if (source_file != nullptr) {
1438         source_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1439         if (source_name_object.get() == nullptr) {
1440           return nullptr;
1441         }
1442       }
1443     }
1444     const char* method_name = mh.GetName();
1445     CHECK(method_name != NULL);
1446     SirtRef<mirror::String> method_name_object(soa.Self(),
1447                                                mirror::String::AllocFromModifiedUtf8(soa.Self(),
1448                                                                                      method_name));
1449     if (method_name_object.get() == NULL) {
1450       return NULL;
1451     }
1452     mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
1453         class_name_object.get(), method_name_object.get(), source_name_object.get(), line_number);
1454     if (obj == NULL) {
1455       return NULL;
1456     }
1457 #ifdef MOVING_GARBAGE_COLLECTOR
1458     // Re-read after potential GC
1459     java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
1460     method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
1461     pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
1462 #endif
1463     java_traces->Set(i, obj);
1464   }
1465   return result;
1466 }
1467 
1468 void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
1469                                 const char* exception_class_descriptor, const char* fmt, ...) {
1470   va_list args;
1471   va_start(args, fmt);
1472   ThrowNewExceptionV(throw_location, exception_class_descriptor,
1473                      fmt, args);
1474   va_end(args);
1475 }
1476 
1477 void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
1478                                 const char* exception_class_descriptor,
1479                                 const char* fmt, va_list ap) {
1480   std::string msg;
1481   StringAppendV(&msg, fmt, ap);
1482   ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
1483 }
1484 
1485 void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
1486                                const char* msg) {
1487   AssertNoPendingException();  // Callers should either clear or call ThrowNewWrappedException.
1488   ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
1489 }
1490 
1491 void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
1492                                       const char* exception_class_descriptor,
1493                                       const char* msg) {
1494   DCHECK_EQ(this, Thread::Current());
1495   // Ensure we don't forget arguments over object allocation.
1496   SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
1497   SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
1498   // Ignore the cause throw location. TODO: should we report this as a re-throw?
1499   SirtRef<mirror::Throwable> cause(this, GetException(NULL));
1500   ClearException();
1501   Runtime* runtime = Runtime::Current();
1502 
1503   mirror::ClassLoader* cl = NULL;
1504   if (throw_location.GetMethod() != NULL) {
1505     cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
1506   }
1507   SirtRef<mirror::Class>
1508       exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
1509   if (UNLIKELY(exception_class.get() == NULL)) {
1510     CHECK(IsExceptionPending());
1511     LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1512     return;
1513   }
1514 
1515   if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) {
1516     DCHECK(IsExceptionPending());
1517     return;
1518   }
1519   DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1520   SirtRef<mirror::Throwable> exception(this,
1521                                 down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
1522 
1523   // Choose an appropriate constructor and set up the arguments.
1524   const char* signature;
1525   SirtRef<mirror::String> msg_string(this, NULL);
1526   if (msg != NULL) {
1527     // Ensure we remember this and the method over the String allocation.
1528     msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
1529     if (UNLIKELY(msg_string.get() == NULL)) {
1530       CHECK(IsExceptionPending());  // OOME.
1531       return;
1532     }
1533     if (cause.get() == NULL) {
1534       signature = "(Ljava/lang/String;)V";
1535     } else {
1536       signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1537     }
1538   } else {
1539     if (cause.get() == NULL) {
1540       signature = "()V";
1541     } else {
1542       signature = "(Ljava/lang/Throwable;)V";
1543     }
1544   }
1545   mirror::ArtMethod* exception_init_method =
1546       exception_class->FindDeclaredDirectMethod("<init>", signature);
1547 
1548   CHECK(exception_init_method != NULL) << "No <init>" << signature << " in "
1549       << PrettyDescriptor(exception_class_descriptor);
1550 
1551   if (UNLIKELY(!runtime->IsStarted())) {
1552     // Something is trying to throw an exception without a started runtime, which is the common
1553     // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1554     // the exception fields directly.
1555     if (msg != NULL) {
1556       exception->SetDetailMessage(msg_string.get());
1557     }
1558     if (cause.get() != NULL) {
1559       exception->SetCause(cause.get());
1560     }
1561     ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1562                                          throw_location.GetDexPc());
1563     SetException(gc_safe_throw_location, exception.get());
1564   } else {
1565     ArgArray args("VLL", 3);
1566     args.Append(reinterpret_cast<uint32_t>(exception.get()));
1567     if (msg != NULL) {
1568       args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
1569     }
1570     if (cause.get() != NULL) {
1571       args.Append(reinterpret_cast<uint32_t>(cause.get()));
1572     }
1573     JValue result;
1574     exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V');
1575     if (LIKELY(!IsExceptionPending())) {
1576       ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1577                                            throw_location.GetDexPc());
1578       SetException(gc_safe_throw_location, exception.get());
1579     }
1580   }
1581 }
1582 
1583 void Thread::ThrowOutOfMemoryError(const char* msg) {
1584   LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1585       msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
1586   ThrowLocation throw_location = GetCurrentLocationForThrow();
1587   if (!throwing_OutOfMemoryError_) {
1588     throwing_OutOfMemoryError_ = true;
1589     ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
1590     throwing_OutOfMemoryError_ = false;
1591   } else {
1592     Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
1593     SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1594   }
1595 }
1596 
1597 Thread* Thread::CurrentFromGdb() {
1598   return Thread::Current();
1599 }
1600 
1601 void Thread::DumpFromGdb() const {
1602   std::ostringstream ss;
1603   Dump(ss);
1604   std::string str(ss.str());
1605   // log to stderr for debugging command line processes
1606   std::cerr << str;
1607 #ifdef HAVE_ANDROID_OS
1608   // log to logcat for debugging frameworks processes
1609   LOG(INFO) << str;
1610 #endif
1611 }
1612 
1613 struct EntryPointInfo {
1614   uint32_t offset;
1615   const char* name;
1616 };
1617 #define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1618 #define JNI_ENTRY_POINT_INFO(x)         { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1619 #define PORTABLE_ENTRY_POINT_INFO(x)    { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1620 #define QUICK_ENTRY_POINT_INFO(x)       { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1621 static const EntryPointInfo gThreadEntryPointInfo[] = {
1622   INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
1623   INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
1624   JNI_ENTRY_POINT_INFO(pDlsymLookup),
1625   PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
1626   PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
1627   QUICK_ENTRY_POINT_INFO(pAllocArray),
1628   QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
1629   QUICK_ENTRY_POINT_INFO(pAllocObject),
1630   QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
1631   QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
1632   QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
1633   QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
1634   QUICK_ENTRY_POINT_INFO(pCanPutArrayElement),
1635   QUICK_ENTRY_POINT_INFO(pCheckCast),
1636   QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
1637   QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
1638   QUICK_ENTRY_POINT_INFO(pInitializeType),
1639   QUICK_ENTRY_POINT_INFO(pResolveString),
1640   QUICK_ENTRY_POINT_INFO(pSet32Instance),
1641   QUICK_ENTRY_POINT_INFO(pSet32Static),
1642   QUICK_ENTRY_POINT_INFO(pSet64Instance),
1643   QUICK_ENTRY_POINT_INFO(pSet64Static),
1644   QUICK_ENTRY_POINT_INFO(pSetObjInstance),
1645   QUICK_ENTRY_POINT_INFO(pSetObjStatic),
1646   QUICK_ENTRY_POINT_INFO(pGet32Instance),
1647   QUICK_ENTRY_POINT_INFO(pGet32Static),
1648   QUICK_ENTRY_POINT_INFO(pGet64Instance),
1649   QUICK_ENTRY_POINT_INFO(pGet64Static),
1650   QUICK_ENTRY_POINT_INFO(pGetObjInstance),
1651   QUICK_ENTRY_POINT_INFO(pGetObjStatic),
1652   QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
1653   QUICK_ENTRY_POINT_INFO(pJniMethodStart),
1654   QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1655   QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
1656   QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1657   QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
1658   QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
1659   QUICK_ENTRY_POINT_INFO(pLockObject),
1660   QUICK_ENTRY_POINT_INFO(pUnlockObject),
1661   QUICK_ENTRY_POINT_INFO(pCmpgDouble),
1662   QUICK_ENTRY_POINT_INFO(pCmpgFloat),
1663   QUICK_ENTRY_POINT_INFO(pCmplDouble),
1664   QUICK_ENTRY_POINT_INFO(pCmplFloat),
1665   QUICK_ENTRY_POINT_INFO(pFmod),
1666   QUICK_ENTRY_POINT_INFO(pSqrt),
1667   QUICK_ENTRY_POINT_INFO(pL2d),
1668   QUICK_ENTRY_POINT_INFO(pFmodf),
1669   QUICK_ENTRY_POINT_INFO(pL2f),
1670   QUICK_ENTRY_POINT_INFO(pD2iz),
1671   QUICK_ENTRY_POINT_INFO(pF2iz),
1672   QUICK_ENTRY_POINT_INFO(pIdivmod),
1673   QUICK_ENTRY_POINT_INFO(pD2l),
1674   QUICK_ENTRY_POINT_INFO(pF2l),
1675   QUICK_ENTRY_POINT_INFO(pLdiv),
1676   QUICK_ENTRY_POINT_INFO(pLdivmod),
1677   QUICK_ENTRY_POINT_INFO(pLmul),
1678   QUICK_ENTRY_POINT_INFO(pShlLong),
1679   QUICK_ENTRY_POINT_INFO(pShrLong),
1680   QUICK_ENTRY_POINT_INFO(pUshrLong),
1681   QUICK_ENTRY_POINT_INFO(pIndexOf),
1682   QUICK_ENTRY_POINT_INFO(pMemcmp16),
1683   QUICK_ENTRY_POINT_INFO(pStringCompareTo),
1684   QUICK_ENTRY_POINT_INFO(pMemcpy),
1685   QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
1686   QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
1687   QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1688   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
1689   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1690   QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1691   QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1692   QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1693   QUICK_ENTRY_POINT_INFO(pCheckSuspend),
1694   QUICK_ENTRY_POINT_INFO(pTestSuspend),
1695   QUICK_ENTRY_POINT_INFO(pDeliverException),
1696   QUICK_ENTRY_POINT_INFO(pThrowArrayBounds),
1697   QUICK_ENTRY_POINT_INFO(pThrowDivZero),
1698   QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
1699   QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
1700   QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
1701 };
1702 #undef QUICK_ENTRY_POINT_INFO
1703 
1704 void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1705   CHECK_EQ(size_of_pointers, 4U);  // TODO: support 64-bit targets.
1706 
1707 #define DO_THREAD_OFFSET(x) \
1708     if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \
1709       os << # x; \
1710       return; \
1711     }
1712   DO_THREAD_OFFSET(state_and_flags_);
1713   DO_THREAD_OFFSET(card_table_);
1714   DO_THREAD_OFFSET(exception_);
1715   DO_THREAD_OFFSET(opeer_);
1716   DO_THREAD_OFFSET(jni_env_);
1717   DO_THREAD_OFFSET(self_);
1718   DO_THREAD_OFFSET(stack_end_);
1719   DO_THREAD_OFFSET(suspend_count_);
1720   DO_THREAD_OFFSET(thin_lock_id_);
1721   // DO_THREAD_OFFSET(top_of_managed_stack_);
1722   // DO_THREAD_OFFSET(top_of_managed_stack_pc_);
1723   DO_THREAD_OFFSET(top_sirt_);
1724 #undef DO_THREAD_OFFSET
1725 
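  // The four entry point structs are assumed to be laid out contiguously in Thread, so walking
  // the table above in order must reproduce their offsets exactly.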
1726   size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1727   CHECK_EQ(entry_point_count * size_of_pointers,
1728            sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) +
1729            sizeof(QuickEntryPoints));
1730   uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_);
1731   for (size_t i = 0; i < entry_point_count; ++i) {
1732     CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
1733     expected_offset += size_of_pointers;
1734     if (gThreadEntryPointInfo[i].offset == offset) {
1735       os << gThreadEntryPointInfo[i].name;
1736       return;
1737     }
1738   }
1739   os << offset;
1740 }
1741 
1742 static const bool kDebugExceptionDelivery = false;
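// Walks the stack looking for a catch handler for the pending exception (or, for deoptimization,
// building shadow frames), then transfers control to the handler via DoLongJump().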
1743 class CatchBlockStackVisitor : public StackVisitor {
1744  public:
1745   CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location,
1746                          mirror::Throwable* exception, bool is_deoptimization)
1747       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1748       : StackVisitor(self, self->GetLongJumpContext()),
1749         self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
1750         to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
1751         handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
1752         native_method_count_(0), clear_exception_(false),
1753         method_tracing_active_(is_deoptimization ||
1754                                Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
1755         instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
1756     // Exception not in root sets, can't allow GC.
1757     last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
1758   }
1759 
1760   ~CatchBlockStackVisitor() {
1761     LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
1762   }
1763 
1764   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1765     mirror::ArtMethod* method = GetMethod();
1766     if (method == NULL) {
1767       // This is the upcall, we remember the frame and last pc so that we may long jump to them.
1768       handler_quick_frame_pc_ = GetCurrentQuickFramePc();
1769       handler_quick_frame_ = GetCurrentQuickFrame();
1770       return false;  // End stack walk.
1771     } else {
1772       if (UNLIKELY(method_tracing_active_ &&
1773                    GetQuickInstrumentationExitPc() == GetReturnPc())) {
1774         // Keep count of the number of unwinds during instrumentation.
1775         instrumentation_frames_to_pop_++;
1776       }
1777       if (method->IsRuntimeMethod()) {
1778         // Ignore callee save method.
1779         DCHECK(method->IsCalleeSaveMethod());
1780         return true;
1781       } else if (is_deoptimization_) {
1782         return HandleDeoptimization(method);
1783       } else {
1784         return HandleTryItems(method);
1785       }
1786     }
1787   }
1788 
1789   bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1790     uint32_t dex_pc = DexFile::kDexNoIndex;
1791     if (method->IsNative()) {
1792       native_method_count_++;
1793     } else {
1794       dex_pc = GetDexPc();
1795     }
1796     if (dex_pc != DexFile::kDexNoIndex) {
1797       uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception_);
1798       if (found_dex_pc != DexFile::kDexNoIndex) {
1799         handler_dex_pc_ = found_dex_pc;
1800         handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
1801         handler_quick_frame_ = GetCurrentQuickFrame();
1802         return false;  // End stack walk.
1803       }
1804     }
1805     return true;  // Continue stack walk.
1806   }
1807 
1808   bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1809     MethodHelper mh(m);
1810     const DexFile::CodeItem* code_item = mh.GetCodeItem();
1811     CHECK(code_item != NULL);
1812     uint16_t num_regs = code_item->registers_size_;
1813     uint32_t dex_pc = GetDexPc();
1814     const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
1815     uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
1816     ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
1817     verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
1818                                       &mh.GetClassDef(), code_item,
1819                                       m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
1820     verifier.Verify();
1821     std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
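    // DescribeVRegs returns a (kind, value) pair per register; restore each vreg into the
    // shadow frame according to its kind.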
1822     for (uint16_t reg = 0; reg < num_regs; reg++) {
1823       VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
1824       switch (kind) {
1825         case kUndefined:
1826           new_frame->SetVReg(reg, 0xEBADDE09);
1827           break;
1828         case kConstant:
1829           new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
1830           break;
1831         case kReferenceVReg:
1832           new_frame->SetVRegReference(reg,
1833                                       reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
1834           break;
1835         default:
1836           new_frame->SetVReg(reg, GetVReg(m, reg, kind));
1837           break;
1838       }
1839     }
1840     if (prev_shadow_frame_ != NULL) {
1841       prev_shadow_frame_->SetLink(new_frame);
1842     } else {
1843       top_shadow_frame_ = new_frame;
1844     }
1845     prev_shadow_frame_ = new_frame;
1846     return true;
1847   }
1848 
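  // Transfers control to the handler (or the upcall frame) found by the walk, restoring the
  // exception and popping instrumentation frames along the way.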
1849   void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1850     mirror::ArtMethod* catch_method = *handler_quick_frame_;
1851     if (catch_method == NULL) {
1852       if (kDebugExceptionDelivery) {
1853         LOG(INFO) << "Handler is upcall";
1854       }
1855     } else {
1856       CHECK(!is_deoptimization_);
1857       if (kDebugExceptionDelivery) {
1858         const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1859         int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
1860         LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
1861       }
1862     }
1863     if (clear_exception_) {
1864       // Exception was cleared as part of delivery.
1865       DCHECK(!self_->IsExceptionPending());
1866     } else {
1867       // Put exception back in root set with clear throw location.
1868       self_->SetException(ThrowLocation(), exception_);
1869     }
1870     self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
1871     // Do instrumentation events after allowing thread suspension again.
1872     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1873     for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) {
1874       // We pop the instrumentation stack here so as not to corrupt it during the stack walk.
1875       if (i != instrumentation_frames_to_pop_ - 1 || self_->GetInstrumentationStack()->front().method_ != catch_method) {
1876         // Don't pop the instrumentation frame of the catch handler.
1877         instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
1878       }
1879     }
1880     if (!is_deoptimization_) {
1881       instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
1882                                             exception_);
1883     } else {
1884       // TODO: proper return value.
1885       self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
1886     }
1887     // Place context back on thread so it will be available when we continue.
1888     self_->ReleaseLongJumpContext(context_);
1889     context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
1890     CHECK_NE(handler_quick_frame_pc_, 0u);
1891     context_->SetPC(handler_quick_frame_pc_);
1892     context_->SmashCallerSaves();
1893     context_->DoLongJump();
1894   }
1895 
1896  private:
1897   Thread* const self_;
1898   mirror::Throwable* const exception_;
1899   const bool is_deoptimization_;
1900   // The type of the exception catch block to find.
1901   mirror::Class* const to_find_;
1902   // Location of the throw.
1903   const ThrowLocation& throw_location_;
1904   // Quick frame with found handler or last frame if no handler found.
1905   mirror::ArtMethod** handler_quick_frame_;
1906   // PC to branch to for the handler.
1907   uintptr_t handler_quick_frame_pc_;
1908   // Associated dex PC.
1909   uint32_t handler_dex_pc_;
1910   // Number of native methods passed in crawl (equates to number of SIRTs to pop)
1911   uint32_t native_method_count_;
1912   // Should the exception be cleared as the catch block has no move-exception?
1913   bool clear_exception_;
1914   // Is method tracing active?
1915   const bool method_tracing_active_;
1916   // Support for nesting no thread suspension checks.
1917   const char* last_no_assert_suspension_cause_;
1918   // Number of frames to pop in long jump.
1919   size_t instrumentation_frames_to_pop_;
1920   ShadowFrame* top_shadow_frame_;
1921   ShadowFrame* prev_shadow_frame_;
1922 };
1923 
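// Delivers the pending exception (or starts deoptimization) by walking the stack for a catch
// handler and long jumping to it; this function does not return.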
1924 void Thread::QuickDeliverException() {
1925   // Get exception from thread.
1926   ThrowLocation throw_location;
1927   mirror::Throwable* exception = GetException(&throw_location);
1928   CHECK(exception != NULL);
1929   // Don't leave exception visible while we try to find the handler, which may cause class
1930   // resolution.
1931   ClearException();
1932   bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
1933   if (kDebugExceptionDelivery) {
1934     if (!is_deoptimization) {
1935       mirror::String* msg = exception->GetDetailMessage();
1936       std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
1937       DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
1938                 << ": " << str_msg << "\n");
1939     } else {
1940       DumpStack(LOG(INFO) << "Deoptimizing: ");
1941     }
1942   }
1943   CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization);
1944   catch_finder.WalkStack(true);
1945   catch_finder.DoLongJump();
1946   LOG(FATAL) << "UNREACHABLE";
1947 }
1948 
1949 Context* Thread::GetLongJumpContext() {
1950   Context* result = long_jump_context_;
1951   if (result == NULL) {
1952     result = Context::Create();
1953   } else {
1954     long_jump_context_ = NULL;  // Avoid context being shared.
1955     result->Reset();
1956   }
1957   return result;
1958 }
1959 
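// Finds the first non-runtime method on the stack, recording its dex pc and, when a context is
// supplied, its 'this' object.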
1960 struct CurrentMethodVisitor : public StackVisitor {
1961   CurrentMethodVisitor(Thread* thread, Context* context)
1962       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1963       : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
1964   virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1965     mirror::ArtMethod* m = GetMethod();
1966     if (m->IsRuntimeMethod()) {
1967       // Continue if this is a runtime method.
1968       return true;
1969     }
1970     if (context_ != NULL) {
1971       this_object_ = GetThisObject();
1972     }
1973     method_ = m;
1974     dex_pc_ = GetDexPc();
1975     return false;
1976   }
1977   mirror::Object* this_object_;
1978   mirror::ArtMethod* method_;
1979   uint32_t dex_pc_;
1980 };
1981 
1982 mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
1983   CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
1984   visitor.WalkStack(false);
1985   if (dex_pc != NULL) {
1986     *dex_pc = visitor.dex_pc_;
1987   }
1988   return visitor.method_;
1989 }
1990 
1991 ThrowLocation Thread::GetCurrentLocationForThrow() {
1992   Context* context = GetLongJumpContext();
1993   CurrentMethodVisitor visitor(this, context);
1994   visitor.WalkStack(false);
1995   ReleaseLongJumpContext(context);
1996   return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
1997 }
1998 
1999 bool Thread::HoldsLock(mirror::Object* object) {
2000   if (object == NULL) {
2001     return false;
2002   }
2003   return object->GetThinLockId() == thin_lock_id_;
2004 }
2005 
2006 // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
2007 template <typename RootVisitor>
2008 class ReferenceMapVisitor : public StackVisitor {
2009  public:
2010   ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
2011       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2012       : StackVisitor(thread, context), visitor_(visitor) {}
2013 
2014   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2015     if (false) {
2016       LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2017           << StringPrintf("@ PC:%04x", GetDexPc());
2018     }
2019     ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2020     if (shadow_frame != NULL) {
2021       mirror::ArtMethod* m = shadow_frame->GetMethod();
2022       size_t num_regs = shadow_frame->NumberOfVRegs();
2023       if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2024         // SIRT for JNI or References for interpreter.
2025         for (size_t reg = 0; reg < num_regs; ++reg) {
2026           mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2027           if (ref != NULL) {
2028             visitor_(ref, reg, this);
2029           }
2030         }
2031       } else {
2032         // Java method.
2033         // The portable path uses the DexGcMap format, stored in Method.native_gc_map_.
2034         const uint8_t* gc_map = m->GetNativeGcMap();
2035         CHECK(gc_map != NULL) << PrettyMethod(m);
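        // The first four bytes of the GC map encode its length, most significant byte first.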
2036         uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
2037                                                        (gc_map[1] << 16) |
2038                                                        (gc_map[2] << 8) |
2039                                                        (gc_map[3] << 0));
2040         verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
2041         uint32_t dex_pc = GetDexPc();
2042         const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2043         DCHECK(reg_bitmap != NULL);
2044         num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2045         for (size_t reg = 0; reg < num_regs; ++reg) {
2046           if (TestBitmap(reg, reg_bitmap)) {
2047             mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2048             if (ref != NULL) {
2049               visitor_(ref, reg, this);
2050             }
2051           }
2052         }
2053       }
2054     } else {
2055       mirror::ArtMethod* m = GetMethod();
2056       // Process register map (which native and runtime methods don't have)
2057       if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2058         const uint8_t* native_gc_map = m->GetNativeGcMap();
2059         CHECK(native_gc_map != NULL) << PrettyMethod(m);
2060         mh_.ChangeMethod(m);
2061         const DexFile::CodeItem* code_item = mh_.GetCodeItem();
2062         DCHECK(code_item != NULL) << PrettyMethod(m);  // Can't be NULL or how would we compile its instructions?
2063         NativePcOffsetToReferenceMap map(native_gc_map);
2064         size_t num_regs = std::min(map.RegWidth() * 8,
2065                                    static_cast<size_t>(code_item->registers_size_));
2066         if (num_regs > 0) {
2067           const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
2068           DCHECK(reg_bitmap != NULL);
2069           const VmapTable vmap_table(m->GetVmapTable());
2070           uint32_t core_spills = m->GetCoreSpillMask();
2071           uint32_t fp_spills = m->GetFpSpillMask();
2072           size_t frame_size = m->GetFrameSizeInBytes();
2073           // For all dex registers in the bitmap
2074           mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
2075           DCHECK(cur_quick_frame != NULL);
2076           for (size_t reg = 0; reg < num_regs; ++reg) {
2077             // Does this register hold a reference?
2078             if (TestBitmap(reg, reg_bitmap)) {
2079               uint32_t vmap_offset;
2080               mirror::Object* ref;
2081               if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2082                 uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
2083                                                                   kReferenceVReg));
2084                 ref = reinterpret_cast<mirror::Object*>(val);
2085               } else {
2086                 ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
2087                                                                 core_spills, fp_spills, frame_size,
2088                                                                 reg));
2089               }
2090 
2091               if (ref != NULL) {
2092                 visitor_(ref, reg, this);
2093               }
2094             }
2095           }
2096         }
2097       }
2098     }
2099     return true;
2100   }
2101 
2102  private:
2103   static bool TestBitmap(int reg, const uint8_t* reg_vector) {
2104     return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
2105   }
2106 
2107   // Visitor for when we visit a root.
2108   const RootVisitor& visitor_;
2109 
2110   // A method helper we keep around to avoid dex file/cache re-computations.
2111   MethodHelper mh_;
2112 };
2113 
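// Adapts a plain RootVisitor callback to the (object, vreg, stack visitor) interface expected by
// ReferenceMapVisitor, dropping the extra arguments.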
2114 class RootCallbackVisitor {
2115  public:
2116   RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
2117 
2118   void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
2119     visitor_(obj, arg_);
2120   }
2121 
2122  private:
2123   RootVisitor* visitor_;
2124   void* arg_;
2125 };
2126 
2127 class VerifyCallbackVisitor {
2128  public:
2129   VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg)
2130       : visitor_(visitor),
2131         arg_(arg) {
2132   }
2133 
2134   void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const {
2135     visitor_(obj, arg_, vreg, visitor);
2136   }
2137 
2138  private:
2139   VerifyRootVisitor* const visitor_;
2140   void* const arg_;
2141 };
2142 
2143 struct VerifyRootWrapperArg {
2144   VerifyRootVisitor* visitor;
2145   void* arg;
2146 };
2147 
2148 static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
2149   VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
2150   wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
2151 }
2152 
2153 void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
2154   // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
2155   // don't have.
2156   VerifyRootWrapperArg wrapperArg;
2157   wrapperArg.arg = arg;
2158   wrapperArg.visitor = visitor;
2159 
2160   if (opeer_ != NULL) {
2161     VerifyRootWrapperCallback(opeer_, &wrapperArg);
2162   }
2163   if (exception_ != NULL) {
2164     VerifyRootWrapperCallback(exception_, &wrapperArg);
2165   }
2166   throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2167   if (class_loader_override_ != NULL) {
2168     VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
2169   }
2170   jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2171   jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2172 
2173   SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2174 
2175   // Visit roots on this thread's stack
2176   Context* context = GetLongJumpContext();
2177   VerifyCallbackVisitor visitorToCallback(visitor, arg);
2178   ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
2179   mapper.WalkStack();
2180   ReleaseLongJumpContext(context);
2181 
2182   std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
2183   typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
2184   for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
2185     mirror::Object* this_object = (*it).this_object_;
2186     if (this_object != NULL) {
2187       VerifyRootWrapperCallback(this_object, &wrapperArg);
2188     }
2189     mirror::ArtMethod* method = (*it).method_;
2190     VerifyRootWrapperCallback(method, &wrapperArg);
2191   }
2192 }
2193 
2194 void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
2195   if (opeer_ != NULL) {
2196     visitor(opeer_, arg);
2197   }
2198   if (exception_ != NULL) {
2199     visitor(exception_, arg);
2200   }
2201   throw_location_.VisitRoots(visitor, arg);
2202   if (class_loader_override_ != NULL) {
2203     visitor(class_loader_override_, arg);
2204   }
2205   jni_env_->locals.VisitRoots(visitor, arg);
2206   jni_env_->monitors.VisitRoots(visitor, arg);
2207 
2208   SirtVisitRoots(visitor, arg);
2209 
2210   // Visit roots on this thread's stack
2211   Context* context = GetLongJumpContext();
2212   RootCallbackVisitor visitorToCallback(visitor, arg);
2213   ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitorToCallback);
2214   mapper.WalkStack();
2215   ReleaseLongJumpContext(context);
2216 
2217   for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2218     mirror::Object* this_object = frame.this_object_;
2219     if (this_object != NULL) {
2220       visitor(this_object, arg);
2221     }
2222     mirror::ArtMethod* method = frame.method_;
2223     visitor(method, arg);
2224   }
2225 }
2226 
2227 static void VerifyObject(const mirror::Object* root, void* arg) {
2228   gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
2229   heap->VerifyObject(root);
2230 }
2231 
2232 void Thread::VerifyStackImpl() {
2233   UniquePtr<Context> context(Context::Create());
2234   RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
2235   ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
2236   mapper.WalkStack();
2237 }
2238 
2239 // Set the stack end to the value to be used while handling a stack overflow.
2240 void Thread::SetStackEndForStackOverflow() {
2241   // During stack overflow we allow use of the full stack.
2242   if (stack_end_ == stack_begin_) {
2243     // However, we seem to have already extended to use the full stack.
2244     LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2245                << kStackOverflowReservedBytes << ")?";
2246     DumpStack(LOG(ERROR));
2247     LOG(FATAL) << "Recursive stack overflow.";
2248   }
2249 
2250   stack_end_ = stack_begin_;
2251 }
2252 
2253 std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2254   thread.ShortDump(os);
2255   return os;
2256 }
2257 
2258 }  // namespace art
2259