1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/enums.h"
30 #include "base/locks.h"
31 #include "base/macros.h"
32 #include "base/safe_map.h"
33 #include "base/value_object.h"
34 #include "entrypoints/jni/jni_entrypoints.h"
35 #include "entrypoints/quick/quick_entrypoints.h"
36 #include "handle.h"
37 #include "handle_scope.h"
38 #include "interpreter/interpreter_cache.h"
39 #include "jvalue.h"
40 #include "managed_stack.h"
41 #include "offsets.h"
42 #include "read_barrier_config.h"
43 #include "reflective_handle_scope.h"
44 #include "runtime_globals.h"
45 #include "runtime_stats.h"
46 #include "thread_state.h"
47 
48 class BacktraceMap;
49 
50 namespace art {
51 
52 namespace gc {
53 namespace accounting {
54 template<class T> class AtomicStack;
55 }  // namespace accounting
56 namespace collector {
57 class SemiSpace;
58 }  // namespace collector
59 }  // namespace gc
60 
61 namespace instrumentation {
62 struct InstrumentationStackFrame;
63 }  // namespace instrumentation
64 
65 namespace mirror {
66 class Array;
67 class Class;
68 class ClassLoader;
69 class Object;
70 template<class T> class ObjectArray;
71 template<class T> class PrimitiveArray;
72 typedef PrimitiveArray<int32_t> IntArray;
73 class StackTraceElement;
74 class String;
75 class Throwable;
76 }  // namespace mirror
77 
78 namespace verifier {
79 class MethodVerifier;
80 class VerifierDeps;
81 }  // namespace verifier
82 
83 class ArtMethod;
84 class BaseMutex;
85 class ClassLinker;
86 class Closure;
87 class Context;
88 class DeoptimizationContextRecord;
89 class DexFile;
90 class FrameIdToShadowFrame;
91 class IsMarkedVisitor;
92 class JavaVMExt;
93 class JNIEnvExt;
94 class Monitor;
95 class RootVisitor;
96 class ScopedObjectAccessAlreadyRunnable;
97 class ShadowFrame;
98 class StackedShadowFrameRecord;
99 enum class SuspendReason : char;
100 class Thread;
101 class ThreadList;
102 enum VisitRootFlags : uint8_t;
103 
104 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
105 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
106 // on.
107 class TLSData {
108  public:
109   virtual ~TLSData() {}
110 };
111 
112 // Thread priorities. These must match the Thread.MIN_PRIORITY,
113 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
114 enum ThreadPriority {
115   kMinThreadPriority = 1,
116   kNormThreadPriority = 5,
117   kMaxThreadPriority = 10,
118 };
119 
120 enum ThreadFlag {
121   kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
122                           // safepoint handler.
123   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
124   kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
125   kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
126 };
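// The flag values above are distinct bits so that several requests can be pending at once in the
// flags field of StateAndFlags; ReadFlag() below tests membership with a bitwise AND. As an
// illustrative example, a thread with both a suspend and a checkpoint request pending has
// flags == (kSuspendRequest | kCheckpointRequest) == 3.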
127 
128 enum class StackedShadowFrameType {
129   kShadowFrameUnderConstruction,
130   kDeoptimizationShadowFrame,
131 };
132 
133 // The type of method that triggers deoptimization. It contains info on whether
134 // the deoptimized method should advance dex_pc.
135 enum class DeoptimizationMethodType {
136   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
137   kDefault     // dex pc may or may not advance depending on other conditions.
138 };
139 
140 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
141 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
142 
143 // Thread's stack layout for implicit stack overflow checks:
144 //
145 //   +---------------------+  <- highest address of stack memory
146 //   |                     |
147 //   .                     .  <- SP
148 //   |                     |
149 //   |                     |
150 //   +---------------------+  <- stack_end
151 //   |                     |
152 //   |  Gap                |
153 //   |                     |
154 //   +---------------------+  <- stack_begin
155 //   |                     |
156 //   | Protected region    |
157 //   |                     |
158 //   +---------------------+  <- lowest address of stack memory
159 //
160 // The stack always grows down in memory.  At the lowest address is a region of memory
161 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
162 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
163 // between the stack_end and the highest address in stack memory.  An implicit stack
164 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
165 // If the thread's SP is below the stack_end address this will be a read into the protected
166 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
167 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
168 // if the thread makes a call out to a native function (through JNI), that native function
169 // might only have 4K of memory (if the SP is adjacent to stack_end).
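//
// A minimal sketch of what the implicit check amounts to (the real check is a single load emitted
// by the compiler; the names and the 4K probe distance below are assumptions for the example):
//
//   uint8_t* probe = current_sp - 4 * KB;   // Probe one reserve-size below the SP.
//   (void)*probe;                           // Harmless read, unless it lands in the protected
//                                           // region, where it raises the SIGSEGV that is then
//                                           // handled as a stack overflow.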
170 
171 class Thread {
172  public:
173   static const size_t kStackOverflowImplicitCheckSize;
174   static constexpr bool kVerifyStack = kIsDebugBuild;
175 
176   // Creates a new native thread corresponding to the given managed peer.
177   // Used to implement Thread.start.
178   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
179 
180   // Attaches the calling native thread to the runtime, returning the new native peer.
181   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
182   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
183                         bool create_peer);
184   // Attaches the calling native thread to the runtime, returning the new native peer.
185   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
186 
187   // Reset internal state of child thread after fork.
188   void InitAfterFork();
189 
190   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
191   // high cost and so we favor passing self around when possible.
192   // TODO: mark as PURE so the compiler may coalesce and remove?
193   static Thread* Current();
194 
195   // On a runnable thread, check for pending thread suspension request and handle if pending.
196   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
197 
198   // Process pending thread suspension request and handle if pending.
199   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
200 
201   // Process a pending empty checkpoint if pending.
202   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
203   void CheckEmptyCheckpointFromMutex();
204 
205   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
206                                    ObjPtr<mirror::Object> thread_peer)
207       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
208       REQUIRES_SHARED(Locks::mutator_lock_);
209   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
210       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
211       REQUIRES_SHARED(Locks::mutator_lock_);
212 
213   // Translates 172 to pAllocArrayFromCode and so on.
214   template<PointerSize size_of_pointers>
215   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
216 
217   // Dumps a one-line summary of thread state (used for operator<<).
218   void ShortDump(std::ostream& os) const;
219 
220   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
221   void Dump(std::ostream& os,
222             bool dump_native_stack = true,
223             BacktraceMap* backtrace_map = nullptr,
224             bool force_dump_stack = false) const
225       REQUIRES_SHARED(Locks::mutator_lock_);
226 
227   void DumpJavaStack(std::ostream& os,
228                      bool check_suspended = true,
229                      bool dump_locks = true) const
230       REQUIRES_SHARED(Locks::mutator_lock_);
231 
232   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
233   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
234   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
235       REQUIRES_SHARED(Locks::mutator_lock_);
236 
237   ThreadState GetState() const {
238     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
239     DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
240     return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
241   }
242 
243   ThreadState SetState(ThreadState new_state);
244 
245   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
246     return tls32_.suspend_count;
247   }
248 
249   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
250                                                Locks::user_code_suspension_lock_) {
251     return tls32_.user_code_suspend_count;
252   }
253 
254   int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
255     return tls32_.debug_suspend_count;
256   }
257 
258   bool IsSuspended() const {
259     union StateAndFlags state_and_flags;
260     state_and_flags.as_int = tls32_.state_and_flags.as_int;
261     return state_and_flags.as_struct.state != kRunnable &&
262         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
263   }
264 
265   void DecrDefineClassCount() {
266     tls32_.define_class_counter--;
267   }
268 
269   void IncrDefineClassCount() {
270     tls32_.define_class_counter++;
271   }
272   uint32_t GetDefineClassCount() const {
273     return tls32_.define_class_counter;
274   }
275 
276   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
277   // release thread_suspend_count_lock_ internally.
278   ALWAYS_INLINE
279   bool ModifySuspendCount(Thread* self,
280                           int delta,
281                           AtomicInteger* suspend_barrier,
282                           SuspendReason reason)
283       WARN_UNUSED
284       REQUIRES(Locks::thread_suspend_count_lock_);
285 
286   // Requests a checkpoint closure to run on another thread. The closure will be run when the thread
287   // gets suspended. This will return true if the closure was added and will (eventually) be
288   // executed. It returns false otherwise.
289   //
290   // Since multiple closures can be queued and some closures can delay other threads from running,
291   // no closure should attempt to suspend another thread while running.
292   // TODO We should add some debug option that verifies this.
293   bool RequestCheckpoint(Closure* function)
294       REQUIRES(Locks::thread_suspend_count_lock_);
295 
296   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
297   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
298   // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
299   // will go into while it is awaiting the checkpoint to be run.
300   // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
301   // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
302   // for the closure or the rest of the system.
303   // NB Since multiple closures can be queued and some closures can delay other threads from running
304   // no closure should attempt to suspend another thread while running.
305   bool RequestSynchronousCheckpoint(Closure* function,
306                                     ThreadState suspend_state = ThreadState::kWaiting)
307       REQUIRES_SHARED(Locks::mutator_lock_)
308       RELEASE(Locks::thread_list_lock_)
309       REQUIRES(!Locks::thread_suspend_count_lock_);
310 
311   bool RequestEmptyCheckpoint()
312       REQUIRES(Locks::thread_suspend_count_lock_);
313 
314   void SetFlipFunction(Closure* function);
315   Closure* GetFlipFunction();
316 
317   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
318     CHECK(kUseReadBarrier);
319     return tlsPtr_.thread_local_mark_stack;
320   }
321   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
322     CHECK(kUseReadBarrier);
323     tlsPtr_.thread_local_mark_stack = stack;
324   }
325 
326   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
327   // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
328   void FullSuspendCheck()
329       REQUIRES(!Locks::thread_suspend_count_lock_)
330       REQUIRES_SHARED(Locks::mutator_lock_);
331 
332   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
333   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
334       REQUIRES(!Locks::thread_suspend_count_lock_)
335       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
336 
337   // Transition from runnable into a state where mutator privileges are denied. Releases share of
338   // mutator lock.
339   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
340       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
341       UNLOCK_FUNCTION(Locks::mutator_lock_);
342 
343   // Once called thread suspension will cause an assertion failure.
344   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
345     Roles::uninterruptible_.Acquire();  // No-op.
346     if (kIsDebugBuild) {
347       CHECK(cause != nullptr);
348       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
349       tls32_.no_thread_suspension++;
350       tlsPtr_.last_no_thread_suspension_cause = cause;
351       return previous_cause;
352     } else {
353       return nullptr;
354     }
355   }
356 
357   // End region where no thread suspension is expected.
358   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
359     if (kIsDebugBuild) {
360       CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
361       CHECK_GT(tls32_.no_thread_suspension, 0U);
362       tls32_.no_thread_suspension--;
363       tlsPtr_.last_no_thread_suspension_cause = old_cause;
364     }
365     Roles::uninterruptible_.Release();  // No-op.
366   }
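  // A typical pairing of the two methods above, sketched for illustration (everything other than
  // the two calls is an assumption for the example):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Filling dex cache");
  //   DoWorkThatMustNotSuspend(self);
  //   self->EndAssertNoThreadSuspension(old_cause);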
367 
368   // End region where no thread suspension is expected. Returns the current open region in case we
369   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
370   // is larger than one.
371   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
372     const char* ret = nullptr;
373     if (kIsDebugBuild) {
374       CHECK_EQ(tls32_.no_thread_suspension, 1u);
375       tls32_.no_thread_suspension--;
376       ret = tlsPtr_.last_no_thread_suspension_cause;
377       tlsPtr_.last_no_thread_suspension_cause = nullptr;
378     }
379     Roles::uninterruptible_.Release();  // No-op.
380     return ret;
381   }
382 
383   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
384 
385   // Return true if thread suspension is allowable.
386   bool IsThreadSuspensionAllowable() const;
387 
388   bool IsDaemon() const {
389     return tls32_.daemon;
390   }
391 
392   size_t NumberOfHeldMutexes() const;
393 
394   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
395 
396   /*
397    * Changes the priority of this thread to match that of the java.lang.Thread object.
398    *
399    * We map a priority value from 1-10 to Linux "nice" values, where lower
400    * numbers indicate higher priority.
401    */
402   void SetNativePriority(int newPriority);
403 
404   /*
405    * Returns the priority of this thread by querying the system.
406    * This is useful when attaching a thread through JNI.
407    *
408    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
409    */
410   int GetNativePriority() const;
411 
412   // Guaranteed to be non-zero.
413   uint32_t GetThreadId() const {
414     return tls32_.thin_lock_thread_id;
415   }
416 
417   pid_t GetTid() const {
418     return tls32_.tid;
419   }
420 
421   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
422   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
423 
424   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
425   // allocation, or locking.
426   void GetThreadName(std::string& name) const;
427 
428   // Sets the thread's name.
429   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
430 
431   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
432   uint64_t GetCpuMicroTime() const;
433 
434   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
435     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
436     CHECK(tlsPtr_.jpeer == nullptr);
437     return tlsPtr_.opeer;
438   }
439   // GetPeer is not safe to call on another thread in the middle of the CC thread flip: that
440   // thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
441   // This function explicitly marks/forwards it.
442   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
443 
444   bool HasPeer() const {
445     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
446   }
447 
448   RuntimeStats* GetStats() {
449     return &tls64_.stats;
450   }
451 
452   bool IsStillStarting() const;
453 
454   bool IsExceptionPending() const {
455     return tlsPtr_.exception != nullptr;
456   }
457 
458   bool IsAsyncExceptionPending() const {
459     return tlsPtr_.async_exception != nullptr;
460   }
461 
462   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
463     return tlsPtr_.exception;
464   }
465 
466   void AssertPendingException() const;
467   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
468   void AssertNoPendingException() const;
469   void AssertNoPendingExceptionForNewException(const char* msg) const;
470 
471   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
472 
473   // Set an exception that is asynchronously thrown from a different thread. This will be checked
474   // periodically and might overwrite the current 'Exception'. This can only be called from a
475   // checkpoint.
476   //
477   // The caller should also make sure that the thread has been deoptimized so that the exception
478   // could be detected on back-edges.
479   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
480       REQUIRES_SHARED(Locks::mutator_lock_);
481 
482   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
483     tlsPtr_.exception = nullptr;
484   }
485 
486   // Move the current async-exception to the main exception. This should be called when the current
487   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
488   // that needs to be dealt with, false otherwise.
489   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
490 
491   // Find the catch block and perform a long jump to the appropriate exception handler.
492   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
493 
494   Context* GetLongJumpContext();
495   void ReleaseLongJumpContext(Context* context) {
496     if (tlsPtr_.long_jump_context != nullptr) {
497       ReleaseLongJumpContextInternal();
498     }
499     tlsPtr_.long_jump_context = context;
500   }
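  // Illustrative use of the per-thread context cache above (the surrounding exception-delivery
  // code is an assumption for the example): obtain a Context, use it for the long jump setup, and
  // hand it back so the next delivery on this thread can reuse the allocation. If a context is
  // already cached, the previously cached one is released first.
  //
  //   Context* context = self->GetLongJumpContext();
  //   ... populate the context and perform the long jump, or ...
  //   self->ReleaseLongJumpContext(context);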
501 
502   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
503   // abort the runtime iff abort_on_error is true.
504   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
505                               bool check_suspended = true,
506                               bool abort_on_error = true) const
507       REQUIRES_SHARED(Locks::mutator_lock_);
508 
509   // Returns whether the given exception was thrown by the current Java method being executed
510   // (Note that this includes native Java methods).
511   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
512       REQUIRES_SHARED(Locks::mutator_lock_);
513 
514   void SetTopOfStack(ArtMethod** top_method) {
515     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
516   }
517 
518   void SetTopOfStackTagged(ArtMethod** top_method) {
519     tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
520   }
521 
522   void SetTopOfShadowStack(ShadowFrame* top) {
523     tlsPtr_.managed_stack.SetTopShadowFrame(top);
524   }
525 
526   bool HasManagedStack() const {
527     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
528   }
529 
530   // If 'msg' is null, no detail message is set.
531   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
532       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
533 
534   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
535   // used as the new exception's cause.
536   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
537       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
538 
539   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
540       __attribute__((format(printf, 3, 4)))
541       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
542 
543   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
544       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
545 
546   // OutOfMemoryError is special, because we need to pre-allocate an instance.
547   // Only the GC should call this.
548   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
549       REQUIRES(!Roles::uninterruptible_);
550 
551   static void Startup();
552   static void FinishStartup();
553   static void Shutdown();
554 
555   // Notify this thread's thread-group that this thread has started.
556   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
557   //       is null, the thread's thread-group is loaded from the peer.
558   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
559       REQUIRES_SHARED(Locks::mutator_lock_);
560 
561   // JNI methods
562   JNIEnvExt* GetJniEnv() const {
563     return tlsPtr_.jni_env;
564   }
565 
566   // Convert a jobject into an Object*.
567   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
568   // Checks if the weak global ref has been cleared by the GC without decoding it.
569   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
570 
571   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
572     return tlsPtr_.monitor_enter_object;
573   }
574 
575   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
576     tlsPtr_.monitor_enter_object = obj;
577   }
578 
579   // Implements java.lang.Thread.interrupted.
580   bool Interrupted();
581   // Implements java.lang.Thread.isInterrupted.
582   bool IsInterrupted();
583   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
584   void SetInterrupted(bool i) {
585     tls32_.interrupted.store(i, std::memory_order_seq_cst);
586   }
587   void Notify() REQUIRES(!wait_mutex_);
588 
589   ALWAYS_INLINE void PoisonObjectPointers() {
590     ++poison_object_cookie_;
591   }
592 
593   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
594 
595   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
596     return poison_object_cookie_;
597   }
598 
599   // Parking for 0ns of relative time means an untimed park; a negative time (though it
600   // should be handled in Java code) returns immediately.
601   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
602   void Unpark();
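  // For example (illustrative): an untimed park, which blocks until the thread is unparked:
  //
  //   self->Park(/* is_absolute= */ false, /* time= */ 0);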
603 
604  private:
605   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
606 
607  public:
608   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
609     return wait_mutex_;
610   }
611 
612   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
613     return wait_cond_;
614   }
615 
616   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
617     return wait_monitor_;
618   }
619 
620   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
621     wait_monitor_ = mon;
622   }
623 
624   // Waiter linked-list support.
625   Thread* GetWaitNext() const {
626     return tlsPtr_.wait_next;
627   }
628 
629   void SetWaitNext(Thread* next) {
630     tlsPtr_.wait_next = next;
631   }
632 
633   jobject GetClassLoaderOverride() {
634     return tlsPtr_.class_loader_override;
635   }
636 
637   void SetClassLoaderOverride(jobject class_loader_override);
638 
639   // Create the internal representation of a stack trace, which is more time-
640   // and space-efficient to compute than the StackTraceElement[].
641   template<bool kTransactionActive>
642   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
643       REQUIRES_SHARED(Locks::mutator_lock_);
644 
645   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
646   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
647   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
648   // with the number of valid frames in the returned array.
649   static jobjectArray InternalStackTraceToStackTraceElementArray(
650       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
651       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
652       REQUIRES_SHARED(Locks::mutator_lock_);
653 
654   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
655       REQUIRES_SHARED(Locks::mutator_lock_);
656 
657   bool HasDebuggerShadowFrames() const {
658     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
659   }
660 
661   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
662       REQUIRES_SHARED(Locks::mutator_lock_);
663 
664   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
665       REQUIRES(Locks::mutator_lock_);
666 
667   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
668     if (kVerifyStack) {
669       VerifyStackImpl();
670     }
671   }
672 
673   //
674   // Offsets of various members of native Thread class, used by compiled code.
675   //
676 
677   template<PointerSize pointer_size>
678   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
679     return ThreadOffset<pointer_size>(
680         OFFSETOF_MEMBER(Thread, tls32_) +
681         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
682   }
683 
684   template<PointerSize pointer_size>
685   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
686     return ThreadOffset<pointer_size>(
687         OFFSETOF_MEMBER(Thread, tls32_) +
688         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
689   }
690 
691   template<PointerSize pointer_size>
692   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
693     return ThreadOffset<pointer_size>(
694         OFFSETOF_MEMBER(Thread, tls32_) +
695         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
696   }
697 
698   template<PointerSize pointer_size>
699   static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
700     return ThreadOffset<pointer_size>(
701         OFFSETOF_MEMBER(Thread, tls32_) +
702         OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
703   }
704 
705   template<PointerSize pointer_size>
706   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
707     return ThreadOffset<pointer_size>(
708         OFFSETOF_MEMBER(Thread, tls32_) +
709         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
710   }
711 
712   static constexpr size_t IsGcMarkingSize() {
713     return sizeof(tls32_.is_gc_marking);
714   }
715 
716   // Deoptimize the Java stack.
717   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
718 
719  private:
720   template<PointerSize pointer_size>
721   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
722     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
723     size_t scale = (pointer_size > kRuntimePointerSize) ?
724       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
725     size_t shrink = (kRuntimePointerSize > pointer_size) ?
726       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
727     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
728   }
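  // Worked example of the scaling above (illustrative): when generating code for a 64-bit target
  // from a 32-bit runtime (pointer_size == PointerSize::k64, kRuntimePointerSize ==
  // PointerSize::k32), scale == 2 and shrink == 1, so a pointer field at tls_ptr_offset 8 in the
  // runtime's layout maps to base + 16 in the target's layout; in the opposite direction
  // shrink == 2 halves the offset instead.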
729 
730  public:
731   template<PointerSize pointer_size>
732   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
733       size_t quick_entrypoint_offset) {
734     return ThreadOffsetFromTlsPtr<pointer_size>(
735         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
736   }
737 
738   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
739                                                           PointerSize pointer_size) {
740     if (pointer_size == PointerSize::k32) {
741       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
742           Uint32Value();
743     } else {
744       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
745           Uint32Value();
746     }
747   }
748 
749   template<PointerSize pointer_size>
750   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
751     return ThreadOffsetFromTlsPtr<pointer_size>(
752         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
753   }
754 
755   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
756   template <PointerSize pointer_size>
757   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
758     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
759     DCHECK_LT(reg, 30u);
760     // The ReadBarrierMarkRegX entry points are ordered by increasing
761     // register number in Thread::tls_Ptr_.quick_entrypoints.
762     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
763         + static_cast<size_t>(pointer_size) * reg;
764   }
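  // For example (illustrative arithmetic): with pointer_size == PointerSize::k64 and reg == 3,
  // the returned value is the offset of pReadBarrierMarkReg00 plus 8 * 3 == 24 bytes, i.e. the
  // slot holding pReadBarrierMarkReg03.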
765 
766   template<PointerSize pointer_size>
767   static constexpr ThreadOffset<pointer_size> SelfOffset() {
768     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
769   }
770 
771   template<PointerSize pointer_size>
772   static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
773     return ThreadOffsetFromTlsPtr<pointer_size>(
774         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
775   }
776 
777   template<PointerSize pointer_size>
778   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
779     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
780   }
781 
782   template<PointerSize pointer_size>
783   static constexpr ThreadOffset<pointer_size> PeerOffset() {
784     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
785   }
786 
787 
788   template<PointerSize pointer_size>
789   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
790     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
791   }
792 
793   template<PointerSize pointer_size>
794   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
795     return ThreadOffsetFromTlsPtr<pointer_size>(
796         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
797   }
798 
799   template<PointerSize pointer_size>
800   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
801     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
802                                                                 thread_local_pos));
803   }
804 
805   template<PointerSize pointer_size>
806   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
807     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
808                                                                 thread_local_end));
809   }
810 
811   template<PointerSize pointer_size>
812   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
813     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
814                                                                 thread_local_objects));
815   }
816 
817   template<PointerSize pointer_size>
818   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
819     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
820                                                                 rosalloc_runs));
821   }
822 
823   template<PointerSize pointer_size>
824   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
825     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
826                                                                 thread_local_alloc_stack_top));
827   }
828 
829   template<PointerSize pointer_size>
830   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
831     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
832                                                                 thread_local_alloc_stack_end));
833   }
834 
835   // Size of stack less any space reserved for stack overflow
836   size_t GetStackSize() const {
837     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
838   }
839 
840   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
841 
842   uint8_t* GetStackEnd() const {
843     return tlsPtr_.stack_end;
844   }
845 
846   // Set the stack end to the value to be used while handling a stack overflow.
847   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
848 
849   // Set the stack end to the value to be used during regular execution.
850   ALWAYS_INLINE void ResetDefaultStackEnd();
851 
852   bool IsHandlingStackOverflow() const {
853     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
854   }
855 
856   template<PointerSize pointer_size>
857   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
858     return ThreadOffsetFromTlsPtr<pointer_size>(
859         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
860   }
861 
862   template<PointerSize pointer_size>
863   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
864     return ThreadOffsetFromTlsPtr<pointer_size>(
865         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
866   }
867 
868   template<PointerSize pointer_size>
869   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
870     return ThreadOffsetFromTlsPtr<pointer_size>(
871         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
872         ManagedStack::TaggedTopQuickFrameOffset());
873   }
874 
875   const ManagedStack* GetManagedStack() const {
876     return &tlsPtr_.managed_stack;
877   }
878 
879   // Linked list recording fragments of managed stack.
880   void PushManagedStackFragment(ManagedStack* fragment) {
881     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
882   }
883   void PopManagedStackFragment(const ManagedStack& fragment) {
884     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
885   }
886 
887   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
888   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
889 
890   template<PointerSize pointer_size>
891   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
892     return ThreadOffsetFromTlsPtr<pointer_size>(
893         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
894         ManagedStack::TopShadowFrameOffset());
895   }
896 
897   // Is the given obj in this thread's stack indirect reference table?
898   bool HandleScopeContains(jobject obj) const;
899 
900   void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
901       REQUIRES_SHARED(Locks::mutator_lock_);
902 
903   BaseHandleScope* GetTopHandleScope() {
904     return tlsPtr_.top_handle_scope;
905   }
906 
907   void PushHandleScope(BaseHandleScope* handle_scope) {
908     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
909     tlsPtr_.top_handle_scope = handle_scope;
910   }
911 
912   BaseHandleScope* PopHandleScope() {
913     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
914     DCHECK(handle_scope != nullptr);
915     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
916     return handle_scope;
917   }
918 
919   template<PointerSize pointer_size>
920   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
921     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
922                                                                 top_handle_scope));
923   }
924 
925   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
926     return tlsPtr_.top_reflective_handle_scope;
927   }
928 
929   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
930     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
931     DCHECK_EQ(scope->GetThread(), this);
932     tlsPtr_.top_reflective_handle_scope = scope;
933   }
934 
935   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
936     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
937     DCHECK(handle_scope != nullptr);
938     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
939     return handle_scope;
940   }
941 
942   // Indicates whether this thread is ready to invoke a method for debugging. This
943   // is only true if the thread has been suspended by a debug event.
944   bool IsReadyForDebugInvoke() const {
945     return tls32_.ready_for_debug_invoke;
946   }
947 
948   void SetReadyForDebugInvoke(bool ready) {
949     tls32_.ready_for_debug_invoke = ready;
950   }
951 
952   bool IsDebugMethodEntry() const {
953     return tls32_.debug_method_entry_;
954   }
955 
956   void SetDebugMethodEntry() {
957     tls32_.debug_method_entry_ = true;
958   }
959 
960   void ClearDebugMethodEntry() {
961     tls32_.debug_method_entry_ = false;
962   }
963 
964   bool GetIsGcMarking() const {
965     CHECK(kUseReadBarrier);
966     return tls32_.is_gc_marking;
967   }
968 
969   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
970 
971   bool GetWeakRefAccessEnabled() const {
972     CHECK(kUseReadBarrier);
973     return tls32_.weak_ref_access_enabled;
974   }
975 
976   void SetWeakRefAccessEnabled(bool enabled) {
977     CHECK(kUseReadBarrier);
978     tls32_.weak_ref_access_enabled = enabled;
979   }
980 
981   uint32_t GetDisableThreadFlipCount() const {
982     CHECK(kUseReadBarrier);
983     return tls32_.disable_thread_flip_count;
984   }
985 
986   void IncrementDisableThreadFlipCount() {
987     CHECK(kUseReadBarrier);
988     ++tls32_.disable_thread_flip_count;
989   }
990 
991   void DecrementDisableThreadFlipCount() {
992     CHECK(kUseReadBarrier);
993     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
994     --tls32_.disable_thread_flip_count;
995   }
996 
997   // Returns true if the thread is a runtime thread (e.g. from a ThreadPool).
998   bool IsRuntimeThread() const {
999     return is_runtime_thread_;
1000   }
1001 
1002   void SetIsRuntimeThread(bool is_runtime_thread) {
1003     is_runtime_thread_ = is_runtime_thread;
1004   }
1005 
1006   uint32_t CorePlatformApiCookie() {
1007     return core_platform_api_cookie_;
1008   }
1009 
1010   void SetCorePlatformApiCookie(uint32_t cookie) {
1011     core_platform_api_cookie_ = cookie;
1012   }
1013 
1014   // Returns true if the thread is allowed to load java classes.
1015   bool CanLoadClasses() const;
1016 
1017   // Returns the fake exception used to activate deoptimization.
1018   static mirror::Throwable* GetDeoptimizationException() {
1019     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1020     // represented by ObjPtr.
1021     return reinterpret_cast<mirror::Throwable*>(0x100);
1022   }
1023 
1024   // Currently deoptimization invokes the verifier, which can trigger class loading
1025   // and execute Java code, so nested deoptimizations may happen.
1026   // We need to save the ongoing deoptimization shadow frames and return
1027   // values on stacks.
1028   // 'from_code' denotes whether the deoptimization was explicitly made from
1029   // compiled code.
1030   // 'method_type' contains info on whether deoptimization should advance
1031   // dex_pc.
1032   void PushDeoptimizationContext(const JValue& return_value,
1033                                  bool is_reference,
1034                                  ObjPtr<mirror::Throwable> exception,
1035                                  bool from_code,
1036                                  DeoptimizationMethodType method_type)
1037       REQUIRES_SHARED(Locks::mutator_lock_);
1038   void PopDeoptimizationContext(JValue* result,
1039                                 ObjPtr<mirror::Throwable>* exception,
1040                                 bool* from_code,
1041                                 DeoptimizationMethodType* method_type)
1042       REQUIRES_SHARED(Locks::mutator_lock_);
1043   void AssertHasDeoptimizationContext()
1044       REQUIRES_SHARED(Locks::mutator_lock_);
1045   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1046   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
1047 
1048   // For debugger, find the shadow frame that corresponds to a frame id.
1049   // Or return null if there is none.
1050   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1051       REQUIRES_SHARED(Locks::mutator_lock_);
1052   // For debugger, find the bool array that keeps track of the updated vreg set
1053   // for a frame id.
1054   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1055   // For debugger, find the shadow frame that corresponds to a frame id. If
1056   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1057   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1058                                                uint32_t num_vregs,
1059                                                ArtMethod* method,
1060                                                uint32_t dex_pc)
1061       REQUIRES_SHARED(Locks::mutator_lock_);
1062 
1063   // Delete the entry that maps from frame_id to shadow_frame.
1064   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1065       REQUIRES_SHARED(Locks::mutator_lock_);
1066 
1067   // While getting this map requires holding the mutator lock shared, manipulating it
1068   // should actually follow these rules:
1069   // (1) The owner of this map (the thread) can change it with its mutator lock.
1070   // (2) Other threads can read this map when the owner is suspended and they
1071   //     hold the mutator lock.
1072   // (3) Other threads can change this map when owning the mutator lock exclusively.
1073   //
1074   // The reason why (3) needs the mutator lock exclusively (and not just having
1075   // the owner suspended) is that we don't want other threads to concurrently read the map.
1076   //
1077   // TODO: Add a class abstraction to express these rules.
1078   std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
1079       REQUIRES_SHARED(Locks::mutator_lock_) {
1080     return tlsPtr_.instrumentation_stack;
1081   }
1082 
1083   std::vector<ArtMethod*>* GetStackTraceSample() const {
1084     DCHECK(!IsAotCompiler());
1085     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1086   }
1087 
1088   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1089     DCHECK(!IsAotCompiler());
1090     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1091   }
1092 
1093   verifier::VerifierDeps* GetVerifierDeps() const {
1094     DCHECK(IsAotCompiler());
1095     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1096   }
1097 
1098   // It is the responsibility of the caller to make sure the verifier_deps
1099   // entry in the thread is cleared before destruction of the actual VerifierDeps
1100   // object, or the thread.
1101   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1102     DCHECK(IsAotCompiler());
1103     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1104     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1105   }
1106 
1107   uint64_t GetTraceClockBase() const {
1108     return tls64_.trace_clock_base;
1109   }
1110 
1111   void SetTraceClockBase(uint64_t clock_base) {
1112     tls64_.trace_clock_base = clock_base;
1113   }
1114 
1115   BaseMutex* GetHeldMutex(LockLevel level) const {
1116     return tlsPtr_.held_mutexes[level];
1117   }
1118 
1119   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1120     tlsPtr_.held_mutexes[level] = mutex;
1121   }
1122 
1123   void ClearSuspendBarrier(AtomicInteger* target)
1124       REQUIRES(Locks::thread_suspend_count_lock_);
1125 
1126   bool ReadFlag(ThreadFlag flag) const {
1127     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1128   }
1129 
1130   bool TestAllFlags() const {
1131     return (tls32_.state_and_flags.as_struct.flags != 0);
1132   }
1133 
1134   void AtomicSetFlag(ThreadFlag flag) {
1135     tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
1136   }
1137 
1138   void AtomicClearFlag(ThreadFlag flag) {
1139     tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
1140   }
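  // Note on the mask used above (illustrative): `-1 ^ flag` is the all-ones word with only the
  // flag's bit cleared, so fetch_and() atomically clears that single bit. For example, clearing
  // kCheckpointRequest (2) ANDs the flags with ~2, leaving a pending kSuspendRequest bit intact.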
1141 
1142   bool UseMterp() const {
1143     return tls32_.use_mterp.load();
1144   }
1145 
1146   void ResetQuickAllocEntryPointsForThread(bool is_marking);
1147 
1148   // Returns the remaining space in the TLAB.
1149   size_t TlabSize() const {
1150     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1151   }
1152 
1153   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1154   size_t TlabRemainingCapacity() const {
1155     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1156   }
1157 
1158   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1159   void ExpandTlab(size_t bytes) {
1160     tlsPtr_.thread_local_end += bytes;
1161     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1162   }
1163 
1164   // Doesn't check that there is room.
1165   mirror::Object* AllocTlab(size_t bytes);
1166   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1167   bool HasTlab() const;
1168   void ResetTlab();
1169   uint8_t* GetTlabStart() {
1170     return tlsPtr_.thread_local_start;
1171   }
1172   uint8_t* GetTlabPos() {
1173     return tlsPtr_.thread_local_pos;
1174   }
1175   uint8_t* GetTlabEnd() {
1176     return tlsPtr_.thread_local_end;
1177   }
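  // An illustrative TLAB fast path built from the accessors above (the real allocation code lives
  // in the heap; `aligned_size` is an assumption for the example):
  //
  //   if (self->TlabSize() >= aligned_size) {
  //     mirror::Object* obj = self->AllocTlab(aligned_size);  // Bump-allocates; no room check.
  //     // ... initialize the object ...
  //   } else {
  //     // Refill the TLAB or take a slower allocation path.
  //   }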
1178   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1179   // equal to a valid pointer.
1180   // TODO: does this need to be atomic?  I don't think so.
1181   void RemoveSuspendTrigger() {
1182     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1183   }
1184 
1185   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1186   // The next time a suspend check is done, it will load from the value at this address
1187   // and trigger a SIGSEGV.
1188   void TriggerSuspend() {
1189     tlsPtr_.suspend_trigger = nullptr;
1190   }
1191 
1192 
1193   // Push an object onto the allocation stack.
1194   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1195       REQUIRES_SHARED(Locks::mutator_lock_);
1196 
1197   // Set the thread local allocation pointers to the given pointers.
1198   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1199                                      StackReference<mirror::Object>* end);
1200 
1201   // Resets the thread local allocation pointers.
1202   void RevokeThreadLocalAllocationStack();
1203 
1204   size_t GetThreadLocalBytesAllocated() const {
1205     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1206   }
1207 
1208   size_t GetThreadLocalObjectsAllocated() const {
1209     return tlsPtr_.thread_local_objects;
1210   }
1211 
1212   void* GetRosAllocRun(size_t index) const {
1213     return tlsPtr_.rosalloc_runs[index];
1214   }
1215 
1216   void SetRosAllocRun(size_t index, void* run) {
1217     tlsPtr_.rosalloc_runs[index] = run;
1218   }
1219 
1220   bool ProtectStack(bool fatal_on_error = true);
1221   bool UnprotectStack();
1222 
1223   void SetMterpCurrentIBase(void* ibase) {
1224     tlsPtr_.mterp_current_ibase = ibase;
1225   }
1226 
1227   const void* GetMterpCurrentIBase() const {
1228     return tlsPtr_.mterp_current_ibase;
1229   }
1230 
1231   bool HandlingSignal() const {
1232     return tls32_.handling_signal_;
1233   }
1234 
1235   void SetHandlingSignal(bool handling_signal) {
1236     tls32_.handling_signal_ = handling_signal;
1237   }
1238 
1239   bool IsTransitioningToRunnable() const {
1240     return tls32_.is_transitioning_to_runnable;
1241   }
1242 
1243   void SetIsTransitioningToRunnable(bool value) {
1244     tls32_.is_transitioning_to_runnable = value;
1245   }
1246 
1247   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1248     return --tls32_.force_interpreter_count;
1249   }
1250 
1251   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1252     return ++tls32_.force_interpreter_count;
1253   }
1254 
1255   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1256     tls32_.force_interpreter_count = value;
1257   }
1258 
1259   uint32_t ForceInterpreterCount() const {
1260     return tls32_.force_interpreter_count;
1261   }
1262 
1263   bool IsForceInterpreter() const {
1264     return tls32_.force_interpreter_count != 0;
1265   }
1266 
1267   bool IncrementMakeVisiblyInitializedCounter() {
1268     tls32_.make_visibly_initialized_counter += 1u;
1269     return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
1270   }
1271 
1272   void ClearMakeVisiblyInitializedCounter() {
1273     tls32_.make_visibly_initialized_counter = 0u;
1274   }
1275 
1276   void PushVerifier(verifier::MethodVerifier* verifier);
1277   void PopVerifier(verifier::MethodVerifier* verifier);
1278 
1279   void InitStringEntryPoints();
1280 
1281   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1282     debug_disallow_read_barrier_ += delta;
1283   }
1284 
1285   uint8_t GetDebugDisallowReadBarrierCount() const {
1286     return debug_disallow_read_barrier_;
1287   }
1288 
1289   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1290   // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1291   // it from being deleted.
1292   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1293 
1294   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1295   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1296   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
1297 
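  // A usage sketch for the custom TLS slots (illustrative only; `CounterTLS` and the key
  // string are hypothetical):
  //
  //   class CounterTLS : public TLSData {
  //    public:
  //     size_t count = 0;
  //   };
  //
  //   Thread* self = Thread::Current();
  //   CounterTLS* data = static_cast<CounterTLS*>(self->GetCustomTLS("my.plugin.counter"));
  //   if (data == nullptr) {
  //     data = new CounterTLS();
  //     self->SetCustomTLS("my.plugin.counter", data);  // The thread takes ownership.
  //   }
  //   data->count++;
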
1298   // Returns true if the current thread is the jit sensitive thread.
1299   bool IsJitSensitiveThread() const {
1300     return this == jit_sensitive_thread_;
1301   }
1302 
1303   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1304 
1305   // Returns true if StrictMode events are traced for the current thread.
1306   static bool IsSensitiveThread() {
1307     if (is_sensitive_thread_hook_ != nullptr) {
1308       return (*is_sensitive_thread_hook_)();
1309     }
1310     return false;
1311   }
1312 
1313   // Sets the read barrier marking entrypoints to be non-null.
1314   void SetReadBarrierEntrypoints();
1315 
1316   static jobject CreateCompileTimePeer(JNIEnv* env,
1317                                        const char* name,
1318                                        bool as_daemon,
1319                                        jobject thread_group)
1320       REQUIRES_SHARED(Locks::mutator_lock_);
1321 
1322   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1323     return &interpreter_cache_;
1324   }
1325 
1326   // Clear all thread-local interpreter caches.
1327   //
1328   // Since the caches are keyed by memory pointer to dex instructions, this must be
1329   // called when any dex code is unloaded (before different code gets loaded at the
1330   // same memory location).
1331   //
1332   // If the presence of a cache entry implies some pre-conditions, this must also be
1333   // called if the pre-conditions might no longer hold true.
1334   static void ClearAllInterpreterCaches();
1335 
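  // For example (illustrative only; the real call sites are on the dex-unloading and
  // code-invalidation paths):
  //
  //   // Before memory that backed unloaded dex code may be reused:
  //   Thread::ClearAllInterpreterCaches();
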
1336   template<PointerSize pointer_size>
1337   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1338     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1339   }
1340 
1341   static constexpr int InterpreterCacheSizeLog2() {
1342     return WhichPowerOf2(InterpreterCache::kSize);
1343   }
1344 
1345  private:
1346   explicit Thread(bool daemon);
1347   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1348   void Destroy();
1349 
1350   // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1351   // observed to be set at the same time by instrumentation.
1352   void DeleteJPeer(JNIEnv* env);
1353 
1354   void NotifyInTheadList()
1355       REQUIRES_SHARED(Locks::thread_list_lock_);
1356 
1357   // Attaches the calling native thread to the runtime, returning the new native peer.
1358   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1359   template <typename PeerAction>
1360   static Thread* Attach(const char* thread_name,
1361                         bool as_daemon,
1362                         PeerAction p);
1363 
1364   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1365 
1366   template<bool kTransactionActive>
1367   static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1368                        ObjPtr<mirror::Object> peer,
1369                        jboolean thread_is_daemon,
1370                        jobject thread_group,
1371                        jobject thread_name,
1372                        jint thread_priority)
1373       REQUIRES_SHARED(Locks::mutator_lock_);
1374 
1375   // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
1376   ThreadState SetStateUnsafe(ThreadState new_state) {
1377     ThreadState old_state = GetState();
1378     if (old_state == kRunnable && new_state != kRunnable) {
1379       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1380       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1381       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1382       TransitionToSuspendedAndRunCheckpoints(new_state);
1383       // Since we transitioned to a suspended state, check for and pass any pending suspend barriers.
1384       PassActiveSuspendBarriers();
1385     } else {
1386       tls32_.state_and_flags.as_struct.state = new_state;
1387     }
1388     return old_state;
1389   }
1390 
1391   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1392 
1393   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1394   void DumpStack(std::ostream& os,
1395                  bool dump_native_stack = true,
1396                  BacktraceMap* backtrace_map = nullptr,
1397                  bool force_dump_stack = false) const
1398       REQUIRES_SHARED(Locks::mutator_lock_);
1399 
1400   // Out-of-line conveniences for debugging in gdb.
1401   static Thread* CurrentFromGdb();  // Like Thread::Current.
1402   // Like Thread::Dump(std::cerr).
1403   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1404 
1405   static void* CreateCallback(void* arg);
1406 
1407   void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
1408       REQUIRES_SHARED(Locks::mutator_lock_);
1409   void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1410       REQUIRES_SHARED(Locks::mutator_lock_);
1411 
1412   // Initialize a thread.
1413   //
1414   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1415   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1416   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1417   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1418   // of false).
1419   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1420       REQUIRES(Locks::runtime_shutdown_lock_);
1421   void InitCardTable();
1422   void InitCpu();
1423   void CleanupCpu();
1424   void InitTlsEntryPoints();
1425   void InitTid();
1426   void InitPthreadKeySelf();
1427   bool InitStackHwm();
1428 
1429   void SetUpAlternateSignalStack();
1430   void TearDownAlternateSignalStack();
1431 
1432   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1433       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1434 
1435   ALWAYS_INLINE void PassActiveSuspendBarriers()
1436       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1437 
1438   // Registers the current thread as the jit sensitive thread. Should be called just once.
1439   static void SetJitSensitiveThread() {
1440     if (jit_sensitive_thread_ == nullptr) {
1441       jit_sensitive_thread_ = Thread::Current();
1442     } else {
1443       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1444           << Thread::Current()->GetTid();
1445     }
1446   }
1447 
1448   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1449     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1450   }
1451 
1452   bool ModifySuspendCountInternal(Thread* self,
1453                                   int delta,
1454                                   AtomicInteger* suspend_barrier,
1455                                   SuspendReason reason)
1456       WARN_UNUSED
1457       REQUIRES(Locks::thread_suspend_count_lock_);
1458 
1459   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1460   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1461   // the kCheckpointRequest flag is cleared.
1462   void RunCheckpointFunction();
1463   void RunEmptyCheckpoint();
1464 
1465   bool PassActiveSuspendBarriers(Thread* self)
1466       REQUIRES(!Locks::thread_suspend_count_lock_);
1467 
1468   // Install the protected region for implicit stack checks.
1469   void InstallImplicitProtection();
1470 
1471   template <bool kPrecise>
1472   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1473 
1474   void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1475 
1476   static bool IsAotCompiler();
1477 
1478   void ReleaseLongJumpContextInternal();
1479 
1480   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1481   // change from being Suspended to Runnable without a suspend request occurring.
1482   union PACKED(4) StateAndFlags {
1483     StateAndFlags() {}
1484     struct PACKED(4) {
1485       // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1486       // ThreadFlags for bit field meanings.
1487       volatile uint16_t flags;
1488       // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
1489       // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1490       // operation. If a thread is suspended and a suspend_request is present, a thread may not
1491       // change to Runnable as a GC or other operation is in progress.
1492       volatile uint16_t state;
1493     } as_struct;
1494     AtomicInteger as_atomic_int;
1495     volatile int32_t as_int;
1496 
1497    private:
1498     // gcc does not handle struct with volatile member assignments correctly.
1499     // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1500     DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1501   };
1502   static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
1503 
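  // A minimal sketch of the Suspended -> Runnable transition this layout enables (illustrative
  // only; the real code lives in TransitionFromSuspendedToRunnable()):
  //
  //   StateAndFlags old_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;  // Snapshot state and flags together.
  //   if (old_sf.as_struct.flags == 0) {              // No suspend or checkpoint request pending.
  //     StateAndFlags new_sf;
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     // A single 32-bit CAS; it fails if a suspend request raced in after the snapshot.
  //     tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(old_sf.as_int,
  //                                                                   new_sf.as_int);
  //   }
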
1504   static void ThreadExitCallback(void* arg);
1505 
1506   // Maximum number of suspend barriers.
1507   static constexpr uint32_t kMaxSuspendBarriers = 3;
1508 
1509   // Has Thread::Startup been called?
1510   static bool is_started_;
1511 
1512   // TLS key used to retrieve the Thread*.
1513   static pthread_key_t pthread_key_self_;
1514 
1515   // Used to notify threads that they should attempt to resume; they will suspend again if
1516   // their suspend count is > 0.
1517   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1518 
1519   // Hook passed by the framework that returns true
1520   // when StrictMode events are traced for the current thread.
1521   static bool (*is_sensitive_thread_hook_)();
1522   // Stores the jit sensitive thread (which for now is the UI thread).
1523   static Thread* jit_sensitive_thread_;
1524 
1525   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
1526 
1527   /***********************************************************************************************/
1528   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1529   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1530   // first if possible.
1531   /***********************************************************************************************/
1532 
1533   struct PACKED(4) tls_32bit_sized_values {
1534     // We have no control over the size of 'bool', but want our boolean fields
1535     // to be 4-byte quantities.
1536     typedef uint32_t bool32_t;
1537 
1538     explicit tls_32bit_sized_values(bool is_daemon)
1539         : suspend_count(0),
1540           debug_suspend_count(0),
1541           thin_lock_thread_id(0),
1542           tid(0),
1543           daemon(is_daemon),
1544           throwing_OutOfMemoryError(false),
1545           no_thread_suspension(0),
1546           thread_exit_check_count(0),
1547           handling_signal_(false),
1548           is_transitioning_to_runnable(false),
1549           ready_for_debug_invoke(false),
1550           debug_method_entry_(false),
1551           is_gc_marking(false),
1552           weak_ref_access_enabled(true),
1553           disable_thread_flip_count(0),
1554           user_code_suspend_count(0),
1555           force_interpreter_count(0),
1556           use_mterp(0),
1557           make_visibly_initialized_counter(0),
1558           define_class_counter(0) {}
1559 
1560     union StateAndFlags state_and_flags;
1561     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1562                   "Size of state_and_flags and int32 are different");
1563 
1564     // A non-zero value is used to tell the current thread to enter a safe point
1565     // at the next poll.
1566     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1567 
1568     // How much of 'suspend_count_' is by request of the debugger, used to set things right
1569     // when the debugger detaches. Must be <= suspend_count_.
1570     int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1571 
1572     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1573     // This is not to be confused with the native thread's tid, nor is it the value returned
1574     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1575     // important difference between this id and the ids visible to managed code is that these
1576     // ones get reused (to ensure that they fit in the number of bits available).
1577     uint32_t thin_lock_thread_id;
1578 
1579     // System thread id.
1580     uint32_t tid;
1581 
1582     // Is the thread a daemon?
1583     const bool32_t daemon;
1584 
1585     // A boolean telling us whether we're recursively throwing OOME.
1586     bool32_t throwing_OutOfMemoryError;
1587 
1588     // A positive value implies we're in a region where thread suspension isn't expected.
1589     uint32_t no_thread_suspension;
1590 
1591     // How many times has our pthread key's destructor been called?
1592     uint32_t thread_exit_check_count;
1593 
1594     // True if signal is being handled by this thread.
1595     bool32_t handling_signal_;
1596 
1597     // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1598     // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1599     // the rest of them.
1600     bool32_t is_transitioning_to_runnable;
1601 
1602     // True if the thread has been suspended by a debugger event. This is
1603     // used to invoke method from the debugger which is only allowed when
1604     // the thread is suspended by an event.
1605     bool32_t ready_for_debug_invoke;
1606 
1607     // True if the thread enters a method. This is used to detect method entry
1608     // event for the debugger.
1609     bool32_t debug_method_entry_;
1610 
1611     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1612     // thread local so that we can simplify the logic to check for the fast path of read barriers of
1613     // GC roots.
1614     bool32_t is_gc_marking;
1615 
1616     // Thread "interrupted" status; stays raised until queried or thrown.
1617     Atomic<bool32_t> interrupted;
1618 
1619     AtomicInteger park_state_;
1620 
1621     // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1622     // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1623     // processing of the CC collector only. This is thread local so that we can enable/disable weak
1624     // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1625     // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1626     // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1627     // ReferenceProcessor::EnableSlowPath().
1628     bool32_t weak_ref_access_enabled;
1629 
1630     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1631     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1632     // critical section enter.
1633     uint32_t disable_thread_flip_count;
1634 
1635     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1636     // suspended by the runtime from those suspended by user code.
1637     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1638     // told that AssertHeld should be good enough.
1639     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1640 
1641     // Count of how many times this thread has been forced to interpreter. If this is not 0 the
1642     // thread must remain in interpreted code as much as possible.
1643     uint32_t force_interpreter_count;
1644 
1645     // True if everything is in the ideal state for fast interpretation.
1646     // False if we need to switch to the C++ interpreter to handle special cases.
1647     std::atomic<bool32_t> use_mterp;
1648 
1649     // Counter for calls to initialize a class that's initialized but not visibly initialized.
1650     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
1651     // make initialized classes visibly initialized. This is needed because we usually make
1652     // classes visibly initialized in batches but we do not want to be stuck with a class
1653     // initialized but not visibly initialized for a long time even if no more classes are
1654     // being initialized anymore.
1655     uint32_t make_visibly_initialized_counter;
1656 
1657     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
1658     // for threads to be done with class-definition work.
1659     uint32_t define_class_counter;
1660   } tls32_;
1661 
1662   struct PACKED(8) tls_64bit_sized_values {
1663     tls_64bit_sized_values() : trace_clock_base(0) {
1664     }
1665 
1666     // The clock base used for tracing.
1667     uint64_t trace_clock_base;
1668 
1669     RuntimeStats stats;
1670   } tls64_;
1671 
1672   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1673       tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1674       managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1675       self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1676       deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
1677       top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1678       instrumentation_stack(nullptr),
1679       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1680       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1681       last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1682       thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1683       thread_local_limit(nullptr),
1684       thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1685       thread_local_alloc_stack_end(nullptr),
1686       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
1687       async_exception(nullptr), top_reflective_handle_scope(nullptr) {
1688       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1689     }
1690 
1691     // The biased card table; see CardTable for details.
1692     uint8_t* card_table;
1693 
1694     // The pending exception or null.
1695     mirror::Throwable* exception;
1696 
1697     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1698     // We leave extra space so there's room for the code that throws StackOverflowError.
1699     uint8_t* stack_end;
1700 
1701     // The top of the managed stack often manipulated directly by compiler generated code.
1702     ManagedStack managed_stack;
1703 
1704     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1705     // normally set to the address of itself.
1706     uintptr_t* suspend_trigger;
1707 
1708     // Every thread may have an associated JNI environment.
1709     JNIEnvExt* jni_env;
1710 
1711     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1712     // created thread.
1713     JNIEnvExt* tmp_jni_env;
1714 
1715     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1716     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1717     // Thread::Current to give the address.
1718     Thread* self;
1719 
1720     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1721     // start up, until the thread is registered and the local opeer_ is used.
1722     mirror::Object* opeer;
1723     jobject jpeer;
1724 
1725     // The "lowest addressable byte" of the stack.
1726     uint8_t* stack_begin;
1727 
1728     // Size of the stack.
1729     size_t stack_size;
1730 
1731     // Sampling profiler and AOT verification cannot happen on the same run, so we share
1732     // the same entry for the stack trace and the verifier deps.
1733     union DepsOrStackTraceSample {
1734       DepsOrStackTraceSample() {
1735         verifier_deps = nullptr;
1736         stack_trace_sample = nullptr;
1737       }
1738       // Pointer to previous stack trace captured by sampling profiler.
1739       std::vector<ArtMethod*>* stack_trace_sample;
1740       // When doing AOT verification, per-thread VerifierDeps.
1741       verifier::VerifierDeps* verifier_deps;
1742     } deps_or_stack_trace_sample;
1743 
1744     // The next thread in the wait set this thread is part of or null if not waiting.
1745     Thread* wait_next;
1746 
1747     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1748     mirror::Object* monitor_enter_object;
1749 
1750     // Top of linked list of handle scopes or null for none.
1751     BaseHandleScope* top_handle_scope;
1752 
1753     // Needed to get the right ClassLoader in JNI_OnLoad, but also
1754     // useful for testing.
1755     jobject class_loader_override;
1756 
1757     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1758     Context* long_jump_context;
1759 
1760     // Additional stack used by method instrumentation to store method and return pc values.
1761     // Stored as a pointer since std::map is not PACKED.
1762     // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
1763     // ordered iteration on the keys (which are stack addresses).
1764     // Also see Thread::GetInstrumentationStack for the requirements on
1765     // manipulating and reading this map.
1766     std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1767 
1768     // For gc purpose, a shadow frame record stack that keeps track of:
1769     // 1) shadow frames under construction.
1770     // 2) deoptimization shadow frames.
1771     StackedShadowFrameRecord* stacked_shadow_frame_record;
1772 
1773     // Deoptimization return value record stack.
1774     DeoptimizationContextRecord* deoptimization_context_stack;
1775 
1776     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1777     // Shadow frames may be created before deoptimization happens so that the debugger can
1778     // set local values there first.
1779     FrameIdToShadowFrame* frame_id_to_shadow_frame;
1780 
1781     // A cached copy of the java.lang.Thread's name.
1782     std::string* name;
1783 
1784     // A cached pthread_t for the pthread underlying this Thread*.
1785     pthread_t pthread_self;
1786 
1787     // If no_thread_suspension_ is > 0, what is causing that assertion.
1788     const char* last_no_thread_suspension_cause;
1789 
1790     // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1791     // requests another checkpoint, it goes to the checkpoint overflow list.
1792     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
1793 
1794     // Pending barriers that require passing or null if non-pending. Installation guarded by
1795     // Locks::thread_suspend_count_lock_.
1796     // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1797     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1798     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1799 
1800     // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1801     uint8_t* thread_local_start;
1802 
1803     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1804     // potentially better performance.
1805     uint8_t* thread_local_pos;
1806     uint8_t* thread_local_end;
1807 
1808     // Thread local limit is how far we can expand the thread local buffer; it is greater than or
1809     // equal to thread_local_end.
1810     uint8_t* thread_local_limit;
1811 
1812     size_t thread_local_objects;
1813 
1814     // Entrypoint function pointers.
1815     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1816     JniEntryPoints jni_entrypoints;
1817     QuickEntryPoints quick_entrypoints;
1818 
1819     // Mterp jump table base.
1820     void* mterp_current_ibase;
1821 
1822     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1823     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1824 
1825     // Thread-local allocation stack data/routines.
1826     StackReference<mirror::Object>* thread_local_alloc_stack_top;
1827     StackReference<mirror::Object>* thread_local_alloc_stack_end;
1828 
1829     // Support for Mutex lock hierarchy bug detection.
1830     BaseMutex* held_mutexes[kLockLevelCount];
1831 
1832     // The function used for thread flip.
1833     Closure* flip_function;
1834 
1835     // Current method verifier, used for root marking.
1836     verifier::MethodVerifier* method_verifier;
1837 
1838     // Thread-local mark stack for the concurrent copying collector.
1839     gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1840 
1841     // The pending async-exception or null.
1842     mirror::Throwable* async_exception;
1843 
1844     // Top of the linked-list for reflective-handle scopes or null if none.
1845     BaseReflectiveHandleScope* top_reflective_handle_scope;
1846   } tlsPtr_;
1847 
1848   // Small thread-local cache to be used from the interpreter.
1849   // It is keyed by dex instruction pointer.
1850   // The value is opcode-dependent (e.g. field offset).
1851   InterpreterCache interpreter_cache_;
1852 
1853   // All fields below this line should not be accessed by native code. This means these fields can
1854   // be modified, rearranged, added or removed without having to modify asm_support.h
1855 
1856   // Guards the 'wait_monitor_' members.
1857   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1858 
1859   // Condition variable waited upon during a wait.
1860   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1861   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1862   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1863 
1864   // Debug disallow-read-barrier count; only checked in debug builds and only in the runtime.
1865   uint8_t debug_disallow_read_barrier_ = 0;
1866 
1867   // Note that it is not in the packed struct and may not be accessed during cross compilation.
1868   uintptr_t poison_object_cookie_ = 0;
1869 
1870   // Pending extra checkpoints if checkpoint_function_ is already used.
1871   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1872 
1873   // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
1874   // compiled code or entrypoints.
1875   SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
1876 
1877 #ifndef __BIONIC__
1878   __attribute__((tls_model("initial-exec")))
1879   static thread_local Thread* self_tls_;
1880 #endif
1881 
1882   // True if the thread is some form of runtime thread (ex, GC or JIT).
1883   bool is_runtime_thread_;
1884 
1885   // Set during execution of JNI methods that get field and method id's as part of determining if
1886   // the caller is allowed to access all fields and methods in the Core Platform API.
1887   uint32_t core_platform_api_cookie_ = 0;
1888 
1889   friend class gc::collector::SemiSpace;  // For getting stack traces.
1890   friend class Runtime;  // For CreatePeer.
1891   friend class QuickExceptionHandler;  // For dumping the stack.
1892   friend class ScopedThreadStateChange;
1893   friend class StubTest;  // For accessing entrypoints.
1894   friend class ThreadList;  // For ~Thread and Destroy.
1895 
1896   friend class EntrypointsOrderTest;  // To test the order of tls entries.
1897 
1898   DISALLOW_COPY_AND_ASSIGN(Thread);
1899 };
1900 
1901 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1902  public:
1903   ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
1904                                                bool enabled = true)
1905       ACQUIRE(Roles::uninterruptible_)
1906       : enabled_(enabled) {
1907     if (!enabled_) {
1908       return;
1909     }
1910     if (kIsDebugBuild) {
1911       self_ = Thread::Current();
1912       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1913     } else {
1914       Roles::uninterruptible_.Acquire();  // No-op.
1915     }
1916   }
1917   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1918     if (!enabled_) {
1919       return;
1920     }
1921     if (kIsDebugBuild) {
1922       self_->EndAssertNoThreadSuspension(old_cause_);
1923     } else {
1924       Roles::uninterruptible_.Release();  // No-op.
1925     }
1926   }
1927 
1928  private:
1929   Thread* self_;
1930   const bool enabled_;
1931   const char* old_cause_;
1932 };
1933 
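// A usage sketch (illustrative only; the cause string is arbitrary):
//
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting thread roots");
//     // Code here must not suspend. In debug builds, an attempt to suspend within this
//     // scope aborts with the cause string above.
//   }
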
1934 class ScopedAllowThreadSuspension {
1935  public:
1936   ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
1937     if (kIsDebugBuild) {
1938       self_ = Thread::Current();
1939       old_cause_ = self_->EndAssertNoThreadSuspension();
1940     } else {
1941       Roles::uninterruptible_.Release();  // No-op.
1942     }
1943   }
1944   ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
1945     if (kIsDebugBuild) {
1946       CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
1947     } else {
1948       Roles::uninterruptible_.Acquire();  // No-op.
1949     }
1950   }
1951 
1952  private:
1953   Thread* self_;
1954   const char* old_cause_;
1955 };
1956 
1957 
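// An illustrative nesting sketch (the cause string and the blocking work are assumptions):
// temporarily re-allow suspension inside a no-suspension region.
//
//   ScopedAssertNoThreadSuspension sants("Long uninterruptible phase");
//   ...
//   {
//     ScopedAllowThreadSuspension ats;  // Suspension is permitted again in this scope.
//     ...                               // e.g. work that may allocate or block.
//   }
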
1958 class ScopedStackedShadowFramePusher {
1959  public:
1960   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1961     : self_(self), type_(type) {
1962     self_->PushStackedShadowFrame(sf, type);
1963   }
1964   ~ScopedStackedShadowFramePusher() {
1965     self_->PopStackedShadowFrame(type_);
1966   }
1967 
1968  private:
1969   Thread* const self_;
1970   const StackedShadowFrameType type_;
1971 
1972   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1973 };
1974 
1975 // Only works for debug builds.
1976 class ScopedDebugDisallowReadBarriers {
1977  public:
1978   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1979     self_->ModifyDebugDisallowReadBarrier(1);
1980   }
1981   ~ScopedDebugDisallowReadBarriers() {
1982     self_->ModifyDebugDisallowReadBarrier(-1);
1983   }
1984 
1985  private:
1986   Thread* const self_;
1987 };
1988 
1989 class ScopedTransitioningToRunnable : public ValueObject {
1990  public:
1991   explicit ScopedTransitioningToRunnable(Thread* self)
1992       : self_(self) {
1993     DCHECK_EQ(self, Thread::Current());
1994     if (kUseReadBarrier) {
1995       self_->SetIsTransitioningToRunnable(true);
1996     }
1997   }
1998 
1999   ~ScopedTransitioningToRunnable() {
2000     if (kUseReadBarrier) {
2001       self_->SetIsTransitioningToRunnable(false);
2002     }
2003   }
2004 
2005  private:
2006   Thread* const self_;
2007 };
2008 
2009 class ThreadLifecycleCallback {
2010  public:
2011   virtual ~ThreadLifecycleCallback() {}
2012 
2013   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2014   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2015 };
2016 
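// A sketch of a hypothetical callback implementation (illustrative only; registration with
// the runtime's callback list is not part of this header):
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread exiting: " << *self;
//     }
//   };
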
2017 // Store an exception from the thread and suppress it for the duration of this object.
2018 class ScopedExceptionStorage {
2019  public:
2020   explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2021   void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
2022   ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
2023 
2024  private:
2025   Thread* self_;
2026   StackHandleScope<1> hs_;
2027   MutableHandle<mirror::Throwable> excp_;
2028 };
2029 
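// A usage sketch (illustrative only; DoCleanupThatMayThrow() is an assumed helper): stash the
// pending exception while running code that must not see it; it is pending again afterwards
// unless explicitly suppressed.
//
//   {
//     ScopedExceptionStorage ses(self);
//     DoCleanupThatMayThrow();
//     if (self->IsExceptionPending()) {
//       ses.SuppressOldException("cleanup raised its own exception");
//     }
//   }
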
2030 std::ostream& operator<<(std::ostream& os, const Thread& thread);
2031 std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
2032 
2033 }  // namespace art
2034 
2035 #endif  // ART_RUNTIME_THREAD_H_
2036