1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/bit_field.h"
30 #include "base/bit_utils.h"
31 #include "base/enums.h"
32 #include "base/locks.h"
33 #include "base/macros.h"
34 #include "base/safe_map.h"
35 #include "base/value_object.h"
36 #include "entrypoints/jni/jni_entrypoints.h"
37 #include "entrypoints/quick/quick_entrypoints.h"
38 #include "handle.h"
39 #include "handle_scope.h"
40 #include "interpreter/interpreter_cache.h"
41 #include "javaheapprof/javaheapsampler.h"
42 #include "jvalue.h"
43 #include "managed_stack.h"
44 #include "offsets.h"
45 #include "read_barrier_config.h"
46 #include "reflective_handle_scope.h"
47 #include "runtime_globals.h"
48 #include "runtime_stats.h"
49 #include "thread_state.h"
50 
51 class BacktraceMap;
52 
53 namespace art {
54 
55 namespace gc {
56 namespace accounting {
57 template<class T> class AtomicStack;
58 }  // namespace accounting
59 namespace collector {
60 class SemiSpace;
61 }  // namespace collector
62 }  // namespace gc
63 
64 namespace instrumentation {
65 struct InstrumentationStackFrame;
66 }  // namespace instrumentation
67 
68 namespace mirror {
69 class Array;
70 class Class;
71 class ClassLoader;
72 class Object;
73 template<class T> class ObjectArray;
74 template<class T> class PrimitiveArray;
75 using IntArray = PrimitiveArray<int32_t>;
76 class StackTraceElement;
77 class String;
78 class Throwable;
79 }  // namespace mirror
80 
81 namespace verifier {
82 class MethodVerifier;
83 class VerifierDeps;
84 }  // namespace verifier
85 
86 class ArtMethod;
87 class BaseMutex;
88 class ClassLinker;
89 class Closure;
90 class Context;
91 class DeoptimizationContextRecord;
92 class DexFile;
93 class FrameIdToShadowFrame;
94 class IsMarkedVisitor;
95 class JavaVMExt;
96 class JNIEnvExt;
97 class Monitor;
98 class RootVisitor;
99 class ScopedObjectAccessAlreadyRunnable;
100 class ShadowFrame;
101 class StackedShadowFrameRecord;
102 enum class SuspendReason : char;
103 class Thread;
104 class ThreadList;
105 enum VisitRootFlags : uint8_t;
106 
107 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
108 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
109 // on.
110 class TLSData {
111  public:
112   virtual ~TLSData() {}
113 };
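// Illustrative sketch (not part of the original header): a minimal TLSData subclass, assuming it
// is installed via the SetCustomTls mechanism referenced near the end of the Thread class below.
// The class name and payload are hypothetical; the only contract TLSData imposes is that the
// destructor may run on a different thread than the one the data was stored on.
//
//   class MyCounterTLS : public TLSData {
//    public:
//     ~MyCounterTLS() override {
//       // Runs during thread shutdown, possibly on another thread; avoid thread-local state here.
//     }
//     uint64_t counter = 0;
//   };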
114 
115 // Thread priorities. These must match the Thread.MIN_PRIORITY,
116 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
117 enum ThreadPriority {
118   kMinThreadPriority = 1,
119   kNormThreadPriority = 5,
120   kMaxThreadPriority = 10,
121 };
122 
123 enum class ThreadFlag : uint32_t {
124   // If set, implies that suspend_count_ > 0 and the Thread should enter the safepoint handler.
125   kSuspendRequest = 1u << 0,
126 
127   // Request that the thread do some checkpoint work and then continue.
128   kCheckpointRequest = 1u << 1,
129 
130   // Request that the thread do an empty checkpoint and then continue.
131   kEmptyCheckpointRequest = 1u << 2,
132 
133   // Register that at least 1 suspend barrier needs to be passed.
134   kActiveSuspendBarrier = 1u << 3,
135 
136   // Marks that a "flip function" needs to be executed on this thread.
137   kPendingFlipFunction = 1u << 4,
138 
139   // Marks that the "flip function" is being executed by another thread.
140   //
141   // This is used to guard against multiple threads trying to run the
142   // "flip function" for the same thread while the thread is suspended.
143   //
144   // This is not needed when the thread is running the flip function
145   // on its own after transitioning to Runnable.
146   kRunningFlipFunction = 1u << 5,
147 
148   // Marks that a thread is waiting for the "flip function" to complete.
149   //
150   // This is used to check if we need to broadcast the completion of the
151   // "flip function" to other threads. See also `kRunningFlipFunction`.
152   kWaitingForFlipFunction = 1u << 6,
153 
154   // Request that compiled JNI stubs do not transition to Native or Runnable with
155   // inlined code, but take a slow path for monitoring method entry and exit events.
156   kMonitorJniEntryExit = 1u << 7,
157 
158   // Indicates the last flag. Used for checking that the flags do not overlap thread state.
159   kLastFlag = kMonitorJniEntryExit
160 };
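// Illustrative sketch (not part of the original header): each ThreadFlag is a single bit packed
// into the same 32-bit word as the thread state (see ReadFlag/AtomicSetFlag/AtomicClearFlag
// further down), so a flag query reduces to a bit test. A minimal sketch, with `raw` standing in
// for a relaxed load of that word:
//
//   uint32_t raw = ...;  // tls32_.state_and_flags loaded with std::memory_order_relaxed.
//   bool suspend_requested = (raw & enum_cast<uint32_t>(ThreadFlag::kSuspendRequest)) != 0;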
161 
162 enum class StackedShadowFrameType {
163   kShadowFrameUnderConstruction,
164   kDeoptimizationShadowFrame,
165 };
166 
167 // The type of method that triggers deoptimization. It contains info on whether
168 // the deoptimized method should advance dex_pc.
169 enum class DeoptimizationMethodType {
170   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
171   kDefault     // dex pc may or may not advance depending on other conditions.
172 };
173 
174 // For the CC collector, normal weak reference access can be disabled on a per-thread basis while
175 // processing references.  After finishing, the reference processor asynchronously sets the
176 // per-thread flags back to kEnabled with release memory ordering semantics. Each mutator thread
177 // should check its flag with acquire semantics before assuming that it is enabled. However,
178 // that is often too expensive, so the reading thread sets it to kVisiblyEnabled after seeing it
179 // kEnabled.  The Reference.get() intrinsic can thus read it in relaxed mode, and reread (by
180 // resorting to the slow path) with acquire semantics if it sees a value of kEnabled rather than
181 // kVisiblyEnabled.
182 enum class WeakRefAccessState : int32_t {
183   kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
184   kEnabled,
185   kDisabled
186 };
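// Illustrative sketch (not part of the original header) of the reader-side protocol described
// above, written against a free-standing std::atomic for clarity (the real field lives in
// tls32_ and is read by the Reference.get() intrinsic and its slow path):
//
//   std::atomic<WeakRefAccessState> state;
//   WeakRefAccessState s = state.load(std::memory_order_relaxed);
//   if (s == WeakRefAccessState::kEnabled) {
//     s = state.load(std::memory_order_acquire);         // Slow path: re-read with acquire.
//     if (s == WeakRefAccessState::kEnabled) {
//       state.store(WeakRefAccessState::kVisiblyEnabled,  // Remember that we synchronized.
//                   std::memory_order_relaxed);
//       s = WeakRefAccessState::kVisiblyEnabled;
//     }
//   }
//   bool access_enabled = (s != WeakRefAccessState::kDisabled);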
187 
188 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
189 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
190 
191 static constexpr size_t kSharedMethodHotnessThreshold = 0xffff;
192 
193 // Thread's stack layout for implicit stack overflow checks:
194 //
195 //   +---------------------+  <- highest address of stack memory
196 //   |                     |
197 //   .                     .  <- SP
198 //   |                     |
199 //   |                     |
200 //   +---------------------+  <- stack_end
201 //   |                     |
202 //   |  Gap                |
203 //   |                     |
204 //   +---------------------+  <- stack_begin
205 //   |                     |
206 //   | Protected region    |
207 //   |                     |
208 //   +---------------------+  <- lowest address of stack memory
209 //
210 // The stack always grows down in memory.  At the lowest address is a region of memory
211 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
212 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
213 // between the stack_end and the highest address in stack memory.  An implicit stack
214 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
215 // If the thread's SP is below the stack_end address this will be a read into the protected
216 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
217 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
218 // if the thread makes a call out to a native function (through JNI), that native function
219 // might only have 4K of memory (if the SP is adjacent to stack_end).
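// Illustrative sketch (not part of the original header): an implicit stack overflow check is a
// single probing load at a fixed offset below SP emitted in generated code. Conceptually:
//
//   //   volatile uint8_t* probe = sp - kStackOverflowImplicitCheckSize;
//   //   (void)*probe;  // Faults if this lands in the protected region; the SIGSEGV handler
//   //                  // then raises StackOverflowError instead of crashing.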
220 
221 class Thread {
222  public:
223   static const size_t kStackOverflowImplicitCheckSize;
224   static constexpr bool kVerifyStack = kIsDebugBuild;
225 
226   // Creates a new native thread corresponding to the given managed peer.
227   // Used to implement Thread.start.
228   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
229 
230   // Attaches the calling native thread to the runtime, returning the new native peer.
231   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
232   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
233                         bool create_peer);
234   // Attaches the calling native thread to the runtime, returning the new native peer.
235   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
236 
237   // Reset internal state of child thread after fork.
238   void InitAfterFork();
239 
240   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
241   // high cost and so we favor passing self around when possible.
242   // TODO: mark as PURE so the compiler may coalesce and remove?
243   static Thread* Current();
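  // Illustrative sketch (not part of the original header): attaching a native thread and
  // obtaining `self`. The thread name is made up, and passing a null thread group is an
  // assumption here (meaning "use the default group"):
  //
  //   Thread* self = Thread::Attach("my-native-worker",
  //                                 /* as_daemon= */ false,
  //                                 /* thread_group= */ nullptr,
  //                                 /* create_peer= */ true);
  //   CHECK(self == Thread::Current());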
244 
245   // On a runnable thread, check for pending thread suspension request and handle if pending.
246   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
247 
248   // Process pending thread suspension request and handle if pending.
249   void CheckSuspend(bool implicit = false) REQUIRES_SHARED(Locks::mutator_lock_);
250 
251   // Process a pending empty checkpoint if pending.
252   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
253   void CheckEmptyCheckpointFromMutex();
254 
255   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
256                                    ObjPtr<mirror::Object> thread_peer)
257       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
258       REQUIRES_SHARED(Locks::mutator_lock_);
259   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
260       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
261       REQUIRES_SHARED(Locks::mutator_lock_);
262 
263   // Translates 172 to pAllocArrayFromCode and so on.
264   template<PointerSize size_of_pointers>
265   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
266 
267   // Dumps a one-line summary of thread state (used for operator<<).
268   void ShortDump(std::ostream& os) const;
269 
270   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
271   void Dump(std::ostream& os,
272             bool dump_native_stack = true,
273             BacktraceMap* backtrace_map = nullptr,
274             bool force_dump_stack = false) const
275       REQUIRES_SHARED(Locks::mutator_lock_);
276 
277   void DumpJavaStack(std::ostream& os,
278                      bool check_suspended = true,
279                      bool dump_locks = true) const
280       REQUIRES_SHARED(Locks::mutator_lock_);
281 
282   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
283   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
284   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
285       REQUIRES_SHARED(Locks::mutator_lock_);
286 
287   ThreadState GetState() const {
288     return GetStateAndFlags(std::memory_order_relaxed).GetState();
289   }
290 
291   ThreadState SetState(ThreadState new_state);
292 
293   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
294     return tls32_.suspend_count;
295   }
296 
297   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
298                                                Locks::user_code_suspension_lock_) {
299     return tls32_.user_code_suspend_count;
300   }
301 
302   bool IsSuspended() const {
303     StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
304     return state_and_flags.GetState() != ThreadState::kRunnable &&
305            state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
306   }
307 
308   void DecrDefineClassCount() {
309     tls32_.define_class_counter--;
310   }
311 
312   void IncrDefineClassCount() {
313     tls32_.define_class_counter++;
314   }
315   uint32_t GetDefineClassCount() const {
316     return tls32_.define_class_counter;
317   }
318 
319   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
320   // release thread_suspend_count_lock_ internally.
321   ALWAYS_INLINE
322   bool ModifySuspendCount(Thread* self,
323                           int delta,
324                           AtomicInteger* suspend_barrier,
325                           SuspendReason reason)
326       WARN_UNUSED
327       REQUIRES(Locks::thread_suspend_count_lock_);
328 
329   // Requests a checkpoint closure to run on another thread. The closure will be run when the
330   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
331   // originating from a compiler generated suspend point check. This returns true if the closure
332   // was added and will (eventually) be executed. It returns false otherwise.
333   //
334   // Since multiple closures can be queued and some closures can delay other threads from running,
335   // no closure should attempt to suspend another thread while running.
336   // TODO We should add some debug option that verifies this.
337   //
338   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
339   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
340   // acquires it.
341   bool RequestCheckpoint(Closure* function)
342       REQUIRES(Locks::thread_suspend_count_lock_);
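  // Illustrative sketch (not part of the original header): requesting a checkpoint on a target
  // thread. This assumes Closure's usual virtual Run(Thread*) interface; the closure type and
  // fallback handling are hypothetical.
  //
  //   class DumpClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       // Runs once the target notices the request; must not try to suspend other threads.
  //     }
  //   };
  //
  //   DumpClosure closure;
  //   {
  //     MutexLock mu(self, *Locks::thread_suspend_count_lock_);
  //     if (!target->RequestCheckpoint(&closure)) {
  //       // Target was not runnable; the caller has to retry or fall back to suspension.
  //     }
  //   }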
343 
344   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
345   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
346   // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
347   // will go into while it is awaiting the checkpoint to be run.
348   // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
349   // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
350   // for the closure or the rest of the system.
351   // NB Since multiple closures can be queued and some closures can delay other threads from running,
352   // no closure should attempt to suspend another thread while running.
353   bool RequestSynchronousCheckpoint(Closure* function,
354                                     ThreadState suspend_state = ThreadState::kWaiting)
355       REQUIRES_SHARED(Locks::mutator_lock_)
356       RELEASE(Locks::thread_list_lock_)
357       REQUIRES(!Locks::thread_suspend_count_lock_);
358 
359   bool RequestEmptyCheckpoint()
360       REQUIRES(Locks::thread_suspend_count_lock_);
361 
362   // Set the flip function. This is done with all threads suspended, except for the calling thread.
363   void SetFlipFunction(Closure* function);
364 
365   // Ensure that the thread flip function has started running. If no other thread is executing
366   // it, the calling thread shall run the flip function and then notify other threads
367   // that have tried to do that concurrently. After this function returns,
368   // `ThreadFlag::kPendingFlipFunction` is cleared, but another thread may still be
369   // running the flip function, as indicated by `ThreadFlag::kRunningFlipFunction`.
370   void EnsureFlipFunctionStarted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
371 
372   // Wait for the flip function to complete if still running on another thread.
373   void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
374 
375   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
376     CHECK(kUseReadBarrier);
377     return tlsPtr_.thread_local_mark_stack;
378   }
379   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
380     CHECK(kUseReadBarrier);
381     tlsPtr_.thread_local_mark_stack = stack;
382   }
383 
384   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share
385   // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
386   void FullSuspendCheck(bool implicit = false)
387       REQUIRES(!Locks::thread_suspend_count_lock_)
388       REQUIRES_SHARED(Locks::mutator_lock_);
389 
390   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
391   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
392       REQUIRES(!Locks::thread_suspend_count_lock_)
393       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
394 
395   // Transition from runnable into a state where mutator privileges are denied. Releases share of
396   // mutator lock.
397   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
398       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
399       UNLOCK_FUNCTION(Locks::mutator_lock_);
400 
401   // Once called, thread suspension will cause an assertion failure.
402   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
403     Roles::uninterruptible_.Acquire();  // No-op.
404     if (kIsDebugBuild) {
405       CHECK(cause != nullptr);
406       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
407       tls32_.no_thread_suspension++;
408       tlsPtr_.last_no_thread_suspension_cause = cause;
409       return previous_cause;
410     } else {
411       return nullptr;
412     }
413   }
414 
415   // End region where no thread suspension is expected.
416   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
417     if (kIsDebugBuild) {
418       CHECK_IMPLIES(old_cause == nullptr, tls32_.no_thread_suspension == 1);
419       CHECK_GT(tls32_.no_thread_suspension, 0U);
420       tls32_.no_thread_suspension--;
421       tlsPtr_.last_no_thread_suspension_cause = old_cause;
422     }
423     Roles::uninterruptible_.Release();  // No-op.
424   }
425 
426   // End region where no thread suspension is expected. Returns the current open region in case we
427   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
428   // is larger than one.
429   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
430     const char* ret = nullptr;
431     if (kIsDebugBuild) {
432       CHECK_EQ(tls32_.no_thread_suspension, 1u);
433       tls32_.no_thread_suspension--;
434       ret = tlsPtr_.last_no_thread_suspension_cause;
435       tlsPtr_.last_no_thread_suspension_cause = nullptr;
436     }
437     Roles::uninterruptible_.Release();  // No-op.
438     return ret;
439   }
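  // Illustrative sketch (not part of the original header): the assertions above are used as a
  // bracketed region; the cause string is an example.
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting thread roots");
  //   ...  // Code that must not suspend.
  //   self->EndAssertNoThreadSuspension(old_cause);
  //
  // In practice the pairing is typically wrapped in a scoped RAII helper rather than written out.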
440 
441   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
442 
443   // Return true if thread suspension is allowable.
444   bool IsThreadSuspensionAllowable() const;
445 
446   bool IsDaemon() const {
447     return tls32_.daemon;
448   }
449 
450   size_t NumberOfHeldMutexes() const;
451 
452   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
453 
454   /*
455    * Changes the priority of this thread to match that of the java.lang.Thread object.
456    *
457    * We map a priority value from 1-10 to Linux "nice" values, where lower
458    * numbers indicate higher priority.
459    */
460   void SetNativePriority(int newPriority);
461 
462   /*
463    * Returns the priority of this thread by querying the system.
464    * This is useful when attaching a thread through JNI.
465    *
466    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
467    */
468   int GetNativePriority() const;
469 
470   // Guaranteed to be non-zero.
471   uint32_t GetThreadId() const {
472     return tls32_.thin_lock_thread_id;
473   }
474 
475   pid_t GetTid() const {
476     return tls32_.tid;
477   }
478 
479   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
480   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
481 
482   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
483   // allocation, or locking.
484   void GetThreadName(std::string& name) const;
485 
486   // Sets the thread's name.
487   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
488 
489   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
490   uint64_t GetCpuMicroTime() const;
491 
492   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
493     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
494     CHECK(tlsPtr_.jpeer == nullptr);
495     return tlsPtr_.opeer;
496   }
497   // GetPeer is not safe if called on another thread in the middle of the CC thread flip: the
498   // thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
499   // This function will explicitly mark/forward it.
500   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
501 
502   bool HasPeer() const {
503     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
504   }
505 
506   RuntimeStats* GetStats() {
507     return &tls64_.stats;
508   }
509 
510   bool IsStillStarting() const;
511 
512   bool IsExceptionPending() const {
513     return tlsPtr_.exception != nullptr;
514   }
515 
516   bool IsAsyncExceptionPending() const {
517     return tlsPtr_.async_exception != nullptr;
518   }
519 
520   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
521     return tlsPtr_.exception;
522   }
523 
524   void AssertPendingException() const;
525   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
526   void AssertNoPendingException() const;
527   void AssertNoPendingExceptionForNewException(const char* msg) const;
528 
529   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
530 
531   // Set an exception that is asynchronously thrown from a different thread. This will be checked
532   // periodically and might overwrite the current 'Exception'. This can only be called from a
533   // checkpoint.
534   //
535   // The caller should also make sure that the thread has been deoptimized so that the exception
536   // could be detected on back-edges.
537   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
538       REQUIRES_SHARED(Locks::mutator_lock_);
539 
540   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
541     tlsPtr_.exception = nullptr;
542   }
543 
544   // Move the current async-exception to the main exception. This should be called when the current
545   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
546   // that needs to be dealt with, false otherwise.
547   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
548 
549   // Find the catch block and perform a long jump to the appropriate exception handler.
550   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
551 
552   Context* GetLongJumpContext();
553   void ReleaseLongJumpContext(Context* context) {
554     if (tlsPtr_.long_jump_context != nullptr) {
555       ReleaseLongJumpContextInternal();
556     }
557     tlsPtr_.long_jump_context = context;
558   }
559 
560   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
561   // abort the runtime iff abort_on_error is true.
562   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
563                               bool check_suspended = true,
564                               bool abort_on_error = true) const
565       REQUIRES_SHARED(Locks::mutator_lock_);
566 
567   // Returns whether the given exception was thrown by the current Java method being executed
568   // (Note that this includes native Java methods).
569   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
570       REQUIRES_SHARED(Locks::mutator_lock_);
571 
572   void SetTopOfStack(ArtMethod** top_method) {
573     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
574   }
575 
576   void SetTopOfStackTagged(ArtMethod** top_method) {
577     tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
578   }
579 
580   void SetTopOfShadowStack(ShadowFrame* top) {
581     tlsPtr_.managed_stack.SetTopShadowFrame(top);
582   }
583 
584   bool HasManagedStack() const {
585     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
586   }
587 
588   // If 'msg' is null, no detail message is set.
589   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
590       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
591 
592   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
593   // used as the new exception's cause.
594   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
595       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
596 
597   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
598       __attribute__((format(printf, 3, 4)))
599       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
600 
601   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
602       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
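  // Illustrative sketch (not part of the original header): throwing an exception from runtime
  // code. The descriptor, format string, and `actual_size` variable are examples only.
  //
  //   self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
  //                            "Unexpected size: %zu", actual_size);
  //   DCHECK(self->IsExceptionPending());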
603 
604   // OutOfMemoryError is special, because we need to pre-allocate an instance.
605   // Only the GC should call this.
606   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
607       REQUIRES(!Roles::uninterruptible_);
608 
609   static void Startup();
610   static void FinishStartup();
611   static void Shutdown();
612 
613   // Notify this thread's thread-group that this thread has started.
614   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
615   //       is null, the thread's thread-group is loaded from the peer.
616   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
617       REQUIRES_SHARED(Locks::mutator_lock_);
618 
619   // JNI methods
620   JNIEnvExt* GetJniEnv() const {
621     return tlsPtr_.jni_env;
622   }
623 
624   // Convert a jobject into an Object*
625   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
626   // Checks if the weak global ref has been cleared by the GC without decoding it.
627   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
628 
629   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
630     return tlsPtr_.monitor_enter_object;
631   }
632 
633   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
634     tlsPtr_.monitor_enter_object = obj;
635   }
636 
637   // Implements java.lang.Thread.interrupted.
638   bool Interrupted();
639   // Implements java.lang.Thread.isInterrupted.
640   bool IsInterrupted();
641   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
642   void SetInterrupted(bool i) {
643     tls32_.interrupted.store(i, std::memory_order_seq_cst);
644   }
645   void Notify() REQUIRES(!wait_mutex_);
646 
647   ALWAYS_INLINE void PoisonObjectPointers() {
648     ++poison_object_cookie_;
649   }
650 
651   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
652 
653   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
654     return poison_object_cookie_;
655   }
656 
657   // Parking for 0ns of relative time means an untimed park; a negative time (though it
658   // should be handled in Java code) returns immediately.
659   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
660   void Unpark();
661 
662  private:
663   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
664 
665  public:
666   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
667     return wait_mutex_;
668   }
669 
670   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
671     return wait_cond_;
672   }
673 
674   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
675     return wait_monitor_;
676   }
677 
678   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
679     wait_monitor_ = mon;
680   }
681 
682   // Waiter linked-list support.
683   Thread* GetWaitNext() const {
684     return tlsPtr_.wait_next;
685   }
686 
687   void SetWaitNext(Thread* next) {
688     tlsPtr_.wait_next = next;
689   }
690 
691   jobject GetClassLoaderOverride() {
692     return tlsPtr_.class_loader_override;
693   }
694 
695   void SetClassLoaderOverride(jobject class_loader_override);
696 
697   // Create the internal representation of a stack trace, which is more time- and
698   // space-efficient to compute than the StackTraceElement[].
699   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
700       REQUIRES_SHARED(Locks::mutator_lock_);
701 
702   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
703   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
704   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
705   // with the number of valid frames in the returned array.
706   static jobjectArray InternalStackTraceToStackTraceElementArray(
707       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
708       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
709       REQUIRES_SHARED(Locks::mutator_lock_);
710 
711   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
712       REQUIRES_SHARED(Locks::mutator_lock_);
713 
714   bool HasDebuggerShadowFrames() const {
715     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
716   }
717 
718   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
719       REQUIRES_SHARED(Locks::mutator_lock_);
720 
721   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
722       REQUIRES(Locks::mutator_lock_);
723 
724   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
725     if (kVerifyStack) {
726       VerifyStackImpl();
727     }
728   }
729 
730   //
731   // Offsets of various members of native Thread class, used by compiled code.
732   //
733 
734   template<PointerSize pointer_size>
735   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
736     return ThreadOffset<pointer_size>(
737         OFFSETOF_MEMBER(Thread, tls32_) +
738         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
739   }
740 
741   template<PointerSize pointer_size>
742   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
743     return ThreadOffset<pointer_size>(
744         OFFSETOF_MEMBER(Thread, tls32_) +
745         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
746   }
747 
748   template<PointerSize pointer_size>
749   static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
750     return ThreadOffset<pointer_size>(
751         OFFSETOF_MEMBER(Thread, tls32_) +
752         OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
753   }
754 
755   template<PointerSize pointer_size>
756   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
757     return ThreadOffset<pointer_size>(
758         OFFSETOF_MEMBER(Thread, tls32_) +
759         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
760   }
761 
762   template<PointerSize pointer_size>
763   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
764     return ThreadOffset<pointer_size>(
765         OFFSETOF_MEMBER(Thread, tls32_) +
766         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
767   }
768 
769   static constexpr size_t IsGcMarkingSize() {
770     return sizeof(tls32_.is_gc_marking);
771   }
772 
773   template<PointerSize pointer_size>
774   static constexpr ThreadOffset<pointer_size> SharedMethodHotnessOffset() {
775     return ThreadOffset<pointer_size>(
776         OFFSETOF_MEMBER(Thread, tls32_) +
777         OFFSETOF_MEMBER(tls_32bit_sized_values, shared_method_hotness));
778   }
779 
780   // Deoptimize the Java stack.
781   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
782 
783  private:
784   template<PointerSize pointer_size>
785   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
786     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
787     size_t scale = (pointer_size > kRuntimePointerSize) ?
788       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
789     size_t shrink = (kRuntimePointerSize > pointer_size) ?
790       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
791     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
792   }
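  // Worked example (not part of the original header): with kRuntimePointerSize == k32 and
  // pointer_size == k64, scale == 2 and shrink == 1, so a tls_ptr_ offset of 8 maps to
  // base + 16; with kRuntimePointerSize == k64 and pointer_size == k32, shrink == 2 and the
  // offset is halved. When both sizes match, scale == shrink == 1 and the offset is unchanged.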
793 
794  public:
795   template<PointerSize pointer_size>
796   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
797       size_t quick_entrypoint_offset) {
798     return ThreadOffsetFromTlsPtr<pointer_size>(
799         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
800   }
801 
802   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
803                                                           PointerSize pointer_size) {
804     if (pointer_size == PointerSize::k32) {
805       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
806           Uint32Value();
807     } else {
808       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
809           Uint32Value();
810     }
811   }
812 
813   template<PointerSize pointer_size>
814   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
815     return ThreadOffsetFromTlsPtr<pointer_size>(
816         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
817   }
818 
819   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
820   template <PointerSize pointer_size>
821   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
822     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
823     DCHECK_LT(reg, 30u);
824     // The ReadBarrierMarkRegX entry points are ordered by increasing
825   // register number in Thread::tlsPtr_.quick_entrypoints.
826     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
827         + static_cast<size_t>(pointer_size) * reg;
828   }
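  // Worked example (not part of the original header): with pointer_size == k64 (8-byte entry
  // points) and reg == 3, the result is the offset of pReadBarrierMarkReg00 plus 8 * 3, i.e. the
  // offset of pReadBarrierMarkReg03, since the entry points are laid out consecutively by
  // register number.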
829 
830   template<PointerSize pointer_size>
831   static constexpr ThreadOffset<pointer_size> SelfOffset() {
832     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
833   }
834 
835   template<PointerSize pointer_size>
836   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
837     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
838   }
839 
840   template<PointerSize pointer_size>
841   static constexpr ThreadOffset<pointer_size> PeerOffset() {
842     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
843   }
844 
845 
846   template<PointerSize pointer_size>
847   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
848     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
849   }
850 
851   template<PointerSize pointer_size>
852   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
853     return ThreadOffsetFromTlsPtr<pointer_size>(
854         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
855   }
856 
857   template<PointerSize pointer_size>
858   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
859     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
860                                                                 thread_local_pos));
861   }
862 
863   template<PointerSize pointer_size>
864   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
865     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
866                                                                 thread_local_end));
867   }
868 
869   template<PointerSize pointer_size>
870   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
871     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
872                                                                 thread_local_objects));
873   }
874 
875   template<PointerSize pointer_size>
876   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
877     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
878                                                                 rosalloc_runs));
879   }
880 
881   template<PointerSize pointer_size>
882   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
883     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
884                                                                 thread_local_alloc_stack_top));
885   }
886 
887   template<PointerSize pointer_size>
888   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
889     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
890                                                                 thread_local_alloc_stack_end));
891   }
892 
893   // Size of stack less any space reserved for stack overflow
894   size_t GetStackSize() const {
895     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
896   }
897 
898   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
899 
900   uint8_t* GetStackEnd() const {
901     return tlsPtr_.stack_end;
902   }
903 
904   // Set the stack end to the value to be used during a stack overflow.
905   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
906 
907   // Set the stack end to the value to be used during regular execution.
908   ALWAYS_INLINE void ResetDefaultStackEnd();
909 
910   bool IsHandlingStackOverflow() const {
911     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
912   }
913 
914   template<PointerSize pointer_size>
915   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
916     return ThreadOffsetFromTlsPtr<pointer_size>(
917         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
918   }
919 
920   template<PointerSize pointer_size>
921   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
922     return ThreadOffsetFromTlsPtr<pointer_size>(
923         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
924   }
925 
926   template<PointerSize pointer_size>
927   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
928     return ThreadOffsetFromTlsPtr<pointer_size>(
929         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
930         ManagedStack::TaggedTopQuickFrameOffset());
931   }
932 
933   const ManagedStack* GetManagedStack() const {
934     return &tlsPtr_.managed_stack;
935   }
936 
937   // Linked list recording fragments of managed stack.
938   void PushManagedStackFragment(ManagedStack* fragment) {
939     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
940   }
941   void PopManagedStackFragment(const ManagedStack& fragment) {
942     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
943   }
944 
945   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
946   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
947 
948   template<PointerSize pointer_size>
949   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
950     return ThreadOffsetFromTlsPtr<pointer_size>(
951         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
952         ManagedStack::TopShadowFrameOffset());
953   }
954 
955   // Is the given obj in one of this thread's JNI transition frames?
956   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
957 
958   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
959       REQUIRES_SHARED(Locks::mutator_lock_);
960 
961   BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
962     return tlsPtr_.top_handle_scope;
963   }
964 
965   void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
966     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
967     tlsPtr_.top_handle_scope = handle_scope;
968   }
969 
970   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
971     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
972     DCHECK(handle_scope != nullptr);
973     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
974     return handle_scope;
975   }
976 
977   template<PointerSize pointer_size>
978   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
979     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
980                                                                 top_handle_scope));
981   }
982 
983   template<PointerSize pointer_size>
984   static constexpr ThreadOffset<pointer_size> MutatorLockOffset() {
985     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
986                                                                 mutator_lock));
987   }
988 
989   template<PointerSize pointer_size>
990   static constexpr ThreadOffset<pointer_size> HeldMutexOffset(LockLevel level) {
991     DCHECK_LT(enum_cast<size_t>(level), arraysize(tlsPtr_.held_mutexes));
992     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
993                                                                 held_mutexes[level]));
994   }
995 
996   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
997     return tlsPtr_.top_reflective_handle_scope;
998   }
999 
1000   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
1001     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
1002     DCHECK_EQ(scope->GetThread(), this);
1003     tlsPtr_.top_reflective_handle_scope = scope;
1004   }
1005 
1006   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
1007     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
1008     DCHECK(handle_scope != nullptr);
1009     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
1010     return handle_scope;
1011   }
1012 
1013   bool GetIsGcMarking() const {
1014     CHECK(kUseReadBarrier);
1015     return tls32_.is_gc_marking;
1016   }
1017 
1018   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
1019 
1020   bool GetWeakRefAccessEnabled() const;  // Only safe for current thread.
1021 
1022   void SetWeakRefAccessEnabled(bool enabled) {
1023     CHECK(kUseReadBarrier);
1024     WeakRefAccessState new_state = enabled ?
1025         WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
1026     tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
1027   }
1028 
1029   uint32_t GetDisableThreadFlipCount() const {
1030     CHECK(kUseReadBarrier);
1031     return tls32_.disable_thread_flip_count;
1032   }
1033 
1034   void IncrementDisableThreadFlipCount() {
1035     CHECK(kUseReadBarrier);
1036     ++tls32_.disable_thread_flip_count;
1037   }
1038 
1039   void DecrementDisableThreadFlipCount() {
1040     CHECK(kUseReadBarrier);
1041     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1042     --tls32_.disable_thread_flip_count;
1043   }
1044 
1045   // Returns true if the thread is a runtime thread (eg from a ThreadPool).
1046   bool IsRuntimeThread() const {
1047     return is_runtime_thread_;
1048   }
1049 
1050   void SetIsRuntimeThread(bool is_runtime_thread) {
1051     is_runtime_thread_ = is_runtime_thread;
1052   }
1053 
1054   uint32_t CorePlatformApiCookie() {
1055     return core_platform_api_cookie_;
1056   }
1057 
1058   void SetCorePlatformApiCookie(uint32_t cookie) {
1059     core_platform_api_cookie_ = cookie;
1060   }
1061 
1062   // Returns true if the thread is allowed to load java classes.
1063   bool CanLoadClasses() const;
1064 
1065   // Returns the fake exception used to activate deoptimization.
1066   static mirror::Throwable* GetDeoptimizationException() {
1067     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1068     // represented by ObjPtr.
1069     return reinterpret_cast<mirror::Throwable*>(0x100);
1070   }
1071 
1072   // Currently deoptimization invokes verifier which can trigger class loading
1073   // and execute Java code, so there might be nested deoptimizations happening.
1074   // We need to save the ongoing deoptimization shadow frames and return
1075   // values on stacks.
1076   // 'from_code' denotes whether the deoptimization was explicitly made from
1077   // compiled code.
1078   // 'method_type' contains info on whether deoptimization should advance
1079   // dex_pc.
1080   void PushDeoptimizationContext(const JValue& return_value,
1081                                  bool is_reference,
1082                                  ObjPtr<mirror::Throwable> exception,
1083                                  bool from_code,
1084                                  DeoptimizationMethodType method_type)
1085       REQUIRES_SHARED(Locks::mutator_lock_);
1086   void PopDeoptimizationContext(JValue* result,
1087                                 ObjPtr<mirror::Throwable>* exception,
1088                                 bool* from_code,
1089                                 DeoptimizationMethodType* method_type)
1090       REQUIRES_SHARED(Locks::mutator_lock_);
1091   void AssertHasDeoptimizationContext()
1092       REQUIRES_SHARED(Locks::mutator_lock_);
1093   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1094   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
1095 
1096   // For debugger, find the shadow frame that corresponds to a frame id.
1097   // Or return null if there is none.
1098   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1099       REQUIRES_SHARED(Locks::mutator_lock_);
1100   // For debugger, find the bool array that keeps track of the updated vreg set
1101   // for a frame id.
1102   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1103   // For debugger, find the shadow frame that corresponds to a frame id. If
1104   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1105   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1106                                                uint32_t num_vregs,
1107                                                ArtMethod* method,
1108                                                uint32_t dex_pc)
1109       REQUIRES_SHARED(Locks::mutator_lock_);
1110 
1111   // Delete the entry that maps from frame_id to shadow_frame.
1112   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1113       REQUIRES_SHARED(Locks::mutator_lock_);
1114 
1115   // While getting this map requires holding the mutator lock (shared), manipulating it
1116   // should actually follow these rules:
1117   // (1) The owner of this map (the thread) can change it with its mutator lock.
1118   // (2) Other threads can read this map when the owner is suspended and they
1119   //     hold the mutator lock.
1120   // (3) Other threads can change this map when owning the mutator lock exclusively.
1121   //
1122   // The reason why (3) needs the mutator lock exclusively (and not just having
1123   // the owner suspended) is that we don't want other threads to concurrently read the map.
1124   //
1125   // TODO: Add a class abstraction to express these rules.
1126   std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
1127       REQUIRES_SHARED(Locks::mutator_lock_) {
1128     return tlsPtr_.instrumentation_stack;
1129   }
1130 
1131   std::vector<ArtMethod*>* GetStackTraceSample() const {
1132     DCHECK(!IsAotCompiler());
1133     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1134   }
1135 
1136   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1137     DCHECK(!IsAotCompiler());
1138     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1139   }
1140 
1141   verifier::VerifierDeps* GetVerifierDeps() const {
1142     DCHECK(IsAotCompiler());
1143     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1144   }
1145 
1146   // It is the responsibility of the caller to make sure the verifier_deps
1147   // entry in the thread is cleared before destruction of the actual VerifierDeps
1148   // object, or the thread.
1149   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1150     DCHECK(IsAotCompiler());
1151     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1152     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1153   }
1154 
1155   uint64_t GetTraceClockBase() const {
1156     return tls64_.trace_clock_base;
1157   }
1158 
1159   void SetTraceClockBase(uint64_t clock_base) {
1160     tls64_.trace_clock_base = clock_base;
1161   }
1162 
1163   BaseMutex* GetHeldMutex(LockLevel level) const {
1164     return tlsPtr_.held_mutexes[level];
1165   }
1166 
1167   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1168     tlsPtr_.held_mutexes[level] = mutex;
1169   }
1170 
1171   void ClearSuspendBarrier(AtomicInteger* target)
1172       REQUIRES(Locks::thread_suspend_count_lock_);
1173 
1174   bool ReadFlag(ThreadFlag flag) const {
1175     return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag);
1176   }
1177 
1178   void AtomicSetFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1179     tls32_.state_and_flags.fetch_or(enum_cast<uint32_t>(flag), order);
1180   }
1181 
1182   void AtomicClearFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1183     tls32_.state_and_flags.fetch_and(~enum_cast<uint32_t>(flag), order);
1184   }
1185 
1186   void ResetQuickAllocEntryPointsForThread();
1187 
1188   // Returns the remaining space in the TLAB.
1189   size_t TlabSize() const {
1190     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1191   }
1192 
1193   // Returns pos offset from start.
1194   size_t GetTlabPosOffset() const {
1195     return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
1196   }
1197 
1198   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1199   size_t TlabRemainingCapacity() const {
1200     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1201   }
1202 
1203   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1204   void ExpandTlab(size_t bytes) {
1205     tlsPtr_.thread_local_end += bytes;
1206     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1207   }
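  // Illustrative sketch (not part of the original header): the TLAB is described by pointers
  // satisfying thread_local_start <= thread_local_pos <= thread_local_end <= thread_local_limit.
  // Conceptually, AllocTlab() (declared below) is a pointer bump:
  //
  //   // if (TlabSize() >= bytes) { result = thread_local_pos; thread_local_pos += bytes; }
  //
  // TlabRemainingCapacity() additionally counts the [end, limit) range that ExpandTlab() can
  // hand out without requesting a new TLAB from the heap.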
1208 
1209   // Doesn't check that there is room.
1210   mirror::Object* AllocTlab(size_t bytes);
1211   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1212   bool HasTlab() const;
1213   void ResetTlab();
1214   uint8_t* GetTlabStart() {
1215     return tlsPtr_.thread_local_start;
1216   }
1217   uint8_t* GetTlabPos() {
1218     return tlsPtr_.thread_local_pos;
1219   }
1220   uint8_t* GetTlabEnd() {
1221     return tlsPtr_.thread_local_end;
1222   }
1223   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1224   // equal to a valid pointer.
1225   // TODO: does this need to be atomic?  I don't think so.
1226   void RemoveSuspendTrigger() {
1227     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1228   }
1229 
1230   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1231   // The next time a suspend check is done, it will load from the value at this address
1232   // and trigger a SIGSEGV.
1233   // Only needed if Runtime::implicit_suspend_checks_ is true and fully implemented.  It currently
1234   // is always false. Client code currently just looks at the thread flags directly to determine
1235   // whether we should suspend, so this call is currently unnecessary.
1236   void TriggerSuspend() {
1237     tlsPtr_.suspend_trigger = nullptr;
1238   }
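  // Illustrative sketch (not part of the original header): if implicit suspend checks were
  // enabled, generated code would conceptually perform
  //
  //   //   (void)*tlsPtr_.suspend_trigger;  // Load through the trigger pointer.
  //
  // at suspend points: harmless while the field points at itself (RemoveSuspendTrigger), but
  // faulting once TriggerSuspend() has set it to null, with the fault handler doing the suspend.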
1239 
1240 
1241   // Push an object onto the allocation stack.
1242   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1243       REQUIRES_SHARED(Locks::mutator_lock_);
1244 
1245   // Set the thread local allocation pointers to the given pointers.
1246   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1247                                      StackReference<mirror::Object>* end);
1248 
1249   // Resets the thread local allocation pointers.
1250   void RevokeThreadLocalAllocationStack();
1251 
1252   size_t GetThreadLocalBytesAllocated() const {
1253     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1254   }
1255 
1256   size_t GetThreadLocalObjectsAllocated() const {
1257     return tlsPtr_.thread_local_objects;
1258   }
1259 
1260   void* GetRosAllocRun(size_t index) const {
1261     return tlsPtr_.rosalloc_runs[index];
1262   }
1263 
1264   void SetRosAllocRun(size_t index, void* run) {
1265     tlsPtr_.rosalloc_runs[index] = run;
1266   }
1267 
1268   bool ProtectStack(bool fatal_on_error = true);
1269   bool UnprotectStack();
1270 
1271   bool IsTransitioningToRunnable() const {
1272     return tls32_.is_transitioning_to_runnable;
1273   }
1274 
1275   void SetIsTransitioningToRunnable(bool value) {
1276     tls32_.is_transitioning_to_runnable = value;
1277   }
1278 
1279   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1280     return --tls32_.force_interpreter_count;
1281   }
1282 
1283   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1284     return ++tls32_.force_interpreter_count;
1285   }
1286 
1287   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1288     tls32_.force_interpreter_count = value;
1289   }
1290 
1291   uint32_t ForceInterpreterCount() const {
1292     return tls32_.force_interpreter_count;
1293   }
1294 
1295   bool IsForceInterpreter() const {
1296     return tls32_.force_interpreter_count != 0;
1297   }
1298 
1299   bool IncrementMakeVisiblyInitializedCounter() {
1300     tls32_.make_visibly_initialized_counter += 1u;
1301     return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
1302   }
1303 
1304   void ClearMakeVisiblyInitializedCounter() {
1305     tls32_.make_visibly_initialized_counter = 0u;
1306   }
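
  // Illustrative sketch of the counter pattern above (hypothetical caller and
  // helper; the real trigger goes through the class linker, whose API is not
  // declared in this header):
  //
  //   if (self->IncrementMakeVisiblyInitializedCounter()) {
  //     // Trigger count reached: flush the batch of initialized-but-not-visibly-
  //     // initialized classes, then restart the count.
  //     MakeInitializedClassesVisiblyInitialized(self);  // Hypothetical helper.
  //     self->ClearMakeVisiblyInitializedCounter();
  //   }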
1307 
1308   void PushVerifier(verifier::MethodVerifier* verifier);
1309   void PopVerifier(verifier::MethodVerifier* verifier);
1310 
1311   void InitStringEntryPoints();
1312 
1313   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1314     if (kCheckDebugDisallowReadBarrierCount) {
1315       debug_disallow_read_barrier_ += delta;
1316     }
1317   }
1318 
1319   uint8_t GetDebugDisallowReadBarrierCount() const {
1320     return kCheckDebugDisallowReadBarrierCount ? debug_disallow_read_barrier_ : 0u;
1321   }
1322 
1323   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1324   // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1325   // it from being deleted.
1326   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1327 
1328   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1329   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1330   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
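
  // Illustrative sketch of the custom-TLS API above, with a hypothetical key and
  // data type (any plugin-chosen key string works):
  //
  //   class MyPluginData : public TLSData {
  //    public:
  //     explicit MyPluginData(int value) : value_(value) {}
  //     int value_;
  //   };
  //
  //   // The thread takes ownership and runs the destructor on thread exit or on
  //   // the next SetCustomTLS() call with the same key.
  //   self->SetCustomTLS("my-plugin-key", new MyPluginData(42));
  //   auto* data = static_cast<MyPluginData*>(self->GetCustomTLS("my-plugin-key"));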
1331 
1332   // Returns true if the current thread is the jit sensitive thread.
1333   bool IsJitSensitiveThread() const {
1334     return this == jit_sensitive_thread_;
1335   }
1336 
1337   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1338 
1339   // Returns true if StrictMode events are traced for the current thread.
1340   static bool IsSensitiveThread() {
1341     if (is_sensitive_thread_hook_ != nullptr) {
1342       return (*is_sensitive_thread_hook_)();
1343     }
1344     return false;
1345   }
1346 
1347   // Sets the read barrier marking entrypoints to be non-null.
1348   void SetReadBarrierEntrypoints();
1349 
1350   static jobject CreateCompileTimePeer(JNIEnv* env,
1351                                        const char* name,
1352                                        bool as_daemon,
1353                                        jobject thread_group)
1354       REQUIRES_SHARED(Locks::mutator_lock_);
1355 
1356   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1357     return &interpreter_cache_;
1358   }
1359 
1360   // Clear all thread-local interpreter caches.
1361   //
1362   // Since the caches are keyed by memory pointer to dex instructions, this must be
1363   // called when any dex code is unloaded (before different code gets loaded at the
1364   // same memory location).
1365   //
1366   // If the presence of a cache entry implies some pre-conditions, this must also be
1367   // called if the pre-conditions might no longer hold true.
1368   static void ClearAllInterpreterCaches();
1369 
1370   template<PointerSize pointer_size>
1371   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1372     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1373   }
1374 
1375   static constexpr int InterpreterCacheSizeLog2() {
1376     return WhichPowerOf2(InterpreterCache::kSize);
1377   }
1378 
1379   static constexpr uint32_t AllThreadFlags() {
1380     return enum_cast<uint32_t>(ThreadFlag::kLastFlag) |
1381            (enum_cast<uint32_t>(ThreadFlag::kLastFlag) - 1u);
1382   }
1383 
1384   static constexpr uint32_t SuspendOrCheckpointRequestFlags() {
1385     return enum_cast<uint32_t>(ThreadFlag::kSuspendRequest) |
1386            enum_cast<uint32_t>(ThreadFlag::kCheckpointRequest) |
1387            enum_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest);
1388   }
1389 
1390   static constexpr uint32_t FlipFunctionFlags() {
1391     return enum_cast<uint32_t>(ThreadFlag::kPendingFlipFunction) |
1392            enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction) |
1393            enum_cast<uint32_t>(ThreadFlag::kWaitingForFlipFunction);
1394   }
1395 
1396   static constexpr uint32_t StoredThreadStateValue(ThreadState state) {
1397     return StateAndFlags::EncodeState(state);
1398   }
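
  // Illustrative sketch: these masks are meant to be tested against the packed
  // 32-bit state-and-flags word in one operation. Conceptually, runtime-internal
  // code (StateAndFlags is private to Thread) can do:
  //
  //   StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
  //   if (state_and_flags.IsAnyOfFlagsSet(SuspendOrCheckpointRequestFlags())) {
  //     // A suspend, checkpoint, or empty-checkpoint request is pending.
  //   }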
1399 
1400   void ResetSharedMethodHotness() {
1401     tls32_.shared_method_hotness = kSharedMethodHotnessThreshold;
1402   }
1403 
1404   uint32_t GetSharedMethodHotness() const {
1405     return tls32_.shared_method_hotness;
1406   }
1407 
1408   uint32_t DecrementSharedMethodHotness() {
1409     tls32_.shared_method_hotness = (tls32_.shared_method_hotness - 1) & 0xffff;
1410     return tls32_.shared_method_hotness;
1411   }
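
  // Illustrative sketch of how the hotness counter above might be consumed
  // (simplified and hypothetical; the real interpreter integrates this with its
  // JIT plumbing and second-level counters):
  //
  //   if (self->DecrementSharedMethodHotness() == 0) {
  //     NotifyRuntimeOfHotSharedMethod(self, method);  // Hypothetical helper.
  //     self->ResetSharedMethodHotness();
  //   }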
1412 
1413  private:
1414   explicit Thread(bool daemon);
1415   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1416   void Destroy();
1417 
1418   // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1419   // observed to be set at the same time by instrumentation.
1420   void DeleteJPeer(JNIEnv* env);
1421 
1422   // Attaches the calling native thread to the runtime, returning the new native peer.
1423   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1424   template <typename PeerAction>
1425   static Thread* Attach(const char* thread_name,
1426                         bool as_daemon,
1427                         PeerAction p);
1428 
1429   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1430 
1431   template<bool kTransactionActive>
1432   static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1433                        ObjPtr<mirror::Object> peer,
1434                        jboolean thread_is_daemon,
1435                        jobject thread_group,
1436                        jobject thread_name,
1437                        jint thread_priority)
1438       REQUIRES_SHARED(Locks::mutator_lock_);
1439 
1440   // Avoid use; callers should use SetState instead.
1441   // Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
1442   // disabled by `kStoreStackTraces = false`).
1443   // NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
1444   // different states, so clang cannot perform the thread safety analysis.
1445   ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
1446     StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1447     ThreadState old_state = old_state_and_flags.GetState();
1448     if (old_state == new_state) {
1449       // Nothing to do.
1450     } else if (old_state == ThreadState::kRunnable) {
1451       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1452       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1453       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1454       TransitionToSuspendedAndRunCheckpoints(new_state);
1455       // Since we transitioned to a suspended state, check the pass barrier requests.
1456       PassActiveSuspendBarriers();
1457     } else {
1458       while (true) {
1459         StateAndFlags new_state_and_flags = old_state_and_flags;
1460         new_state_and_flags.SetState(new_state);
1461         if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(
1462                                               old_state_and_flags.GetValue(),
1463                                               new_state_and_flags.GetValue()))) {
1464           break;
1465         }
1466         // Reload state and flags.
1467         old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1468         DCHECK_EQ(old_state, old_state_and_flags.GetState());
1469       }
1470     }
1471     return old_state;
1472   }
1473 
1474   MutatorMutex* GetMutatorLock() RETURN_CAPABILITY(Locks::mutator_lock_) {
1475     DCHECK_EQ(tlsPtr_.mutator_lock, Locks::mutator_lock_);
1476     return tlsPtr_.mutator_lock;
1477   }
1478 
1479   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1480 
1481   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1482   void DumpStack(std::ostream& os,
1483                  bool dump_native_stack = true,
1484                  BacktraceMap* backtrace_map = nullptr,
1485                  bool force_dump_stack = false) const
1486       REQUIRES_SHARED(Locks::mutator_lock_);
1487 
1488   // Out-of-line conveniences for debugging in gdb.
1489   static Thread* CurrentFromGdb();  // Like Thread::Current.
1490   // Like Thread::Dump(std::cerr).
1491   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1492 
1493   static void* CreateCallback(void* arg);
1494 
1495   void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
1496       REQUIRES_SHARED(Locks::mutator_lock_);
1497   void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1498       REQUIRES_SHARED(Locks::mutator_lock_);
1499 
1500   // Initialize a thread.
1501   //
1502   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1503   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1504   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1505   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1506   // of false).
1507   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1508       REQUIRES(Locks::runtime_shutdown_lock_);
1509   void InitCardTable();
1510   void InitCpu();
1511   void CleanupCpu();
1512   void InitTlsEntryPoints();
1513   void InitTid();
1514   void InitPthreadKeySelf();
1515   bool InitStackHwm();
1516 
1517   void SetUpAlternateSignalStack();
1518   void TearDownAlternateSignalStack();
1519   void MadviseAwayAlternateSignalStack();
1520 
1521   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1522       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
1523       REQUIRES_SHARED(Locks::mutator_lock_);
1524 
1525   ALWAYS_INLINE void PassActiveSuspendBarriers()
1526       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1527 
1528   // Registers the current thread as the jit sensitive thread. Should be called just once.
1529   static void SetJitSensitiveThread() {
1530     if (jit_sensitive_thread_ == nullptr) {
1531       jit_sensitive_thread_ = Thread::Current();
1532     } else {
1533       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1534           << Thread::Current()->GetTid();
1535     }
1536   }
1537 
1538   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1539     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1540   }
1541 
1542   bool ModifySuspendCountInternal(Thread* self,
1543                                   int delta,
1544                                   AtomicInteger* suspend_barrier,
1545                                   SuspendReason reason)
1546       WARN_UNUSED
1547       REQUIRES(Locks::thread_suspend_count_lock_);
1548 
1549   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1550   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1551   // the kCheckpointRequest flag is cleared.
1552   void RunCheckpointFunction()
1553       REQUIRES(!Locks::thread_suspend_count_lock_)
1554       REQUIRES_SHARED(Locks::mutator_lock_);
1555   void RunEmptyCheckpoint();
1556 
1557   bool PassActiveSuspendBarriers(Thread* self)
1558       REQUIRES(!Locks::thread_suspend_count_lock_);
1559 
1560   // Install the protected region for implicit stack checks.
1561   void InstallImplicitProtection();
1562 
1563   template <bool kPrecise>
1564   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1565 
1566   static void SweepInterpreterCaches(IsMarkedVisitor* visitor)
1567       REQUIRES_SHARED(Locks::mutator_lock_);
1568 
1569   static bool IsAotCompiler();
1570 
1571   void ReleaseLongJumpContextInternal();
1572 
1573   void SetCachedThreadName(const char* name);
1574 
1575   // Helper class for manipulating the 32 bits of atomically changed state and flags.
1576   class StateAndFlags {
1577    public:
1578     explicit StateAndFlags(uint32_t value) : value_(value) {}
1579 
1580     uint32_t GetValue() const {
1581       return value_;
1582     }
1583 
1584     void SetValue(uint32_t value) {
1585       value_ = value;
1586     }
1587 
1588     bool IsAnyOfFlagsSet(uint32_t flags) const {
1589       DCHECK_EQ(flags & ~AllThreadFlags(), 0u);
1590       return (value_ & flags) != 0u;
1591     }
1592 
1593     bool IsFlagSet(ThreadFlag flag) const {
1594       return (value_ & enum_cast<uint32_t>(flag)) != 0u;
1595     }
1596 
1597     void SetFlag(ThreadFlag flag) {
1598       value_ |= enum_cast<uint32_t>(flag);
1599     }
1600 
1601     StateAndFlags WithFlag(ThreadFlag flag) const {
1602       StateAndFlags result = *this;
1603       result.SetFlag(flag);
1604       return result;
1605     }
1606 
1607     StateAndFlags WithoutFlag(ThreadFlag flag) const {
1608       StateAndFlags result = *this;
1609       result.ClearFlag(flag);
1610       return result;
1611     }
1612 
1613     void ClearFlag(ThreadFlag flag) {
1614       value_ &= ~enum_cast<uint32_t>(flag);
1615     }
1616 
1617     ThreadState GetState() const {
1618       ThreadState state = ThreadStateField::Decode(value_);
1619       ValidateThreadState(state);
1620       return state;
1621     }
1622 
1623     void SetState(ThreadState state) {
1624       ValidateThreadState(state);
1625       value_ = ThreadStateField::Update(state, value_);
1626     }
1627 
1628     StateAndFlags WithState(ThreadState state) const {
1629       StateAndFlags result = *this;
1630       result.SetState(state);
1631       return result;
1632     }
1633 
1634     static constexpr uint32_t EncodeState(ThreadState state) {
1635       ValidateThreadState(state);
1636       return ThreadStateField::Encode(state);
1637     }
1638 
1639    private:
1640     static constexpr void ValidateThreadState(ThreadState state) {
1641       if (kIsDebugBuild && state != ThreadState::kRunnable) {
1642         CHECK_GE(state, ThreadState::kTerminated);
1643         CHECK_LE(state, ThreadState::kSuspended);
1644         CHECK_NE(state, ThreadState::kObsoleteRunnable);
1645       }
1646     }
1647 
1648     // The value holds thread flags and thread state.
1649     uint32_t value_;
1650 
1651     static constexpr size_t kThreadStateBitSize = BitSizeOf<std::underlying_type_t<ThreadState>>();
1652     static constexpr size_t kThreadStatePosition = BitSizeOf<uint32_t>() - kThreadStateBitSize;
1653     using ThreadStateField = BitField<ThreadState, kThreadStatePosition, kThreadStateBitSize>;
1654     static_assert(
1655         WhichPowerOf2(enum_cast<uint32_t>(ThreadFlag::kLastFlag)) < kThreadStatePosition);
1656   };
1657   static_assert(sizeof(StateAndFlags) == sizeof(uint32_t), "Unexpected StateAndFlags size");
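
  // Illustrative sketch of the packing enforced above: the ThreadState occupies
  // the topmost bits of the 32-bit word (the width of ThreadState's underlying
  // type) and the ThreadFlag bits live below it, so an encoded state and the flag
  // mask never overlap. For example (runtime-internal code):
  //
  //   constexpr uint32_t kRunnableNoFlags = StateAndFlags::EncodeState(ThreadState::kRunnable);
  //   static_assert((kRunnableNoFlags & AllThreadFlags()) == 0u);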
1658 
1659   StateAndFlags GetStateAndFlags(std::memory_order order) const {
1660     return StateAndFlags(tls32_.state_and_flags.load(order));
1661   }
1662 
1663   // Format state and flags as a hex string. For diagnostic output.
1664   std::string StateAndFlagsAsHexString() const;
1665 
1666   // Run the flip function and, if requested, notify other threads that may have tried
1667   // to do that concurrently.
1668   void RunFlipFunction(Thread* self, bool notify) REQUIRES_SHARED(Locks::mutator_lock_);
1669 
1670   static void ThreadExitCallback(void* arg);
1671 
1672   // Maximum number of suspend barriers.
1673   static constexpr uint32_t kMaxSuspendBarriers = 3;
1674 
1675   // Has Thread::Startup been called?
1676   static bool is_started_;
1677 
1678   // TLS key used to retrieve the Thread*.
1679   static pthread_key_t pthread_key_self_;
1680 
1681   // Used to notify threads that they should attempt to resume; they will suspend again if
1682   // their suspend count is > 0.
1683   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1684 
1685   // Hook passed by the framework that returns true
1686   // when StrictMode events are traced for the current thread.
1687   static bool (*is_sensitive_thread_hook_)();
1688   // Stores the jit sensitive thread (which for now is the UI thread).
1689   static Thread* jit_sensitive_thread_;
1690 
1691   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
1692 
1693   /***********************************************************************************************/
1694   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1695   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1696   // first if possible.
1697   /***********************************************************************************************/
1698 
1699   struct PACKED(4) tls_32bit_sized_values {
1700     // We have no control over the size of 'bool', but want our boolean fields
1701     // to be 4-byte quantities.
1702     using bool32_t = uint32_t;
1703 
1704     explicit tls_32bit_sized_values(bool is_daemon)
1705         : state_and_flags(0u),
1706           suspend_count(0),
1707           thin_lock_thread_id(0),
1708           tid(0),
1709           daemon(is_daemon),
1710           throwing_OutOfMemoryError(false),
1711           no_thread_suspension(0),
1712           thread_exit_check_count(0),
1713           is_transitioning_to_runnable(false),
1714           is_gc_marking(false),
1715           weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
1716           disable_thread_flip_count(0),
1717           user_code_suspend_count(0),
1718           force_interpreter_count(0),
1719           make_visibly_initialized_counter(0),
1720           define_class_counter(0),
1721           num_name_readers(0),
1722           shared_method_hotness(kSharedMethodHotnessThreshold)
1723         {}
1724 
1725     // The state and flags field must be changed atomically so that flag values aren't lost.
1726     // See `StateAndFlags` for bit assignments of `ThreadFlag` and `ThreadState` values.
1727     // Keeping the state and flags together allows an atomic CAS to change from being
1728     // Suspended to Runnable without a suspend request occurring.
1729     Atomic<uint32_t> state_and_flags;
1730     static_assert(sizeof(state_and_flags) == sizeof(uint32_t),
1731                   "Size of state_and_flags and uint32 are different");
1732 
1733     // A non-zero value is used to tell the current thread to enter a safe point
1734     // at the next poll.
1735     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1736 
1737     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1738     // This is not to be confused with the native thread's tid, nor is it the value returned
1739     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1740     // important difference between this id and the ids visible to managed code is that these
1741     // ones get reused (to ensure that they fit in the number of bits available).
1742     uint32_t thin_lock_thread_id;
1743 
1744     // System thread id.
1745     uint32_t tid;
1746 
1747     // Is the thread a daemon?
1748     const bool32_t daemon;
1749 
1750     // A boolean telling us whether we're recursively throwing OOME.
1751     bool32_t throwing_OutOfMemoryError;
1752 
1753     // A positive value implies we're in a region where thread suspension isn't expected.
1754     uint32_t no_thread_suspension;
1755 
1756     // How many times has our pthread key's destructor been called?
1757     uint32_t thread_exit_check_count;
1758 
1759     // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1760     // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1761     // the rest of them.
1762     bool32_t is_transitioning_to_runnable;
1763 
1764     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1765     // thread local so that we can simplify the logic to check for the fast path of read barriers of
1766     // GC roots.
1767     bool32_t is_gc_marking;
1768 
1769     // Thread "interrupted" status; stays raised until queried or thrown.
1770     Atomic<bool32_t> interrupted;
1771 
1772     AtomicInteger park_state_;
1773 
1774     // Determines whether the thread is allowed to directly access a weak ref
1775     // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
1776     // This is used for concurrent reference processing of the CC collector only. This is thread
1777     // local so that we can enable/disable weak ref access by using a checkpoint and avoid a race
1778     // around the time weak ref access gets disabled and concurrent reference processing begins
1779     // (if weak ref access is disabled during a pause, this is not an issue.) Other collectors use
1780     // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath().  Can be
1781     // concurrently accessed by GetReferent() and set (by iterating over threads).
1782     // Can be changed from kEnabled to kVisiblyEnabled by readers. No other concurrent access is
1783     // possible when that happens.
1784     mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;
1785 
1786     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1787     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1788     // critical section enter.
1789     uint32_t disable_thread_flip_count;
1790 
1791     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1792     // suspended by the runtime from those suspended by user code.
1793     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1794     // told that AssertHeld should be good enough.
1795     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1796 
1797     // Count of how many times this thread has been forced into the interpreter. If this is not 0 the
1798     // thread must remain in interpreted code as much as possible.
1799     uint32_t force_interpreter_count;
1800 
1801     // Counter for calls to initialize a class that's initialized but not visibly initialized.
1802     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
1803     // make initialized classes visibly initialized. This is needed because we usually make
1804     // classes visibly initialized in batches but we do not want to be stuck with a class
1805     // initialized but not visibly initialized for a long time even if no more classes are
1806     // being initialized anymore.
1807     uint32_t make_visibly_initialized_counter;
1808 
1809     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
1810     // for threads to be done with class-definition work.
1811     uint32_t define_class_counter;
1812 
1813     // A count of the number of readers of tlsPtr_.name that may still be looking at a string they
1814     // retrieved.
1815     mutable std::atomic<uint32_t> num_name_readers;
1816     static_assert(std::atomic<uint32_t>::is_always_lock_free);
1817 
1818     // Thread-local hotness counter for shared memory methods. Initialized with
1819     // `kSharedMethodHotnessThreshold`. The interpreter decrements it and goes
1820     // into the runtime when hitting zero. Note that all previous decrements
1821     // could have been executed by another method than the one seeing zero.
1822     // There is a second level counter in `Jit::shared_method_counters_` to make
1823     // sure we at least have a few samples before compiling a method.
1824     uint32_t shared_method_hotness;
1825   } tls32_;
1826 
1827   struct PACKED(8) tls_64bit_sized_values {
1828     tls_64bit_sized_values() : trace_clock_base(0) {
1829     }
1830 
1831     // The clock base used for tracing.
1832     uint64_t trace_clock_base;
1833 
1834     RuntimeStats stats;
1835   } tls64_;
1836 
1837   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1838       tls_ptr_sized_values() : card_table(nullptr),
1839                                exception(nullptr),
1840                                stack_end(nullptr),
1841                                managed_stack(),
1842                                suspend_trigger(nullptr),
1843                                jni_env(nullptr),
1844                                tmp_jni_env(nullptr),
1845                                self(nullptr),
1846                                opeer(nullptr),
1847                                jpeer(nullptr),
1848                                stack_begin(nullptr),
1849                                stack_size(0),
1850                                deps_or_stack_trace_sample(),
1851                                wait_next(nullptr),
1852                                monitor_enter_object(nullptr),
1853                                top_handle_scope(nullptr),
1854                                class_loader_override(nullptr),
1855                                long_jump_context(nullptr),
1856                                instrumentation_stack(nullptr),
1857                                stacked_shadow_frame_record(nullptr),
1858                                deoptimization_context_stack(nullptr),
1859                                frame_id_to_shadow_frame(nullptr),
1860                                name(nullptr),
1861                                pthread_self(0),
1862                                last_no_thread_suspension_cause(nullptr),
1863                                checkpoint_function(nullptr),
1864                                thread_local_start(nullptr),
1865                                thread_local_pos(nullptr),
1866                                thread_local_end(nullptr),
1867                                thread_local_limit(nullptr),
1868                                thread_local_objects(0),
1869                                thread_local_alloc_stack_top(nullptr),
1870                                thread_local_alloc_stack_end(nullptr),
1871                                mutator_lock(nullptr),
1872                                flip_function(nullptr),
1873                                method_verifier(nullptr),
1874                                thread_local_mark_stack(nullptr),
1875                                async_exception(nullptr),
1876                                top_reflective_handle_scope(nullptr) {
1877       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1878     }
1879 
1880     // The biased card table, see CardTable for details.
1881     uint8_t* card_table;
1882 
1883     // The pending exception or null.
1884     mirror::Throwable* exception;
1885 
1886     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1887     // We leave extra space so there's room for the code that throws StackOverflowError.
1888     uint8_t* stack_end;
1889 
1890     // The top of the managed stack often manipulated directly by compiler generated code.
1891     ManagedStack managed_stack;
1892 
1893     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1894     // normally set to the address of itself.
1895     uintptr_t* suspend_trigger;
1896 
1897     // Every thread may have an associated JNI environment
1898     JNIEnvExt* jni_env;
1899 
1900     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1901     // created thread.
1902     JNIEnvExt* tmp_jni_env;
1903 
1904     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1905     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1906     // Thread::Current to give the address.
1907     Thread* self;
1908 
1909     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1910     // start up, until the thread is registered and the local opeer_ is used.
1911     mirror::Object* opeer;
1912     jobject jpeer;
1913 
1914     // The "lowest addressable byte" of the stack.
1915     uint8_t* stack_begin;
1916 
1917     // Size of the stack.
1918     size_t stack_size;
1919 
1920     // Sampling profiler and AOT verification cannot happen on the same run, so we share
1921     // the same entry for the stack trace and the verifier deps.
1922     union DepsOrStackTraceSample {
1923       DepsOrStackTraceSample() {
1924         verifier_deps = nullptr;
1925         stack_trace_sample = nullptr;
1926       }
1927       // Pointer to previous stack trace captured by sampling profiler.
1928       std::vector<ArtMethod*>* stack_trace_sample;
1929       // When doing AOT verification, per-thread VerifierDeps.
1930       verifier::VerifierDeps* verifier_deps;
1931     } deps_or_stack_trace_sample;
1932 
1933     // The next thread in the wait set this thread is part of or null if not waiting.
1934     Thread* wait_next;
1935 
1936     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1937     mirror::Object* monitor_enter_object;
1938 
1939     // Top of linked list of handle scopes or null for none.
1940     BaseHandleScope* top_handle_scope;
1941 
1942     // Needed to get the right ClassLoader in JNI_OnLoad, but also
1943     // useful for testing.
1944     jobject class_loader_override;
1945 
1946     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1947     Context* long_jump_context;
1948 
1949     // Additional stack used by method instrumentation to store method and return pc values.
1950     // Stored as a pointer since std::map is not PACKED.
1951     // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
1952     // ordered iteration on the keys (which are stack addresses).
1953     // Also see Thread::GetInstrumentationStack for the requirements on
1954     // manipulating and reading this map.
1955     std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1956 
1957     // For gc purpose, a shadow frame record stack that keeps track of:
1958     // 1) shadow frames under construction.
1959     // 2) deoptimization shadow frames.
1960     StackedShadowFrameRecord* stacked_shadow_frame_record;
1961 
1962     // Deoptimization return value record stack.
1963     DeoptimizationContextRecord* deoptimization_context_stack;
1964 
1965     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1966     // Shadow frames may be created before deoptimization happens so that the debugger can
1967     // set local values there first.
1968     FrameIdToShadowFrame* frame_id_to_shadow_frame;
1969 
1970     // A cached copy of the java.lang.Thread's (modified UTF-8) name.
1971     // If this is not null or kThreadNameDuringStartup, then it owns the malloc memory holding
1972     // the string. Updated in an RCU-like manner.
1973     std::atomic<const char*> name;
1974     static_assert(std::atomic<const char*>::is_always_lock_free);
1975 
1976     // A cached pthread_t for the pthread underlying this Thread*.
1977     pthread_t pthread_self;
1978 
1979     // If no_thread_suspension_ is > 0, what is causing that assertion.
1980     const char* last_no_thread_suspension_cause;
1981 
1982     // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1983     // requests another checkpoint, it goes to the checkpoint overflow list.
1984     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
1985 
1986     // Pending barriers that require passing, or null if none are pending. Installation is guarded by
1987     // Locks::thread_suspend_count_lock_.
1988     // They work effectively as art::Barrier, but are implemented directly using AtomicInteger and futex
1989     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1990     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1991 
1992     // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1993     uint8_t* thread_local_start;
1994 
1995     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1996     // potentially better performance.
1997     uint8_t* thread_local_pos;
1998     uint8_t* thread_local_end;
1999 
2000     // Thread local limit is how far the thread local buffer can be expanded; it is greater than or
2001     // equal to thread_local_end.
2002     uint8_t* thread_local_limit;
2003 
2004     size_t thread_local_objects;
2005 
2006     // Entrypoint function pointers.
2007     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
2008     JniEntryPoints jni_entrypoints;
2009     QuickEntryPoints quick_entrypoints;
2010 
2011     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
2012     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
2013 
2014     // Thread-local allocation stack data/routines.
2015     StackReference<mirror::Object>* thread_local_alloc_stack_top;
2016     StackReference<mirror::Object>* thread_local_alloc_stack_end;
2017 
2018     // Pointer to the mutator lock.
2019     // This is the same as `Locks::mutator_lock_` but cached for faster state transitions.
2020     MutatorMutex* mutator_lock;
2021 
2022     // Support for Mutex lock hierarchy bug detection.
2023     BaseMutex* held_mutexes[kLockLevelCount];
2024 
2025     // The function used for thread flip.
2026     Closure* flip_function;
2027 
2028     // Current method verifier, used for root marking.
2029     verifier::MethodVerifier* method_verifier;
2030 
2031     // Thread-local mark stack for the concurrent copying collector.
2032     gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
2033 
2034     // The pending async-exception or null.
2035     mirror::Throwable* async_exception;
2036 
2037     // Top of the linked-list for reflective-handle scopes or null if none.
2038     BaseReflectiveHandleScope* top_reflective_handle_scope;
2039   } tlsPtr_;
2040 
2041   // Small thread-local cache to be used from the interpreter.
2042   // It is keyed by dex instruction pointer.
2043   // The value is opcode-dependent (e.g. a field offset).
2044   InterpreterCache interpreter_cache_;
2045 
2046   // All fields below this line should not be accessed by native code. This means these fields can
2047   // be modified, rearranged, added or removed without having to modify asm_support.h
2048 
2049   // Guards the 'wait_monitor_' members.
2050   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
2051 
2052   // Condition variable waited upon during a wait.
2053   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
2054   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
2055   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
2056 
2057   // Debug disable read barrier count, only is checked for debug builds and only in the runtime.
2058   uint8_t debug_disallow_read_barrier_ = 0;
2059 
2060   // Note that this is not in the packed struct, so it may not be accessed for cross compilation.
2061   uintptr_t poison_object_cookie_ = 0;
2062 
2063   // Pending extra checkpoints if checkpoint_function_ is already used.
2064   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
2065 
2066   // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
2067   // compiled code or entrypoints.
2068   SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
2069       GUARDED_BY(Locks::custom_tls_lock_);
2070 
2071 #ifndef __BIONIC__
2072   __attribute__((tls_model("initial-exec")))
2073   static thread_local Thread* self_tls_;
2074 #endif
2075 
2076   // True if the thread is some form of runtime thread (ex, GC or JIT).
2077   bool is_runtime_thread_;
2078 
2079   // Set during execution of JNI methods that get field and method id's as part of determining if
2080   // the caller is allowed to access all fields and methods in the Core Platform API.
2081   uint32_t core_platform_api_cookie_ = 0;
2082 
2083   friend class gc::collector::SemiSpace;  // For getting stack traces.
2084   friend class Runtime;  // For CreatePeer.
2085   friend class QuickExceptionHandler;  // For dumping the stack.
2086   friend class ScopedThreadStateChange;
2087   friend class StubTest;  // For accessing entrypoints.
2088   friend class ThreadList;  // For ~Thread and Destroy.
2089 
2090   friend class EntrypointsOrderTest;  // To test the order of tls entries.
2091   friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.
2092 
2093   DISALLOW_COPY_AND_ASSIGN(Thread);
2094 };
2095 
2096 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
2097  public:
2098   ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
2099                                                bool enabled = true)
2100       ACQUIRE(Roles::uninterruptible_)
2101       : enabled_(enabled) {
2102     if (!enabled_) {
2103       return;
2104     }
2105     if (kIsDebugBuild) {
2106       self_ = Thread::Current();
2107       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
2108     } else {
2109       Roles::uninterruptible_.Acquire();  // No-op.
2110     }
2111   }
2112   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
2113     if (!enabled_) {
2114       return;
2115     }
2116     if (kIsDebugBuild) {
2117       self_->EndAssertNoThreadSuspension(old_cause_);
2118     } else {
2119       Roles::uninterruptible_.Release();  // No-op.
2120     }
2121   }
2122 
2123  private:
2124   Thread* self_;
2125   const bool enabled_;
2126   const char* old_cause_;
2127 };
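
// Illustrative usage sketch for the RAII helper above (hypothetical caller):
//
//   void ComputeWithoutSuspension(Thread* self) {
//     ScopedAssertNoThreadSuspension sants("Computing without suspension");
//     // Suspending this thread here is asserted against (debug builds); the
//     // assertion is released when `sants` goes out of scope.
//   }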
2128 
2129 class ScopedAllowThreadSuspension {
2130  public:
2131   ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
2132     if (kIsDebugBuild) {
2133       self_ = Thread::Current();
2134       old_cause_ = self_->EndAssertNoThreadSuspension();
2135     } else {
2136       Roles::uninterruptible_.Release();  // No-op.
2137     }
2138   }
2139   ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
2140     if (kIsDebugBuild) {
2141       CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
2142     } else {
2143       Roles::uninterruptible_.Acquire();  // No-op.
2144     }
2145   }
2146 
2147  private:
2148   Thread* self_;
2149   const char* old_cause_;
2150 };
2151 
2152 
2153 class ScopedStackedShadowFramePusher {
2154  public:
2155   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
2156     : self_(self), type_(type) {
2157     self_->PushStackedShadowFrame(sf, type);
2158   }
2159   ~ScopedStackedShadowFramePusher() {
2160     self_->PopStackedShadowFrame(type_);
2161   }
2162 
2163  private:
2164   Thread* const self_;
2165   const StackedShadowFrameType type_;
2166 
2167   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
2168 };
2169 
2170 // Only works for debug builds.
2171 class ScopedDebugDisallowReadBarriers {
2172  public:
2173   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
2174     self_->ModifyDebugDisallowReadBarrier(1);
2175   }
2176   ~ScopedDebugDisallowReadBarriers() {
2177     self_->ModifyDebugDisallowReadBarrier(-1);
2178   }
2179 
2180  private:
2181   Thread* const self_;
2182 };
2183 
2184 class ScopedTransitioningToRunnable : public ValueObject {
2185  public:
2186   explicit ScopedTransitioningToRunnable(Thread* self)
2187       : self_(self) {
2188     DCHECK_EQ(self, Thread::Current());
2189     if (kUseReadBarrier) {
2190       self_->SetIsTransitioningToRunnable(true);
2191     }
2192   }
2193 
2194   ~ScopedTransitioningToRunnable() {
2195     if (kUseReadBarrier) {
2196       self_->SetIsTransitioningToRunnable(false);
2197     }
2198   }
2199 
2200  private:
2201   Thread* const self_;
2202 };
2203 
2204 class ThreadLifecycleCallback {
2205  public:
2206   virtual ~ThreadLifecycleCallback() {}
2207 
2208   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2209   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2210 };
2211 
2212 // Store an exception from the thread and suppress it for the duration of this object.
2213 class ScopedExceptionStorage {
2214  public:
2215   explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2216   void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
2217   ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
2218 
2219  private:
2220   Thread* self_;
2221   StackHandleScope<1> hs_;
2222   MutableHandle<mirror::Throwable> excp_;
2223 };
2224 
2225 std::ostream& operator<<(std::ostream& os, const Thread& thread);
2226 std::ostream& operator<<(std::ostream& os, StackedShadowFrameType thread);
2227 
2228 }  // namespace art
2229 
2230 #endif  // ART_RUNTIME_THREAD_H_
2231