1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/bit_field.h"
30 #include "base/bit_utils.h"
31 #include "base/enums.h"
32 #include "base/locks.h"
33 #include "base/macros.h"
34 #include "base/safe_map.h"
35 #include "base/value_object.h"
36 #include "entrypoints/jni/jni_entrypoints.h"
37 #include "entrypoints/quick/quick_entrypoints.h"
38 #include "handle.h"
39 #include "handle_scope.h"
40 #include "interpreter/interpreter_cache.h"
41 #include "interpreter/shadow_frame.h"
42 #include "javaheapprof/javaheapsampler.h"
43 #include "jvalue.h"
44 #include "managed_stack.h"
45 #include "offsets.h"
46 #include "read_barrier_config.h"
47 #include "reflective_handle_scope.h"
48 #include "runtime_globals.h"
49 #include "runtime_stats.h"
50 #include "thread_state.h"
51 
52 namespace unwindstack {
53 class AndroidLocalUnwinder;
54 }  // namespace unwindstack
55 
56 namespace art {
57 
58 namespace gc {
59 namespace accounting {
60 template<class T> class AtomicStack;
61 }  // namespace accounting
62 namespace collector {
63 class SemiSpace;
64 }  // namespace collector
65 }  // namespace gc
66 
67 namespace instrumentation {
68 struct InstrumentationStackFrame;
69 }  // namespace instrumentation
70 
71 namespace mirror {
72 class Array;
73 class Class;
74 class ClassLoader;
75 class Object;
76 template<class T> class ObjectArray;
77 template<class T> class PrimitiveArray;
78 using IntArray = PrimitiveArray<int32_t>;
79 class StackTraceElement;
80 class String;
81 class Throwable;
82 }  // namespace mirror
83 
84 namespace verifier {
85 class MethodVerifier;
86 class VerifierDeps;
87 }  // namespace verifier
88 
89 class ArtMethod;
90 class BaseMutex;
91 class ClassLinker;
92 class Closure;
93 class Context;
94 class DeoptimizationContextRecord;
95 class DexFile;
96 class FrameIdToShadowFrame;
97 class IsMarkedVisitor;
98 class JavaVMExt;
99 class JNIEnvExt;
100 class Monitor;
101 class RootVisitor;
102 class ScopedObjectAccessAlreadyRunnable;
103 class ShadowFrame;
104 class StackedShadowFrameRecord;
105 enum class SuspendReason : char;
106 class Thread;
107 class ThreadList;
108 enum VisitRootFlags : uint8_t;
109 
110 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
111 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
112 // on.
113 class TLSData {
114  public:
115   virtual ~TLSData() {}
116 };
117 
118 // Thread priorities. These must match the Thread.MIN_PRIORITY,
119 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
120 enum ThreadPriority {
121   kMinThreadPriority = 1,
122   kNormThreadPriority = 5,
123   kMaxThreadPriority = 10,
124 };
125 
126 enum class ThreadFlag : uint32_t {
127   // If set, implies that suspend_count_ > 0 and the Thread should enter the safepoint handler.
128   kSuspendRequest = 1u << 0,
129 
130   // Request that the thread do some checkpoint work and then continue.
131   kCheckpointRequest = 1u << 1,
132 
133   // Request that the thread do an empty checkpoint and then continue.
134   kEmptyCheckpointRequest = 1u << 2,
135 
136   // Register that at least 1 suspend barrier needs to be passed.
137   kActiveSuspendBarrier = 1u << 3,
138 
139   // Marks that a "flip function" needs to be executed on this thread.
140   kPendingFlipFunction = 1u << 4,
141 
142   // Marks that the "flip function" is being executed by another thread.
143   //
144   // This is used to guard against multiple threads trying to run the
145   // "flip function" for the same thread while the thread is suspended.
146   //
147   // This is not needed when the thread is running the flip function
148   // on its own after transitioning to Runnable.
149   kRunningFlipFunction = 1u << 5,
150 
151   // Marks that a thread is waiting for the "flip function" to complete.
152   //
153   // This is used to check if we need to broadcast the completion of the
154   // "flip function" to other threads. See also `kRunningFlipFunction`.
155   kWaitingForFlipFunction = 1u << 6,
156 
157   // Request that compiled JNI stubs do not transition to Native or Runnable with
158   // inlined code, but take a slow path for monitoring method entry and exit events.
159   kMonitorJniEntryExit = 1u << 7,
160 
161   // Indicates the last flag. Used for checking that the flags do not overlap thread state.
162   kLastFlag = kMonitorJniEntryExit
163 };
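// The flags above share one 32-bit word with the ThreadState (see the
// StateAndFlags accessors and ThreadFlagsOffset() below), so testing a flag is
// a single load plus a bit mask. A minimal sketch of such a test, mirroring
// ReadFlag() further down (illustrative only, not the actual bit-field code):
//
//   uint32_t state_and_flags = tls32_.state_and_flags.load(std::memory_order_relaxed);
//   bool suspend_requested =
//       (state_and_flags & enum_cast<uint32_t>(ThreadFlag::kSuspendRequest)) != 0;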
164 
165 enum class StackedShadowFrameType {
166   kShadowFrameUnderConstruction,
167   kDeoptimizationShadowFrame,
168 };
169 
170 // The type of method that triggers deoptimization. It contains info on whether
171 // the deoptimized method should advance dex_pc.
172 enum class DeoptimizationMethodType {
173   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
174   kDefault     // dex pc may or may not advance depending on other conditions.
175 };
176 
177 // For the CC collector, normal weak reference access can be disabled on a per-thread basis while
178 // processing references.  After finishing, the reference processor asynchronously sets the
179 // per-thread flags back to kEnabled with release memory ordering semantics. Each mutator thread
180 // should check its flag with acquire semantics before assuming that it is enabled. However,
181 // that is often too expensive, so the reading thread sets it to kVisiblyEnabled after seeing it
182 // kEnabled.  The Reference.get() intrinsic can thus read it in relaxed mode, and reread (by
183 // resorting to the slow path) with acquire semantics if it sees a value of kEnabled rather than
184 // kVisiblyEnabled.
185 enum class WeakRefAccessState : int32_t {
186   kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
187   kEnabled,
188   kDisabled
189 };
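// A reader-side sketch of the protocol described above (illustrative only; the
// real fast path lives in the Reference.get() intrinsic and its slow path):
//
//   WeakRefAccessState s =
//       tls32_.weak_ref_access_enabled.load(std::memory_order_relaxed);
//   if (s == WeakRefAccessState::kVisiblyEnabled) {
//     // Fast path: access is known to be enabled, no fence needed.
//   } else {
//     // Slow path: reread with acquire semantics. If the reread sees kEnabled,
//     // store kVisiblyEnabled so later reads can stay relaxed; if it sees
//     // kDisabled, wait for the reference processor to re-enable access.
//   }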
190 
191 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
192 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
193 
194 static constexpr size_t kSharedMethodHotnessThreshold = 0x1fff;
195 
196 // Thread's stack layout for implicit stack overflow checks:
197 //
198 //   +---------------------+  <- highest address of stack memory
199 //   |                     |
200 //   .                     .  <- SP
201 //   |                     |
202 //   |                     |
203 //   +---------------------+  <- stack_end
204 //   |                     |
205 //   |  Gap                |
206 //   |                     |
207 //   +---------------------+  <- stack_begin
208 //   |                     |
209 //   | Protected region    |
210 //   |                     |
211 //   +---------------------+  <- lowest address of stack memory
212 //
213 // The stack always grows down in memory.  At the lowest address is a region of memory
214 // that is protected with mprotect(PROT_NONE).  Any attempt to read/write to this region will
215 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
216 // between the stack_end and the highest address in stack memory.  An implicit stack
217 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
218 // If the thread's SP is below the stack_end address this will be a read into the protected
219 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
220 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
221 // if the thread makes a call out to a native function (through JNI), that native function
222 // might only have 4K of memory (if the SP is adjacent to stack_end).
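//
// A sketch of what the implicit check amounts to at method entry (illustrative
// only; real code generation emits an architecture-specific probe instruction,
// and `sp` here stands for the thread's current stack pointer):
//
//   // Probe the address kStackOverflowImplicitCheckSize bytes below SP. If SP
//   // is below stack_end, the probe lands in the protected region, faults, and
//   // the SIGSEGV handler raises a StackOverflowError on this thread.
//   volatile uint8_t* probe = sp - Thread::kStackOverflowImplicitCheckSize;
//   (void)*probe;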
223 
224 class Thread {
225  public:
226   static const size_t kStackOverflowImplicitCheckSize;
227   static constexpr bool kVerifyStack = kIsDebugBuild;
228 
229   // Creates a new native thread corresponding to the given managed peer.
230   // Used to implement Thread.start.
231   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
232 
233   // Attaches the calling native thread to the runtime, returning the new native peer.
234   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
235   static Thread* Attach(const char* thread_name,
236                         bool as_daemon,
237                         jobject thread_group,
238                         bool create_peer,
239                         bool should_run_callbacks);
240   // Attaches the calling native thread to the runtime, returning the new native peer.
241   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
242 
243   // Reset internal state of child thread after fork.
244   void InitAfterFork();
245 
246   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
247   // high cost and so we favor passing self around when possible.
248   // TODO: mark as PURE so the compiler may coalesce and remove?
249   static Thread* Current();
250 
251   // Get the thread from the JNI environment.
252   static Thread* ForEnv(JNIEnv* env);
253 
254   // On a runnable thread, check for pending thread suspension request and handle if pending.
255   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
256 
257   // Process pending thread suspension request and handle if pending.
258   void CheckSuspend(bool implicit = false) REQUIRES_SHARED(Locks::mutator_lock_);
259 
260   // Process a pending empty checkpoint if pending.
261   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
262   void CheckEmptyCheckpointFromMutex();
263 
264   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
265                                    ObjPtr<mirror::Object> thread_peer)
266       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
267       REQUIRES_SHARED(Locks::mutator_lock_);
268   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
269       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
270       REQUIRES_SHARED(Locks::mutator_lock_);
271 
272   // Translates 172 to pAllocArrayFromCode and so on.
273   template<PointerSize size_of_pointers>
274   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
275 
276   // Dumps a one-line summary of thread state (used for operator<<).
277   void ShortDump(std::ostream& os) const;
278 
279   // Order of threads for ANRs (ANRs can be trimmed, so we print important ones first).
280   enum class DumpOrder : uint8_t {
281     kMain,     // Always print the main thread first (there might not be one).
282     kBlocked,  // Then print all threads that are blocked due to waiting on a lock.
283     kLocked,   // Then print all threads that are holding some lock already.
284     kDefault,  // Print all other threads which might not be interesting for ANR.
285   };
286 
287   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
288   DumpOrder Dump(std::ostream& os,
289                  bool dump_native_stack = true,
290                  bool force_dump_stack = false) const
291       REQUIRES_SHARED(Locks::mutator_lock_);
292   DumpOrder Dump(std::ostream& os,
293                  unwindstack::AndroidLocalUnwinder& unwinder,
294                  bool dump_native_stack = true,
295                  bool force_dump_stack = false) const
296       REQUIRES_SHARED(Locks::mutator_lock_);
297 
298   DumpOrder DumpJavaStack(std::ostream& os,
299                           bool check_suspended = true,
300                           bool dump_locks = true) const
301       REQUIRES_SHARED(Locks::mutator_lock_);
302 
303   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
304   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
305   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
306       REQUIRES_SHARED(Locks::mutator_lock_);
307 
308   ThreadState GetState() const {
309     return GetStateAndFlags(std::memory_order_relaxed).GetState();
310   }
311 
312   ThreadState SetState(ThreadState new_state);
313 
314   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
315     return tls32_.suspend_count;
316   }
317 
318   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
319                                                Locks::user_code_suspension_lock_) {
320     return tls32_.user_code_suspend_count;
321   }
322 
323   bool IsSuspended() const {
324     StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
325     return state_and_flags.GetState() != ThreadState::kRunnable &&
326            state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
327   }
328 
329   void DecrDefineClassCount() {
330     tls32_.define_class_counter--;
331   }
332 
333   void IncrDefineClassCount() {
334     tls32_.define_class_counter++;
335   }
336   uint32_t GetDefineClassCount() const {
337     return tls32_.define_class_counter;
338   }
339 
340   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
341   // release thread_suspend_count_lock_ internally.
342   ALWAYS_INLINE
343   bool ModifySuspendCount(Thread* self,
344                           int delta,
345                           AtomicInteger* suspend_barrier,
346                           SuspendReason reason)
347       WARN_UNUSED
348       REQUIRES(Locks::thread_suspend_count_lock_);
349 
350   // Requests a checkpoint closure to run on another thread. The closure will be run when the
351   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
352   // originating from a compiler generated suspend point check. This returns true if the closure
353   // was added and will (eventually) be executed. It returns false otherwise.
354   //
355   // Since multiple closures can be queued and some closures can delay other threads from running,
356   // no closure should attempt to suspend another thread while running.
357   // TODO We should add some debug option that verifies this.
358   //
359   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
360   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
361   // acquires it.
362   bool RequestCheckpoint(Closure* function)
363       REQUIRES(Locks::thread_suspend_count_lock_);
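  // A usage sketch for RequestCheckpoint (illustrative only; it assumes the
  // art::Closure interface of a single virtual Run(Thread*) method and that the
  // caller holds thread_suspend_count_lock_ as required above):
  //
  //   class DumpCheckpoint : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       std::ostringstream os;
  //       thread->ShortDump(os);  // Runs on behalf of `thread`.
  //     }
  //   };
  //
  //   DumpCheckpoint checkpoint;
  //   MutexLock mu(self, *Locks::thread_suspend_count_lock_);
  //   if (!target->RequestCheckpoint(&checkpoint)) {
  //     // The closure was not added (e.g. `target` was not runnable); the caller
  //     // must fall back to another mechanism such as RequestSynchronousCheckpoint.
  //   }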
364 
365   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
366   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
367   // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
368   // will go into while it is awaiting the checkpoint to be run.
369   // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
370   // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
371   // for the closure or the rest of the system.
372   // NB Since multiple closures can be queued and some closures can delay other threads from running,
373   // no closure should attempt to suspend another thread while running.
374   bool RequestSynchronousCheckpoint(Closure* function,
375                                     ThreadState suspend_state = ThreadState::kWaiting)
376       REQUIRES_SHARED(Locks::mutator_lock_)
377       RELEASE(Locks::thread_list_lock_)
378       REQUIRES(!Locks::thread_suspend_count_lock_);
379 
380   bool RequestEmptyCheckpoint()
381       REQUIRES(Locks::thread_suspend_count_lock_);
382 
383   // Set the flip function. This is done with all threads suspended, except for the calling thread.
384   void SetFlipFunction(Closure* function);
385 
386   // Ensure that the thread flip function has started running. If no other thread is executing
387   // it, the calling thread shall run the flip function and then notify other threads
388   // that have tried to do that concurrently. After this function returns, the
389   // `ThreadFlag::kPendingFlipFunction` flag is cleared, but another thread may still be
390   // running the flip function, as indicated by `ThreadFlag::kRunningFlipFunction`.
391   void EnsureFlipFunctionStarted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
392 
393   // Wait for the flip function to complete if still running on another thread.
394   void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
395 
396   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
397     CHECK(gUseReadBarrier);
398     return tlsPtr_.thread_local_mark_stack;
399   }
400   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
401     CHECK(gUseReadBarrier);
402     tlsPtr_.thread_local_mark_stack = stack;
403   }
404 
405   uint8_t* GetThreadLocalGcBuffer() {
406     DCHECK(gUseUserfaultfd);
407     return tlsPtr_.thread_local_gc_buffer;
408   }
409   void SetThreadLocalGcBuffer(uint8_t* buf) {
410     DCHECK(gUseUserfaultfd);
411     tlsPtr_.thread_local_gc_buffer = buf;
412   }
413 
414   // Called when the thread detects that its thread_suspend_count_ is non-zero. Gives up its
415   // share of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
416   void FullSuspendCheck(bool implicit = false)
417       REQUIRES(!Locks::thread_suspend_count_lock_)
418       REQUIRES_SHARED(Locks::mutator_lock_);
419 
420   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
421   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
422       REQUIRES(!Locks::thread_suspend_count_lock_)
423       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
424 
425   // Transition from runnable into a state where mutator privileges are denied. Releases share of
426   // mutator lock.
427   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
428       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
429       UNLOCK_FUNCTION(Locks::mutator_lock_);
430 
431   // Once called thread suspension will cause an assertion failure.
432   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
433     Roles::uninterruptible_.Acquire();  // No-op.
434     if (kIsDebugBuild) {
435       CHECK(cause != nullptr);
436       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
437       tls32_.no_thread_suspension++;
438       tlsPtr_.last_no_thread_suspension_cause = cause;
439       return previous_cause;
440     } else {
441       return nullptr;
442     }
443   }
444 
445   // End region where no thread suspension is expected.
446   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
447     if (kIsDebugBuild) {
448       CHECK_IMPLIES(old_cause == nullptr, tls32_.no_thread_suspension == 1);
449       CHECK_GT(tls32_.no_thread_suspension, 0U);
450       tls32_.no_thread_suspension--;
451       tlsPtr_.last_no_thread_suspension_cause = old_cause;
452     }
453     Roles::uninterruptible_.Release();  // No-op.
454   }
455 
456   // End region where no thread suspension is expected. Returns the current open region in case we
457   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
458   // is larger than one.
459   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
460     const char* ret = nullptr;
461     if (kIsDebugBuild) {
462       CHECK_EQ(tls32_.no_thread_suspension, 1u);
463       tls32_.no_thread_suspension--;
464       ret = tlsPtr_.last_no_thread_suspension_cause;
465       tlsPtr_.last_no_thread_suspension_cause = nullptr;
466     }
467     Roles::uninterruptible_.Release();  // No-op.
468     return ret;
469   }
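  // Usage sketch for the assertion region above (illustrative only; a RAII
  // helper, ScopedAssertNoThreadSuspension, normally wraps this pairing):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   // ... code that must not suspend: no allocation, no blocking locks ...
  //   self->EndAssertNoThreadSuspension(old_cause);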
470 
471   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
472 
473   // Return true if thread suspension is allowable.
474   bool IsThreadSuspensionAllowable() const;
475 
476   bool IsDaemon() const {
477     return tls32_.daemon;
478   }
479 
480   size_t NumberOfHeldMutexes() const;
481 
482   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
483 
484   /*
485    * Changes the priority of this thread to match that of the java.lang.Thread object.
486    *
487    * We map a priority value from 1-10 to Linux "nice" values, where lower
488    * numbers indicate higher priority.
489    */
490   void SetNativePriority(int newPriority);
491 
492   /*
493    * Returns the priority of this thread by querying the system.
494    * This is useful when attaching a thread through JNI.
495    *
496    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
497    */
498   int GetNativePriority() const;
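  // Illustrative sketch of the mapping described above (hypothetical values;
  // the actual table lives in the platform-specific implementation):
  //
  //   Java priority:  1 (MIN)   5 (NORM)   10 (MAX)
  //   Linux nice:      +19         0         -8       (lower nice = higher priority)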
499 
500   // Guaranteed to be non-zero.
501   uint32_t GetThreadId() const {
502     return tls32_.thin_lock_thread_id;
503   }
504 
505   pid_t GetTid() const {
506     return tls32_.tid;
507   }
508 
509   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
510   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
511 
512   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
513   // allocation, or locking.
514   void GetThreadName(std::string& name) const;
515 
516   // Sets the thread's name.
517   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
518 
519   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
520   uint64_t GetCpuMicroTime() const;
521 
522   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
523     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
524     CHECK(tlsPtr_.jpeer == nullptr);
525     return tlsPtr_.opeer;
526   }
527   // GetPeer is not safe if called on another thread in the middle of the CC thread flip, since
528   // the thread's stack may not have been flipped yet and the peer may be a from-space (stale) ref.
529   // This function will explicitly mark/forward it.
530   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
531 
532   bool HasPeer() const {
533     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
534   }
535 
536   RuntimeStats* GetStats() {
537     return &tls64_.stats;
538   }
539 
540   bool IsStillStarting() const;
541 
542   bool IsExceptionPending() const {
543     return tlsPtr_.exception != nullptr;
544   }
545 
546   bool IsAsyncExceptionPending() const {
547     return tlsPtr_.async_exception != nullptr;
548   }
549 
550   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
551     return tlsPtr_.exception;
552   }
553 
554   void AssertPendingException() const;
555   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
556   void AssertNoPendingException() const;
557   void AssertNoPendingExceptionForNewException(const char* msg) const;
558 
559   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
560 
561   // Set an exception that is asynchronously thrown from a different thread. This will be checked
562   // periodically and might overwrite the current 'Exception'. This can only be called from a
563   // checkpoint.
564   //
565   // The caller should also make sure that the thread has been deoptimized so that the exception
566   // could be detected on back-edges.
567   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
568       REQUIRES_SHARED(Locks::mutator_lock_);
569 
570   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
571     tlsPtr_.exception = nullptr;
572   }
573 
574   // Move the current async-exception to the main exception. This should be called when the current
575   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
576   // that needs to be dealt with, false otherwise.
577   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
578 
579   // Find the catch block and perform a long jump to the appropriate exception handler. When
580   // is_method_exit_exception is true, the exception was thrown by the method exit callback and we
581   // should not send a method unwind event for the method on top of the stack, since the method
582   // exit callback was already called.
583   NO_RETURN void QuickDeliverException(bool is_method_exit_exception = false)
584       REQUIRES_SHARED(Locks::mutator_lock_);
585 
586   Context* GetLongJumpContext();
587   void ReleaseLongJumpContext(Context* context) {
588     if (tlsPtr_.long_jump_context != nullptr) {
589       ReleaseLongJumpContextInternal();
590     }
591     tlsPtr_.long_jump_context = context;
592   }
593 
594   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
595   // abort the runtime iff abort_on_error is true.
596   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
597                               bool check_suspended = true,
598                               bool abort_on_error = true) const
599       REQUIRES_SHARED(Locks::mutator_lock_);
600 
601   // Returns whether the given exception was thrown by the current Java method being executed
602   // (Note that this includes native Java methods).
603   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
604       REQUIRES_SHARED(Locks::mutator_lock_);
605 
606   void SetTopOfStack(ArtMethod** top_method) {
607     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
608   }
609 
610   void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
611     tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
612   }
613 
614   void SetTopOfShadowStack(ShadowFrame* top) {
615     tlsPtr_.managed_stack.SetTopShadowFrame(top);
616   }
617 
618   bool HasManagedStack() const {
619     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
620   }
621 
622   // If 'msg' is null, no detail message is set.
623   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
624       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
625 
626   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
627   // used as the new exception's cause.
628   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
629       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
630 
631   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
632       __attribute__((format(printf, 3, 4)))
633       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
634 
635   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
636       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
637 
638   // OutOfMemoryError is special, because we need to pre-allocate an instance.
639   // Only the GC should call this.
640   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
641       REQUIRES(!Roles::uninterruptible_);
642 
643   static void Startup();
644   static void FinishStartup();
645   static void Shutdown();
646 
647   // Notify this thread's thread-group that this thread has started.
648   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
649   //       is null, the thread's thread-group is loaded from the peer.
650   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
651       REQUIRES_SHARED(Locks::mutator_lock_);
652 
653   // JNI methods
654   JNIEnvExt* GetJniEnv() const {
655     return tlsPtr_.jni_env;
656   }
657 
658   // Convert a jobject into an Object*.
659   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
660   // Checks if the weak global ref has been cleared by the GC without decoding it.
661   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
662 
663   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
664     return tlsPtr_.monitor_enter_object;
665   }
666 
667   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
668     tlsPtr_.monitor_enter_object = obj;
669   }
670 
671   // Implements java.lang.Thread.interrupted.
672   bool Interrupted();
673   // Implements java.lang.Thread.isInterrupted.
674   bool IsInterrupted();
675   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
676   void SetInterrupted(bool i) {
677     tls32_.interrupted.store(i, std::memory_order_seq_cst);
678   }
679   void Notify() REQUIRES(!wait_mutex_);
680 
681   ALWAYS_INLINE void PoisonObjectPointers() {
682     ++poison_object_cookie_;
683   }
684 
685   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
686 
687   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
688     return poison_object_cookie_;
689   }
690 
691   // Parking for 0ns of relative time means an untimed park; a negative time (though it
692   // should be handled in Java code) returns immediately.
693   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
694   void Unpark();
695 
696  private:
697   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
698 
699  public:
700   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
701     return wait_mutex_;
702   }
703 
704   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
705     return wait_cond_;
706   }
707 
708   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
709     return wait_monitor_;
710   }
711 
712   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
713     wait_monitor_ = mon;
714   }
715 
716   // Waiter linked-list support.
717   Thread* GetWaitNext() const {
718     return tlsPtr_.wait_next;
719   }
720 
721   void SetWaitNext(Thread* next) {
722     tlsPtr_.wait_next = next;
723   }
724 
725   jobject GetClassLoaderOverride() {
726     return tlsPtr_.class_loader_override;
727   }
728 
729   void SetClassLoaderOverride(jobject class_loader_override);
730 
731   // Create the internal representation of a stack trace, which is more time-
732   // and space-efficient to compute than the StackTraceElement[].
733   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
734       REQUIRES_SHARED(Locks::mutator_lock_);
735 
736   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
737   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
738   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
739   // with the number of valid frames in the returned array.
740   static jobjectArray InternalStackTraceToStackTraceElementArray(
741       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
742       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
743       REQUIRES_SHARED(Locks::mutator_lock_);
744 
745   static jint InternalStackTraceToStackFrameInfoArray(
746       const ScopedObjectAccessAlreadyRunnable& soa,
747       jlong mode,  // See java.lang.StackStreamFactory for the mode flags
748       jobject internal,
749       jint startLevel,
750       jint batchSize,
751       jint startIndex,
752       jobjectArray output_array)  // java.lang.StackFrameInfo[]
753       REQUIRES_SHARED(Locks::mutator_lock_);
754 
755   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
756       REQUIRES_SHARED(Locks::mutator_lock_);
757 
758   bool HasDebuggerShadowFrames() const {
759     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
760   }
761 
762   // This is done by GC using a checkpoint (or in a stop-the-world pause).
763   void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
764 
765   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
766       REQUIRES_SHARED(Locks::mutator_lock_);
767 
768   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
769       REQUIRES(Locks::mutator_lock_);
770 
771   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
772     if (kVerifyStack) {
773       VerifyStackImpl();
774     }
775   }
776 
777   //
778   // Offsets of various members of native Thread class, used by compiled code.
779   //
780 
781   template<PointerSize pointer_size>
782   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
783     return ThreadOffset<pointer_size>(
784         OFFSETOF_MEMBER(Thread, tls32_) +
785         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
786   }
787 
788   template<PointerSize pointer_size>
789   static constexpr ThreadOffset<pointer_size> TidOffset() {
790     return ThreadOffset<pointer_size>(
791         OFFSETOF_MEMBER(Thread, tls32_) +
792         OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
793   }
794 
795   template<PointerSize pointer_size>
796   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
797     return ThreadOffset<pointer_size>(
798         OFFSETOF_MEMBER(Thread, tls32_) +
799         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
800   }
801 
802   template<PointerSize pointer_size>
803   static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
804     return ThreadOffset<pointer_size>(
805         OFFSETOF_MEMBER(Thread, tls32_) +
806         OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
807   }
808 
809   template<PointerSize pointer_size>
810   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
811     return ThreadOffset<pointer_size>(
812         OFFSETOF_MEMBER(Thread, tls32_) +
813         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
814   }
815 
816   template<PointerSize pointer_size>
817   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
818     return ThreadOffset<pointer_size>(
819         OFFSETOF_MEMBER(Thread, tls32_) +
820         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
821   }
822 
823   template <PointerSize pointer_size>
824   static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
825     return ThreadOffset<pointer_size>(
826         OFFSETOF_MEMBER(Thread, tls32_) +
827         OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
828   }
829 
830   static constexpr size_t IsGcMarkingSize() {
831     return sizeof(tls32_.is_gc_marking);
832   }
833 
834   template<PointerSize pointer_size>
835   static constexpr ThreadOffset<pointer_size> SharedMethodHotnessOffset() {
836     return ThreadOffset<pointer_size>(
837         OFFSETOF_MEMBER(Thread, tls32_) +
838         OFFSETOF_MEMBER(tls_32bit_sized_values, shared_method_hotness));
839   }
840 
841   // Deoptimize the Java stack.
842   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
843 
844  private:
845   template<PointerSize pointer_size>
846   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
847     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
848     size_t scale = (pointer_size > kRuntimePointerSize) ?
849       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
850     size_t shrink = (kRuntimePointerSize > pointer_size) ?
851       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
852     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
853   }
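  // Worked example (illustrative, assuming the member of interest is preceded
  // only by pointer-sized fields): with kRuntimePointerSize == PointerSize::k64,
  // the third pointer in tls_ptr_sized_values has tls_ptr_offset 16. Generating
  // code for PointerSize::k32 gives shrink == 2, so the result is
  // OFFSETOF_MEMBER(Thread, tlsPtr_) + 16 / 2, i.e. the third 4-byte slot of the
  // 32-bit layout. When the target pointer size is larger than the runtime's,
  // `scale` grows the offset in the same way.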
854 
855  public:
856   template<PointerSize pointer_size>
857   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
858       size_t quick_entrypoint_offset) {
859     return ThreadOffsetFromTlsPtr<pointer_size>(
860         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
861   }
862 
863   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
864                                                           PointerSize pointer_size) {
865     if (pointer_size == PointerSize::k32) {
866       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
867           Uint32Value();
868     } else {
869       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
870           Uint32Value();
871     }
872   }
873 
874   template<PointerSize pointer_size>
875   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
876     return ThreadOffsetFromTlsPtr<pointer_size>(
877         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
878   }
879 
880   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
881   template <PointerSize pointer_size>
882   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
883     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
884     DCHECK_LT(reg, 30u);
885     // The ReadBarrierMarkRegX entry points are ordered by increasing
886     // register number in Thread::tlsPtr_.quick_entrypoints.
887     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
888         + static_cast<size_t>(pointer_size) * reg;
889   }
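  // Worked example (illustrative): for pointer_size == PointerSize::k64 and
  // reg == 5 this yields the offset of pReadBarrierMarkReg00 plus 8 * 5, i.e.
  // the slot of pReadBarrierMarkReg05, relying on the slots being laid out
  // consecutively in quick_entrypoints.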
890 
891   template<PointerSize pointer_size>
892   static constexpr ThreadOffset<pointer_size> SelfOffset() {
893     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
894   }
895 
896   template<PointerSize pointer_size>
897   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
898     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
899   }
900 
901   template<PointerSize pointer_size>
902   static constexpr ThreadOffset<pointer_size> PeerOffset() {
903     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
904   }
905 
906 
907   template<PointerSize pointer_size>
908   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
909     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
910   }
911 
912   template<PointerSize pointer_size>
913   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
914     return ThreadOffsetFromTlsPtr<pointer_size>(
915         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
916   }
917 
918   template<PointerSize pointer_size>
919   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
920     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
921                                                                 thread_local_pos));
922   }
923 
924   template<PointerSize pointer_size>
925   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
926     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
927                                                                 thread_local_end));
928   }
929 
930   template<PointerSize pointer_size>
931   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
932     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
933                                                                 thread_local_objects));
934   }
935 
936   template<PointerSize pointer_size>
937   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
938     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
939                                                                 rosalloc_runs));
940   }
941 
942   template<PointerSize pointer_size>
943   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
944     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
945                                                                 thread_local_alloc_stack_top));
946   }
947 
948   template<PointerSize pointer_size>
949   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
950     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
951                                                                 thread_local_alloc_stack_end));
952   }
953 
954   // Size of the stack, less any space reserved for stack overflow.
955   size_t GetStackSize() const {
956     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
957   }
958 
959   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
960 
961   uint8_t* GetStackEnd() const {
962     return tlsPtr_.stack_end;
963   }
964 
965   // Set the stack end to the value to be used while handling a stack overflow.
966   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
967 
968   // Set the stack end to the value to be used during regular execution.
969   ALWAYS_INLINE void ResetDefaultStackEnd();
970 
971   bool IsHandlingStackOverflow() const {
972     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
973   }
974 
975   template<PointerSize pointer_size>
976   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
977     return ThreadOffsetFromTlsPtr<pointer_size>(
978         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
979   }
980 
981   template<PointerSize pointer_size>
982   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
983     return ThreadOffsetFromTlsPtr<pointer_size>(
984         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
985   }
986 
987   template<PointerSize pointer_size>
988   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
989     return ThreadOffsetFromTlsPtr<pointer_size>(
990         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
991         ManagedStack::TaggedTopQuickFrameOffset());
992   }
993 
994   const ManagedStack* GetManagedStack() const {
995     return &tlsPtr_.managed_stack;
996   }
997 
998   // Linked list recording fragments of managed stack.
999   void PushManagedStackFragment(ManagedStack* fragment) {
1000     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
1001   }
1002   void PopManagedStackFragment(const ManagedStack& fragment) {
1003     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
1004   }
1005 
1006   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
1007   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
1008 
1009   template<PointerSize pointer_size>
1010   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
1011     return ThreadOffsetFromTlsPtr<pointer_size>(
1012         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1013         ManagedStack::TopShadowFrameOffset());
1014   }
1015 
1016   // Is the given obj in one of this thread's JNI transition frames?
1017   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
1018 
1019   // Convert a global (or weak global) jobject into an Object*.
1020   ObjPtr<mirror::Object> DecodeGlobalJObject(jobject obj) const
1021       REQUIRES_SHARED(Locks::mutator_lock_);
1022 
1023   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
1024       REQUIRES_SHARED(Locks::mutator_lock_);
1025 
1026   BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1027     return tlsPtr_.top_handle_scope;
1028   }
1029 
1030   void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
1031     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
1032     tlsPtr_.top_handle_scope = handle_scope;
1033   }
1034 
1035   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1036     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
1037     DCHECK(handle_scope != nullptr);
1038     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
1039     return handle_scope;
1040   }
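  // Usage sketch (illustrative): handle scopes are normally pushed and popped
  // through the StackHandleScope RAII type from handle_scope.h rather than by
  // calling PushHandleScope()/PopHandleScope() directly:
  //
  //   StackHandleScope<1> hs(self);                       // Pushes itself onto `self`.
  //   Handle<mirror::Class> h_klass = hs.NewHandle(klass);
  //   // `h_klass` stays valid across suspension points; ~StackHandleScope pops the scope.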
1041 
1042   template<PointerSize pointer_size>
1043   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
1044     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1045                                                                 top_handle_scope));
1046   }
1047 
1048   template<PointerSize pointer_size>
1049   static constexpr ThreadOffset<pointer_size> MutatorLockOffset() {
1050     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1051                                                                 mutator_lock));
1052   }
1053 
1054   template<PointerSize pointer_size>
1055   static constexpr ThreadOffset<pointer_size> HeldMutexOffset(LockLevel level) {
1056     DCHECK_LT(enum_cast<size_t>(level), arraysize(tlsPtr_.held_mutexes));
1057     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1058                                                                 held_mutexes[level]));
1059   }
1060 
1061   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
1062     return tlsPtr_.top_reflective_handle_scope;
1063   }
1064 
1065   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
1066     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
1067     DCHECK_EQ(scope->GetThread(), this);
1068     tlsPtr_.top_reflective_handle_scope = scope;
1069   }
1070 
1071   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
1072     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
1073     DCHECK(handle_scope != nullptr);
1074     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
1075     return handle_scope;
1076   }
1077 
1078   bool GetIsGcMarking() const {
1079     DCHECK(gUseReadBarrier);
1080     return tls32_.is_gc_marking;
1081   }
1082 
1083   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
1084 
1085   bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
1086 
1087   void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
1088 
1089   bool GetWeakRefAccessEnabled() const;  // Only safe for current thread.
1090 
1091   void SetWeakRefAccessEnabled(bool enabled) {
1092     DCHECK(gUseReadBarrier);
1093     WeakRefAccessState new_state = enabled ?
1094         WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
1095     tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
1096   }
1097 
1098   uint32_t GetDisableThreadFlipCount() const {
1099     return tls32_.disable_thread_flip_count;
1100   }
1101 
1102   void IncrementDisableThreadFlipCount() {
1103     ++tls32_.disable_thread_flip_count;
1104   }
1105 
1106   void DecrementDisableThreadFlipCount() {
1107     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1108     --tls32_.disable_thread_flip_count;
1109   }
1110 
1111   // Returns true if the thread is a runtime thread (e.g. one from a ThreadPool).
1112   bool IsRuntimeThread() const {
1113     return is_runtime_thread_;
1114   }
1115 
1116   void SetIsRuntimeThread(bool is_runtime_thread) {
1117     is_runtime_thread_ = is_runtime_thread;
1118   }
1119 
1120   uint32_t CorePlatformApiCookie() {
1121     return core_platform_api_cookie_;
1122   }
1123 
1124   void SetCorePlatformApiCookie(uint32_t cookie) {
1125     core_platform_api_cookie_ = cookie;
1126   }
1127 
1128   // Returns true if the thread is allowed to load java classes.
1129   bool CanLoadClasses() const;
1130 
1131   // Returns the fake exception used to activate deoptimization.
1132   static mirror::Throwable* GetDeoptimizationException() {
1133     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1134     // represented by ObjPtr.
1135     return reinterpret_cast<mirror::Throwable*>(0x100);
1136   }
1137 
1138   // Currently deoptimization invokes the verifier, which can trigger class loading
1139   // and execute Java code, so there might be nested deoptimizations happening.
1140   // We need to save the ongoing deoptimization shadow frames and return
1141   // values on stacks.
1142   // 'from_code' denotes whether the deoptimization was explicitly made from
1143   // compiled code.
1144   // 'method_type' contains info on whether deoptimization should advance
1145   // dex_pc.
1146   void PushDeoptimizationContext(const JValue& return_value,
1147                                  bool is_reference,
1148                                  ObjPtr<mirror::Throwable> exception,
1149                                  bool from_code,
1150                                  DeoptimizationMethodType method_type)
1151       REQUIRES_SHARED(Locks::mutator_lock_);
1152   void PopDeoptimizationContext(JValue* result,
1153                                 ObjPtr<mirror::Throwable>* exception,
1154                                 bool* from_code,
1155                                 DeoptimizationMethodType* method_type)
1156       REQUIRES_SHARED(Locks::mutator_lock_);
1157   void AssertHasDeoptimizationContext()
1158       REQUIRES_SHARED(Locks::mutator_lock_);
1159   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1160   ShadowFrame* PopStackedShadowFrame();
1161   ShadowFrame* MaybePopDeoptimizedStackedShadowFrame();
1162 
1163   // For debugger, find the shadow frame that corresponds to a frame id.
1164   // Or return null if there is none.
1165   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1166       REQUIRES_SHARED(Locks::mutator_lock_);
1167   // For debugger, find the bool array that keeps track of the updated vreg set
1168   // for a frame id.
1169   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1170   // For debugger, find the shadow frame that corresponds to a frame id. If
1171   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1172   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1173                                                uint32_t num_vregs,
1174                                                ArtMethod* method,
1175                                                uint32_t dex_pc)
1176       REQUIRES_SHARED(Locks::mutator_lock_);
1177 
1178   // Delete the entry that maps from frame_id to shadow_frame.
1179   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1180       REQUIRES_SHARED(Locks::mutator_lock_);
1181 
1182   std::vector<ArtMethod*>* GetStackTraceSample() const {
1183     DCHECK(!IsAotCompiler());
1184     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1185   }
1186 
1187   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1188     DCHECK(!IsAotCompiler());
1189     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1190   }
1191 
1192   verifier::VerifierDeps* GetVerifierDeps() const {
1193     DCHECK(IsAotCompiler());
1194     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1195   }
1196 
1197   // It is the responsibility of the caller to make sure the verifier_deps
1198   // entry in the thread is cleared before destruction of the actual VerifierDeps
1199   // object, or the thread.
1200   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1201     DCHECK(IsAotCompiler());
1202     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1203     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1204   }
1205 
1206   uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
1207 
1208   size_t* GetMethodTraceIndexPtr() { return &tlsPtr_.method_trace_buffer_index; }
1209 
1210   uintptr_t* SetMethodTraceBuffer(uintptr_t* buffer) {
1211     return tlsPtr_.method_trace_buffer = buffer;
1212   }
1213 
1214   void ResetMethodTraceBuffer() {
1215     if (tlsPtr_.method_trace_buffer != nullptr) {
1216       delete[] tlsPtr_.method_trace_buffer;
1217     }
1218     tlsPtr_.method_trace_buffer = nullptr;
1219     tlsPtr_.method_trace_buffer_index = 0;
1220   }
1221 
1222   uint64_t GetTraceClockBase() const {
1223     return tls64_.trace_clock_base;
1224   }
1225 
1226   void SetTraceClockBase(uint64_t clock_base) {
1227     tls64_.trace_clock_base = clock_base;
1228   }
1229 
1230   BaseMutex* GetHeldMutex(LockLevel level) const {
1231     return tlsPtr_.held_mutexes[level];
1232   }
1233 
1234   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1235     tlsPtr_.held_mutexes[level] = mutex;
1236   }
1237 
1238   void ClearSuspendBarrier(AtomicInteger* target)
1239       REQUIRES(Locks::thread_suspend_count_lock_);
1240 
ReadFlag(ThreadFlag flag)1241   bool ReadFlag(ThreadFlag flag) const {
1242     return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag);
1243   }
1244 
1245   void AtomicSetFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1246     tls32_.state_and_flags.fetch_or(enum_cast<uint32_t>(flag), order);
1247   }
1248 
1249   void AtomicClearFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1250     tls32_.state_and_flags.fetch_and(~enum_cast<uint32_t>(flag), order);
1251   }
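  // Example (sketch): the flag accessors above are typically used in a set / observe / clear
  // pattern. Illustrative only; real callers also hold the appropriate locks and pair the
  // flag with other bookkeeping. `thread` is assumed to be the Thread* being signaled.
  //
  //   thread->AtomicSetFlag(ThreadFlag::kCheckpointRequest, std::memory_order_release);
  //   ...
  //   if (thread->ReadFlag(ThreadFlag::kCheckpointRequest)) {
  //     // Run the checkpoint, then clear the request.
  //     thread->AtomicClearFlag(ThreadFlag::kCheckpointRequest);
  //   }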
1252 
1253   void ResetQuickAllocEntryPointsForThread();
1254 
1255   // Returns the remaining space in the TLAB.
TlabSize()1256   size_t TlabSize() const {
1257     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1258   }
1259 
1260   // Returns pos offset from start.
GetTlabPosOffset()1261   size_t GetTlabPosOffset() const {
1262     return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
1263   }
1264 
1265   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
TlabRemainingCapacity()1266   size_t TlabRemainingCapacity() const {
1267     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1268   }
1269 
1270   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
ExpandTlab(size_t bytes)1271   void ExpandTlab(size_t bytes) {
1272     tlsPtr_.thread_local_end += bytes;
1273     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1274   }
1275 
1276   // Called from Concurrent mark-compact GC to slide the TLAB pointers backwards
1277   // to adjust to post-compact addresses.
1278   void AdjustTlab(size_t slide_bytes);
1279 
1280   // Doesn't check that there is room.
1281   mirror::Object* AllocTlab(size_t bytes);
1282   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1283   bool HasTlab() const;
1284   void ResetTlab();
GetTlabStart()1285   uint8_t* GetTlabStart() {
1286     return tlsPtr_.thread_local_start;
1287   }
GetTlabPos()1288   uint8_t* GetTlabPos() {
1289     return tlsPtr_.thread_local_pos;
1290   }
GetTlabEnd()1291   uint8_t* GetTlabEnd() {
1292     return tlsPtr_.thread_local_end;
1293   }
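  // Example (sketch): fast-path TLAB allocation as an allocator might perform it with the
  // helpers above. Illustrative only: `self` is assumed to be the current Thread*, and the
  // slow path (refilling via SetTlab() or falling back to the shared heap) is omitted.
  //
  //   mirror::Object* obj = nullptr;
  //   if (alloc_size <= self->TlabSize()) {
  //     obj = self->AllocTlab(alloc_size);          // Bump-pointer; no room check inside.
  //   } else if (alloc_size <= self->TlabRemainingCapacity()) {
  //     self->ExpandTlab(alloc_size - self->TlabSize());
  //     obj = self->AllocTlab(alloc_size);
  //   } else {
  //     // Slow path: refill the TLAB or allocate directly from the heap.
  //   }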
1294   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1295   // equal to a valid pointer.
1296   // TODO: does this need to be atomic?  I don't think so.
RemoveSuspendTrigger()1297   void RemoveSuspendTrigger() {
1298     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1299   }
1300 
1301   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1302   // The next time a suspend check is done, it will load from the value at this address
1303   // and trigger a SIGSEGV.
1304   // Only needed if Runtime::implicit_suspend_checks_ is true and fully implemented.  It currently
1305   // is always false. Client code currently just looks at the thread flags directly to determine
1306   // whether we should suspend, so this call is currently unnecessary.
TriggerSuspend()1307   void TriggerSuspend() {
1308     tlsPtr_.suspend_trigger = nullptr;
1309   }
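  // Example (sketch): what an implicit suspend check amounts to, conceptually. The check
  // loads through the pointer stored in suspend_trigger; after TriggerSuspend() has stored
  // nullptr, the load faults and the SIGSEGV handler performs the suspension. Written here
  // as C++ for illustration only, not as the actual generated code.
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger;
  //   uintptr_t unused = *reinterpret_cast<volatile uintptr_t*>(trigger);  // Faults if trigger == nullptr.
  //   (void)unused;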
1310 
1311 
1312   // Push an object onto the allocation stack.
1313   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1314       REQUIRES_SHARED(Locks::mutator_lock_);
1315 
1316   // Set the thread local allocation pointers to the given pointers.
1317   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1318                                      StackReference<mirror::Object>* end);
1319 
1320   // Resets the thread local allocation pointers.
1321   void RevokeThreadLocalAllocationStack();
1322 
GetThreadLocalBytesAllocated()1323   size_t GetThreadLocalBytesAllocated() const {
1324     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1325   }
1326 
GetThreadLocalObjectsAllocated()1327   size_t GetThreadLocalObjectsAllocated() const {
1328     return tlsPtr_.thread_local_objects;
1329   }
1330 
GetRosAllocRun(size_t index)1331   void* GetRosAllocRun(size_t index) const {
1332     return tlsPtr_.rosalloc_runs[index];
1333   }
1334 
SetRosAllocRun(size_t index,void * run)1335   void SetRosAllocRun(size_t index, void* run) {
1336     tlsPtr_.rosalloc_runs[index] = run;
1337   }
1338 
1339   bool ProtectStack(bool fatal_on_error = true);
1340   bool UnprotectStack();
1341 
IsTransitioningToRunnable()1342   bool IsTransitioningToRunnable() const {
1343     return tls32_.is_transitioning_to_runnable;
1344   }
1345 
SetIsTransitioningToRunnable(bool value)1346   void SetIsTransitioningToRunnable(bool value) {
1347     tls32_.is_transitioning_to_runnable = value;
1348   }
1349 
DecrementForceInterpreterCount()1350   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1351     return --tls32_.force_interpreter_count;
1352   }
1353 
IncrementForceInterpreterCount()1354   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1355     return ++tls32_.force_interpreter_count;
1356   }
1357 
SetForceInterpreterCount(uint32_t value)1358   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1359     tls32_.force_interpreter_count = value;
1360   }
1361 
ForceInterpreterCount()1362   uint32_t ForceInterpreterCount() const {
1363     return tls32_.force_interpreter_count;
1364   }
1365 
IsForceInterpreter()1366   bool IsForceInterpreter() const {
1367     return tls32_.force_interpreter_count != 0;
1368   }
1369 
IncrementMakeVisiblyInitializedCounter()1370   bool IncrementMakeVisiblyInitializedCounter() {
1371     tls32_.make_visibly_initialized_counter += 1u;
1372     DCHECK_LE(tls32_.make_visibly_initialized_counter, kMakeVisiblyInitializedCounterTriggerCount);
1373     if (tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount) {
1374       tls32_.make_visibly_initialized_counter = 0u;
1375       return true;
1376     }
1377     return false;
1378   }
1379 
1380   void PushVerifier(verifier::MethodVerifier* verifier);
1381   void PopVerifier(verifier::MethodVerifier* verifier);
1382 
1383   void InitStringEntryPoints();
1384 
ModifyDebugDisallowReadBarrier(int8_t delta)1385   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1386     if (kCheckDebugDisallowReadBarrierCount) {
1387       debug_disallow_read_barrier_ += delta;
1388     }
1389   }
1390 
GetDebugDisallowReadBarrierCount()1391   uint8_t GetDebugDisallowReadBarrierCount() const {
1392     return kCheckDebugDisallowReadBarrierCount ? debug_disallow_read_barrier_ : 0u;
1393   }
1394 
1395   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1396   // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1397   // it from being deleted.
1398   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1399 
1400   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1401   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1402   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
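  // Example (sketch): attaching plugin-specific state to a thread via the custom TLS slots
  // above. Illustrative only; "MyPluginData" and "my-plugin-key" are hypothetical names and
  // `self` is assumed to be the current Thread*.
  //
  //   class MyPluginData : public TLSData {
  //    public:
  //     int counter = 0;
  //   };
  //
  //   self->SetCustomTLS("my-plugin-key", new MyPluginData());  // The thread takes ownership.
  //   TLSData* data = self->GetCustomTLS("my-plugin-key");
  //   if (data != nullptr) {
  //     static_cast<MyPluginData*>(data)->counter++;
  //   }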
1403 
1404   // Returns true if the current thread is the jit sensitive thread.
IsJitSensitiveThread()1405   bool IsJitSensitiveThread() const {
1406     return this == jit_sensitive_thread_;
1407   }
1408 
1409   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1410 
1411   // Returns true if StrictMode events are traced for the current thread.
IsSensitiveThread()1412   static bool IsSensitiveThread() {
1413     if (is_sensitive_thread_hook_ != nullptr) {
1414       return (*is_sensitive_thread_hook_)();
1415     }
1416     return false;
1417   }
1418 
1419   // Set the read barrier marking entrypoints to be non-null.
1420   void SetReadBarrierEntrypoints();
1421 
1422   ObjPtr<mirror::Object> CreateCompileTimePeer(const char* name,
1423                                                bool as_daemon,
1424                                                jobject thread_group)
1425       REQUIRES_SHARED(Locks::mutator_lock_);
1426 
GetInterpreterCache()1427   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1428     return &interpreter_cache_;
1429   }
1430 
1431   // Clear all thread-local interpreter caches.
1432   //
1433   // Since the caches are keyed by memory pointer to dex instructions, this must be
1434   // called when any dex code is unloaded (before different code gets loaded at the
1435   // same memory location).
1436   //
1437   // If presence of cache entry implies some pre-conditions, this must also be
1438   // called if the pre-conditions might no longer hold true.
1439   static void ClearAllInterpreterCaches();
1440 
1441   template<PointerSize pointer_size>
InterpreterCacheOffset()1442   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1443     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1444   }
1445 
InterpreterCacheSizeLog2()1446   static constexpr int InterpreterCacheSizeLog2() {
1447     return WhichPowerOf2(InterpreterCache::kSize);
1448   }
1449 
AllThreadFlags()1450   static constexpr uint32_t AllThreadFlags() {
1451     return enum_cast<uint32_t>(ThreadFlag::kLastFlag) |
1452            (enum_cast<uint32_t>(ThreadFlag::kLastFlag) - 1u);
1453   }
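  // Worked example (sketch): each ThreadFlag is a distinct power of two, so
  // `kLastFlag | (kLastFlag - 1)` yields a mask covering every flag bit. For instance, if
  // kLastFlag were 1u << 20 (a hypothetical value), the result would be
  // 0x00100000 | 0x000fffff == 0x001fffff, i.e. bits 0..20 all set.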
1454 
SuspendOrCheckpointRequestFlags()1455   static constexpr uint32_t SuspendOrCheckpointRequestFlags() {
1456     return enum_cast<uint32_t>(ThreadFlag::kSuspendRequest) |
1457            enum_cast<uint32_t>(ThreadFlag::kCheckpointRequest) |
1458            enum_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest);
1459   }
1460 
FlipFunctionFlags()1461   static constexpr uint32_t FlipFunctionFlags() {
1462     return enum_cast<uint32_t>(ThreadFlag::kPendingFlipFunction) |
1463            enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction) |
1464            enum_cast<uint32_t>(ThreadFlag::kWaitingForFlipFunction);
1465   }
1466 
StoredThreadStateValue(ThreadState state)1467   static constexpr uint32_t StoredThreadStateValue(ThreadState state) {
1468     return StateAndFlags::EncodeState(state);
1469   }
1470 
ResetSharedMethodHotness()1471   void ResetSharedMethodHotness() {
1472     tls32_.shared_method_hotness = kSharedMethodHotnessThreshold;
1473   }
1474 
GetSharedMethodHotness()1475   uint32_t GetSharedMethodHotness() const {
1476     return tls32_.shared_method_hotness;
1477   }
1478 
DecrementSharedMethodHotness()1479   uint32_t DecrementSharedMethodHotness() {
1480     tls32_.shared_method_hotness = (tls32_.shared_method_hotness - 1) & 0xffff;
1481     return tls32_.shared_method_hotness;
1482   }
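  // Worked example (sketch): the `& 0xffff` keeps the counter within 16 bits, so a
  // decrement from 0 wraps to 0xffff instead of going negative; e.g.
  // (0u - 1u) & 0xffff == 0xffff, while (5u - 1u) & 0xffff == 4u.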
1483 
1484  private:
1485   explicit Thread(bool daemon);
1486   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1487   void Destroy(bool should_run_callbacks);
1488 
1489   // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1490   // observed to be set at the same time by instrumentation.
1491   void DeleteJPeer(JNIEnv* env);
1492 
1493   // Attaches the calling native thread to the runtime, returning the new native peer.
1494   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1495   template <typename PeerAction>
1496   static Thread* Attach(const char* thread_name,
1497                         bool as_daemon,
1498                         PeerAction p,
1499                         bool should_run_callbacks);
1500 
1501   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1502 
1503   template<bool kTransactionActive>
1504   static void InitPeer(ObjPtr<mirror::Object> peer,
1505                        bool as_daemon,
1506                        ObjPtr<mirror::Object> thread_group,
1507                        ObjPtr<mirror::String> thread_name,
1508                        jint thread_priority)
1509       REQUIRES_SHARED(Locks::mutator_lock_);
1510 
1511   // Avoid use, callers should use SetState.
1512   // Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
1513   // disabled by `kStoreStackTraces = false`).
1514   // NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
1515   // different states, so clang cannot perform the thread safety analysis.
SetStateUnsafe(ThreadState new_state)1516   ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
1517     StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1518     ThreadState old_state = old_state_and_flags.GetState();
1519     if (old_state == new_state) {
1520       // Nothing to do.
1521     } else if (old_state == ThreadState::kRunnable) {
1522       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1523       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1524       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1525       TransitionToSuspendedAndRunCheckpoints(new_state);
1526       // Since we transitioned to a suspended state, check the pass barrier requests.
1527       PassActiveSuspendBarriers();
1528     } else {
1529       while (true) {
1530         StateAndFlags new_state_and_flags = old_state_and_flags;
1531         new_state_and_flags.SetState(new_state);
1532         if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(
1533                                               old_state_and_flags.GetValue(),
1534                                               new_state_and_flags.GetValue()))) {
1535           break;
1536         }
1537         // Reload state and flags.
1538         old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1539         DCHECK_EQ(old_state, old_state_and_flags.GetState());
1540       }
1541     }
1542     return old_state;
1543   }
1544 
GetMutatorLock()1545   MutatorMutex* GetMutatorLock() RETURN_CAPABILITY(Locks::mutator_lock_) {
1546     DCHECK_EQ(tlsPtr_.mutator_lock, Locks::mutator_lock_);
1547     return tlsPtr_.mutator_lock;
1548   }
1549 
1550   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1551 
1552   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1553   DumpOrder DumpStack(std::ostream& os,
1554                       bool dump_native_stack = true,
1555                       bool force_dump_stack = false) const
1556       REQUIRES_SHARED(Locks::mutator_lock_);
1557   DumpOrder DumpStack(std::ostream& os,
1558                       unwindstack::AndroidLocalUnwinder& unwinder,
1559                       bool dump_native_stack = true,
1560                       bool force_dump_stack = false) const
1561       REQUIRES_SHARED(Locks::mutator_lock_);
1562 
1563   // Out-of-line conveniences for debugging in gdb.
1564   static Thread* CurrentFromGdb();  // Like Thread::Current.
1565   // Like Thread::Dump(std::cerr).
1566   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1567 
1568   // A wrapper around CreateCallback used when userfaultfd GC is used to
1569   // identify the GC by stacktrace.
1570   static NO_INLINE void* CreateCallbackWithUffdGc(void* arg);
1571   static void* CreateCallback(void* arg);
1572 
1573   void HandleUncaughtExceptions() REQUIRES_SHARED(Locks::mutator_lock_);
1574   void RemoveFromThreadGroup() REQUIRES_SHARED(Locks::mutator_lock_);
1575 
1576   // Initialize a thread.
1577   //
1578   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. If
1579   // Init succeeds, the thread takes ownership of it. If Init fails, it is the caller's
1580   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1581   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1582   // of false).
1583   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1584       REQUIRES(Locks::runtime_shutdown_lock_);
1585   void InitCardTable();
1586   void InitCpu();
1587   void CleanupCpu();
1588   void InitTlsEntryPoints();
1589   void InitTid();
1590   void InitPthreadKeySelf();
1591   bool InitStackHwm();
1592 
1593   void SetUpAlternateSignalStack();
1594   void TearDownAlternateSignalStack();
1595   void MadviseAwayAlternateSignalStack();
1596 
1597   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1598       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
1599       REQUIRES_SHARED(Locks::mutator_lock_);
1600 
1601   ALWAYS_INLINE void PassActiveSuspendBarriers()
1602       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1603 
1604   // Registers the current thread as the jit sensitive thread. Should be called just once.
SetJitSensitiveThread()1605   static void SetJitSensitiveThread() {
1606     if (jit_sensitive_thread_ == nullptr) {
1607       jit_sensitive_thread_ = Thread::Current();
1608     } else {
1609       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1610           << Thread::Current()->GetTid();
1611     }
1612   }
1613 
SetSensitiveThreadHook(bool (* is_sensitive_thread_hook)())1614   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1615     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1616   }
1617 
1618   bool ModifySuspendCountInternal(Thread* self,
1619                                   int delta,
1620                                   AtomicInteger* suspend_barrier,
1621                                   SuspendReason reason)
1622       WARN_UNUSED
1623       REQUIRES(Locks::thread_suspend_count_lock_);
1624 
1625   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1626   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1627   // the kCheckpointRequest flag is cleared.
1628   void RunCheckpointFunction()
1629       REQUIRES(!Locks::thread_suspend_count_lock_)
1630       REQUIRES_SHARED(Locks::mutator_lock_);
1631   void RunEmptyCheckpoint();
1632 
1633   bool PassActiveSuspendBarriers(Thread* self)
1634       REQUIRES(!Locks::thread_suspend_count_lock_);
1635 
1636   // Install the protected region for implicit stack checks.
1637   void InstallImplicitProtection();
1638 
1639   template <bool kPrecise>
1640   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1641 
1642   static bool IsAotCompiler();
1643 
1644   void ReleaseLongJumpContextInternal();
1645 
1646   void SetCachedThreadName(const char* name);
1647 
1648   // Helper class for manipulating the 32 bits of atomically changed state and flags.
1649   class StateAndFlags {
1650    public:
StateAndFlags(uint32_t value)1651     explicit StateAndFlags(uint32_t value) :value_(value) {}
1652 
GetValue()1653     uint32_t GetValue() const {
1654       return value_;
1655     }
1656 
SetValue(uint32_t value)1657     void SetValue(uint32_t value) {
1658       value_ = value;
1659     }
1660 
IsAnyOfFlagsSet(uint32_t flags)1661     bool IsAnyOfFlagsSet(uint32_t flags) const {
1662       DCHECK_EQ(flags & ~AllThreadFlags(), 0u);
1663       return (value_ & flags) != 0u;
1664     }
1665 
IsFlagSet(ThreadFlag flag)1666     bool IsFlagSet(ThreadFlag flag) const {
1667       return (value_ & enum_cast<uint32_t>(flag)) != 0u;
1668     }
1669 
SetFlag(ThreadFlag flag)1670     void SetFlag(ThreadFlag flag) {
1671       value_ |= enum_cast<uint32_t>(flag);
1672     }
1673 
WithFlag(ThreadFlag flag)1674     StateAndFlags WithFlag(ThreadFlag flag) const {
1675       StateAndFlags result = *this;
1676       result.SetFlag(flag);
1677       return result;
1678     }
1679 
WithoutFlag(ThreadFlag flag)1680     StateAndFlags WithoutFlag(ThreadFlag flag) const {
1681       StateAndFlags result = *this;
1682       result.ClearFlag(flag);
1683       return result;
1684     }
1685 
ClearFlag(ThreadFlag flag)1686     void ClearFlag(ThreadFlag flag) {
1687       value_ &= ~enum_cast<uint32_t>(flag);
1688     }
1689 
GetState()1690     ThreadState GetState() const {
1691       ThreadState state = ThreadStateField::Decode(value_);
1692       ValidateThreadState(state);
1693       return state;
1694     }
1695 
SetState(ThreadState state)1696     void SetState(ThreadState state) {
1697       ValidateThreadState(state);
1698       value_ = ThreadStateField::Update(state, value_);
1699     }
1700 
WithState(ThreadState state)1701     StateAndFlags WithState(ThreadState state) const {
1702       StateAndFlags result = *this;
1703       result.SetState(state);
1704       return result;
1705     }
1706 
EncodeState(ThreadState state)1707     static constexpr uint32_t EncodeState(ThreadState state) {
1708       ValidateThreadState(state);
1709       return ThreadStateField::Encode(state);
1710     }
1711 
1712    private:
ValidateThreadState(ThreadState state)1713     static constexpr void ValidateThreadState(ThreadState state) {
1714       if (kIsDebugBuild && state != ThreadState::kRunnable) {
1715         CHECK_GE(state, ThreadState::kTerminated);
1716         CHECK_LE(state, ThreadState::kSuspended);
1717         CHECK_NE(state, ThreadState::kObsoleteRunnable);
1718       }
1719     }
1720 
1721     // The value holds thread flags and thread state.
1722     uint32_t value_;
1723 
1724     static constexpr size_t kThreadStateBitSize = BitSizeOf<std::underlying_type_t<ThreadState>>();
1725     static constexpr size_t kThreadStatePosition = BitSizeOf<uint32_t>() - kThreadStateBitSize;
1726     using ThreadStateField = BitField<ThreadState, kThreadStatePosition, kThreadStateBitSize>;
1727     static_assert(
1728         WhichPowerOf2(enum_cast<uint32_t>(ThreadFlag::kLastFlag)) < kThreadStatePosition);
1729   };
1730   static_assert(sizeof(StateAndFlags) == sizeof(uint32_t), "Unexpected StateAndFlags size");
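  // Worked example (sketch): assuming ThreadState's underlying type is uint8_t,
  // kThreadStateBitSize is 8 and kThreadStatePosition is 32 - 8 == 24, so the thread state
  // occupies bits 24..31 of value_ and the ThreadFlag bits occupy bits 0..23. The
  // static_assert inside StateAndFlags checks that the highest flag bit stays below the
  // state field.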
1731 
GetStateAndFlags(std::memory_order order)1732   StateAndFlags GetStateAndFlags(std::memory_order order) const {
1733     return StateAndFlags(tls32_.state_and_flags.load(order));
1734   }
1735 
1736   // Format state and flags as a hex string. For diagnostic output.
1737   std::string StateAndFlagsAsHexString() const;
1738 
1739   // Run the flip function and, if requested, notify other threads that may have tried
1740   // to do that concurrently.
1741   void RunFlipFunction(Thread* self, bool notify) REQUIRES_SHARED(Locks::mutator_lock_);
1742 
1743   static void ThreadExitCallback(void* arg);
1744 
1745   // Maximum number of suspend barriers.
1746   static constexpr uint32_t kMaxSuspendBarriers = 3;
1747 
1748   // Has Thread::Startup been called?
1749   static bool is_started_;
1750 
1751   // TLS key used to retrieve the Thread*.
1752   static pthread_key_t pthread_key_self_;
1753 
1754   // Used to notify threads that they should attempt to resume; they will suspend again if
1755   // their suspend count is > 0.
1756   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1757 
1758   // Hook passed by framework which returns true
1759   // when StrictMode events are traced for the current thread.
1760   static bool (*is_sensitive_thread_hook_)();
1761   // Stores the jit sensitive thread (which for now is the UI thread).
1762   static Thread* jit_sensitive_thread_;
1763 
1764   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
1765 
1766   /***********************************************************************************************/
1767   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1768   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1769   // first if possible.
1770   /***********************************************************************************************/
1771 
1772   struct PACKED(4) tls_32bit_sized_values {
1773     // We have no control over the size of 'bool', but want our boolean fields
1774     // to be 4-byte quantities.
1775     using bool32_t = uint32_t;
1776 
tls_32bit_sized_valuestls_32bit_sized_values1777     explicit tls_32bit_sized_values(bool is_daemon)
1778         : state_and_flags(0u),
1779           suspend_count(0),
1780           thin_lock_thread_id(0),
1781           tid(0),
1782           daemon(is_daemon),
1783           throwing_OutOfMemoryError(false),
1784           no_thread_suspension(0),
1785           thread_exit_check_count(0),
1786           is_transitioning_to_runnable(false),
1787           is_gc_marking(false),
1788           is_deopt_check_required(false),
1789           weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
1790           disable_thread_flip_count(0),
1791           user_code_suspend_count(0),
1792           force_interpreter_count(0),
1793           make_visibly_initialized_counter(0),
1794           define_class_counter(0),
1795           num_name_readers(0),
1796           shared_method_hotness(kSharedMethodHotnessThreshold)
1797         {}
1798 
1799     // The state and flags field must be changed atomically so that flag values aren't lost.
1800     // See `StateAndFlags` for bit assignments of `ThreadFlag` and `ThreadState` values.
1801     // Keeping the state and flags together allows an atomic CAS to change from being
1802     // Suspended to Runnable without a suspend request occurring.
1803     Atomic<uint32_t> state_and_flags;
1804     static_assert(sizeof(state_and_flags) == sizeof(uint32_t),
1805                   "Size of state_and_flags and uint32 are different");
1806 
1807     // A non-zero value is used to tell the current thread to enter a safe point
1808     // at the next poll.
1809     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1810 
1811     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1812     // This is not to be confused with the native thread's tid, nor is it the value returned
1813     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1814     // important difference between this id and the ids visible to managed code is that these
1815     // ones get reused (to ensure that they fit in the number of bits available).
1816     uint32_t thin_lock_thread_id;
1817 
1818     // System thread id.
1819     uint32_t tid;
1820 
1821     // Is the thread a daemon?
1822     const bool32_t daemon;
1823 
1824     // A boolean telling us whether we're recursively throwing OOME.
1825     bool32_t throwing_OutOfMemoryError;
1826 
1827     // A positive value implies we're in a region where thread suspension isn't expected.
1828     uint32_t no_thread_suspension;
1829 
1830     // How many times has our pthread key's destructor been called?
1831     uint32_t thread_exit_check_count;
1832 
1833     // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1834     // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1835     // the rest of them.
1836     bool32_t is_transitioning_to_runnable;
1837 
1838     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1839     // thread local so that we can simplify the logic to check for the fast path of read barriers of
1840     // GC roots.
1841     bool32_t is_gc_marking;
1842 
1843     // True if we need to check for deoptimization when returning from the runtime functions. This
1844     // is required only when a class is redefined to prevent executing code that has field offsets
1845     // embedded. For non-debuggable apps redefinition is not allowed and this flag should always be
1846     // set to false.
1847     bool32_t is_deopt_check_required;
1848 
1849     // Thread "interrupted" status; stays raised until queried or thrown.
1850     Atomic<bool32_t> interrupted;
1851 
1852     AtomicInteger park_state_;
1853 
1854     // Determines whether the thread is allowed to directly access a weak ref
1855     // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
1856     // This is used for concurrent reference processing of the CC collector only. This is thread
1857     // local so that we can enable/disable weak ref access by using a checkpoint and avoid a race
1858     // around the time weak ref access gets disabled and concurrent reference processing begins
1859     // (if weak ref access is disabled during a pause, this is not an issue.) Other collectors use
1860     // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath().  Can be
1861     // concurrently accessed by GetReferent() and set (by iterating over threads).
1862     // Can be changed from kEnabled to kVisiblyEnabled by readers. No other concurrent access is
1863     // possible when that happens.
1864     mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;
1865 
1866     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1867     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1868     // critical section enter.
1869     uint32_t disable_thread_flip_count;
1870 
1871     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1872     // suspended by the runtime from those suspended by user code.
1873     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1874     // told that AssertHeld should be good enough.
1875     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1876 
1877     // Count of how many times this thread has been forced to interpreter. If this is not 0 the
1878     // thread must remain in interpreted code as much as possible.
1879     uint32_t force_interpreter_count;
1880 
1881     // Counter for calls to initialize a class that's initialized but not visibly initialized.
1882     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
1883     // make initialized classes visibly initialized. This is needed because we usually make
1884     // classes visibly initialized in batches but we do not want to be stuck with a class
1885     // initialized but not visibly initialized for a long time even if no more classes are
1886     // being initialized anymore.
1887     uint32_t make_visibly_initialized_counter;
1888 
1889     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
1890     // for threads to be done with class-definition work.
1891     uint32_t define_class_counter;
1892 
1893     // A count of the number of readers of tlsPtr_.name that may still be looking at a string they
1894     // retrieved.
1895     mutable std::atomic<uint32_t> num_name_readers;
1896     static_assert(std::atomic<uint32_t>::is_always_lock_free);
1897 
1898     // Thread-local hotness counter for shared memory methods. Initialized with
1899     // `kSharedMethodHotnessThreshold`. The interpreter decrements it and goes
1900     // into the runtime when hitting zero. Note that all previous decrements
1901     // could have been executed by another method than the one seeing zero.
1902     // There is a second level counter in `Jit::shared_method_counters_` to make
1903     // sure we at least have a few samples before compiling a method.
1904     uint32_t shared_method_hotness;
1905   } tls32_;
1906 
1907   struct PACKED(8) tls_64bit_sized_values {
tls_64bit_sized_valuestls_64bit_sized_values1908     tls_64bit_sized_values() : trace_clock_base(0) {
1909     }
1910 
1911     // The clock base used for tracing.
1912     uint64_t trace_clock_base;
1913 
1914     RuntimeStats stats;
1915   } tls64_;
1916 
PACKED(sizeof (void *))1917   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1918       tls_ptr_sized_values() : card_table(nullptr),
1919                                exception(nullptr),
1920                                stack_end(nullptr),
1921                                managed_stack(),
1922                                suspend_trigger(nullptr),
1923                                jni_env(nullptr),
1924                                tmp_jni_env(nullptr),
1925                                self(nullptr),
1926                                opeer(nullptr),
1927                                jpeer(nullptr),
1928                                stack_begin(nullptr),
1929                                stack_size(0),
1930                                deps_or_stack_trace_sample(),
1931                                wait_next(nullptr),
1932                                monitor_enter_object(nullptr),
1933                                top_handle_scope(nullptr),
1934                                class_loader_override(nullptr),
1935                                long_jump_context(nullptr),
1936                                stacked_shadow_frame_record(nullptr),
1937                                deoptimization_context_stack(nullptr),
1938                                frame_id_to_shadow_frame(nullptr),
1939                                name(nullptr),
1940                                pthread_self(0),
1941                                last_no_thread_suspension_cause(nullptr),
1942                                thread_local_start(nullptr),
1943                                thread_local_pos(nullptr),
1944                                thread_local_end(nullptr),
1945                                thread_local_limit(nullptr),
1946                                thread_local_objects(0),
1947                                checkpoint_function(nullptr),
1948                                thread_local_alloc_stack_top(nullptr),
1949                                thread_local_alloc_stack_end(nullptr),
1950                                mutator_lock(nullptr),
1951                                flip_function(nullptr),
1952                                method_verifier(nullptr),
1953                                thread_local_mark_stack(nullptr),
1954                                async_exception(nullptr),
1955                                top_reflective_handle_scope(nullptr),
1956                                method_trace_buffer(nullptr),
1957                                method_trace_buffer_index(0) {
1958       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1959     }
1960 
1961     // The biased card table, see CardTable for details.
1962     uint8_t* card_table;
1963 
1964     // The pending exception or null.
1965     mirror::Throwable* exception;
1966 
1967     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1968     // We leave extra space so there's room for the code that throws StackOverflowError.
1969     uint8_t* stack_end;
1970 
1971     // The top of the managed stack often manipulated directly by compiler generated code.
1972     ManagedStack managed_stack;
1973 
1974     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1975     // normally set to the address of itself.
1976     uintptr_t* suspend_trigger;
1977 
1978     // Every thread may have an associated JNI environment
1979     JNIEnvExt* jni_env;
1980 
1981     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1982     // created thread.
1983     JNIEnvExt* tmp_jni_env;
1984 
1985     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1986     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1987     // Thread::Current to give the address.
1988     Thread* self;
1989 
1990     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1991     // start up, until the thread is registered and the local opeer_ is used.
1992     mirror::Object* opeer;
1993     jobject jpeer;
1994 
1995     // The "lowest addressable byte" of the stack.
1996     uint8_t* stack_begin;
1997 
1998     // Size of the stack.
1999     size_t stack_size;
2000 
2001     // Sampling profiler and AOT verification cannot happen on the same run, so we share
2002     // the same entry for the stack trace and the verifier deps.
2003     union DepsOrStackTraceSample {
2004       DepsOrStackTraceSample() {
2005         verifier_deps = nullptr;
2006         stack_trace_sample = nullptr;
2007       }
2008       // Pointer to previous stack trace captured by sampling profiler.
2009       std::vector<ArtMethod*>* stack_trace_sample;
2010       // When doing AOT verification, per-thread VerifierDeps.
2011       verifier::VerifierDeps* verifier_deps;
2012     } deps_or_stack_trace_sample;
2013 
2014     // The next thread in the wait set this thread is part of or null if not waiting.
2015     Thread* wait_next;
2016 
2017     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
2018     mirror::Object* monitor_enter_object;
2019 
2020     // Top of linked list of handle scopes or null for none.
2021     BaseHandleScope* top_handle_scope;
2022 
2023     // Needed to get the right ClassLoader in JNI_OnLoad, but also
2024     // useful for testing.
2025     jobject class_loader_override;
2026 
2027     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
2028     Context* long_jump_context;
2029 
2030     // For gc purpose, a shadow frame record stack that keeps track of:
2031     // 1) shadow frames under construction.
2032     // 2) deoptimization shadow frames.
2033     StackedShadowFrameRecord* stacked_shadow_frame_record;
2034 
2035     // Deoptimization return value record stack.
2036     DeoptimizationContextRecord* deoptimization_context_stack;
2037 
2038     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
2039     // Shadow frames may be created before deoptimization happens so that the debugger can
2040     // set local values there first.
2041     FrameIdToShadowFrame* frame_id_to_shadow_frame;
2042 
2043     // A cached copy of the java.lang.Thread's (modified UTF-8) name.
2044     // If this is not null or kThreadNameDuringStartup, then it owns the malloc memory holding
2045     // the string. Updated in an RCU-like manner.
2046     std::atomic<const char*> name;
2047     static_assert(std::atomic<const char*>::is_always_lock_free);
2048 
2049     // A cached pthread_t for the pthread underlying this Thread*.
2050     pthread_t pthread_self;
2051 
2052     // If no_thread_suspension_ is > 0, what is causing that assertion.
2053     const char* last_no_thread_suspension_cause;
2054 
2055     // Pending barriers that require passing or null if non-pending. Installation guarded by
2056     // Locks::thread_suspend_count_lock_.
2057     // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
2058     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
2059     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
2060 
2061     // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
2062     uint8_t* thread_local_start;
2063 
2064     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
2065     // potentially better performance.
2066     uint8_t* thread_local_pos;
2067     uint8_t* thread_local_end;
2068 
2069     // Thread local limit is how much we can expand the thread local buffer to; it is greater
2070     // than or equal to thread_local_end.
2071     uint8_t* thread_local_limit;
2072 
2073     size_t thread_local_objects;
2074 
2075     // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
2076     // requests another checkpoint, it goes to the checkpoint overflow list.
2077     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
2078 
2079     // Entrypoint function pointers.
2080     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
2081     JniEntryPoints jni_entrypoints;
2082     QuickEntryPoints quick_entrypoints;
2083 
2084     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
2085     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
2086 
2087     // Thread-local allocation stack data/routines.
2088     StackReference<mirror::Object>* thread_local_alloc_stack_top;
2089     StackReference<mirror::Object>* thread_local_alloc_stack_end;
2090 
2091     // Pointer to the mutator lock.
2092     // This is the same as `Locks::mutator_lock_` but cached for faster state transitions.
2093     MutatorMutex* mutator_lock;
2094 
2095     // Support for Mutex lock hierarchy bug detection.
2096     BaseMutex* held_mutexes[kLockLevelCount];
2097 
2098     // The function used for thread flip.
2099     Closure* flip_function;
2100 
2101     // Current method verifier, used for root marking.
2102     verifier::MethodVerifier* method_verifier;
2103 
2104     union {
2105       // Thread-local mark stack for the concurrent copying collector.
2106       gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
2107       // Thread-local page-sized buffer for userfaultfd GC.
2108       uint8_t* thread_local_gc_buffer;
2109     };
2110 
2111     // The pending async-exception or null.
2112     mirror::Throwable* async_exception;
2113 
2114     // Top of the linked-list for reflective-handle scopes or null if none.
2115     BaseReflectiveHandleScope* top_reflective_handle_scope;
2116 
2117     // Pointer to a thread-local buffer for method tracing.
2118     uintptr_t* method_trace_buffer;
2119 
2120     // The index of the next free entry in method_trace_buffer.
2121     size_t method_trace_buffer_index;
2122   } tlsPtr_;
2123 
2124   // Small thread-local cache to be used from the interpreter.
2125   // It is keyed by dex instruction pointer.
2126   // The value is opcode-dependent (e.g. field offset).
2127   InterpreterCache interpreter_cache_;
2128 
2129   // All fields below this line should not be accessed by native code. This means these fields can
2130   // be modified, rearranged, added or removed without having to modify asm_support.h
2131 
2132   // Guards the 'wait_monitor_' members.
2133   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
2134 
2135   // Condition variable waited upon during a wait.
2136   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
2137   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
2138   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
2139 
2140   // Debug disable read barrier count, only is checked for debug builds and only in the runtime.
2141   uint8_t debug_disallow_read_barrier_ = 0;
2142 
2143   // Note that it is not in the packed struct, so it may not be accessed for cross compilation.
2144   uintptr_t poison_object_cookie_ = 0;
2145 
2146   // Pending extra checkpoints if checkpoint_function_ is already used.
2147   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
2148 
2149   // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
2150   // compiled code or entrypoints.
2151   SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
2152       GUARDED_BY(Locks::custom_tls_lock_);
2153 
2154 #if !defined(__BIONIC__)
2155 #if !defined(ANDROID_HOST_MUSL)
2156     __attribute__((tls_model("initial-exec")))
2157 #endif
2158   static thread_local Thread* self_tls_;
2159 #endif
2160 
2161   // True if the thread is some form of runtime thread (ex, GC or JIT).
2162   bool is_runtime_thread_;
2163 
2164   // Set during execution of JNI methods that get field and method ids as part of determining if
2165   // the caller is allowed to access all fields and methods in the Core Platform API.
2166   uint32_t core_platform_api_cookie_ = 0;
2167 
2168   friend class gc::collector::SemiSpace;  // For getting stack traces.
2169   friend class Runtime;  // For CreatePeer.
2170   friend class QuickExceptionHandler;  // For dumping the stack.
2171   friend class ScopedThreadStateChange;
2172   friend class StubTest;  // For accessing entrypoints.
2173   friend class ThreadList;  // For ~Thread and Destroy.
2174 
2175   friend class EntrypointsOrderTest;  // To test the order of tls entries.
2176   friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.
2177 
2178   DISALLOW_COPY_AND_ASSIGN(Thread);
2179 };
2180 
2181 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
2182  public:
2183   ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
2184                                                bool enabled = true)
ACQUIRE(Roles::uninterruptible_)2185       ACQUIRE(Roles::uninterruptible_)
2186       : enabled_(enabled) {
2187     if (!enabled_) {
2188       return;
2189     }
2190     if (kIsDebugBuild) {
2191       self_ = Thread::Current();
2192       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
2193     } else {
2194       Roles::uninterruptible_.Acquire();  // No-op.
2195     }
2196   }
~ScopedAssertNoThreadSuspension()2197   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
2198     if (!enabled_) {
2199       return;
2200     }
2201     if (kIsDebugBuild) {
2202       self_->EndAssertNoThreadSuspension(old_cause_);
2203     } else {
2204       Roles::uninterruptible_.Release();  // No-op.
2205     }
2206   }
2207 
2208  private:
2209   Thread* self_;
2210   const bool enabled_;
2211   const char* old_cause_;
2212 };
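// Example (sketch): typical use of ScopedAssertNoThreadSuspension. While the scope is
// active, suspension of the current thread is asserted not to happen (checked in debug
// builds via StartAssertNoThreadSuspension). Illustrative only; "Visiting roots" is a
// hypothetical cause string.
//
//   {
//     ScopedAssertNoThreadSuspension ants("Visiting roots");
//     // Code here must not transition to a suspended state or block on the mutator lock.
//   }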
2213 
2214 class ScopedAllowThreadSuspension {
2215  public:
ScopedAllowThreadSuspension()2216   ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
2217     if (kIsDebugBuild) {
2218       self_ = Thread::Current();
2219       old_cause_ = self_->EndAssertNoThreadSuspension();
2220     } else {
2221       Roles::uninterruptible_.Release();  // No-op.
2222     }
2223   }
~ScopedAllowThreadSuspension()2224   ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
2225     if (kIsDebugBuild) {
2226       CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
2227     } else {
2228       Roles::uninterruptible_.Acquire();  // No-op.
2229     }
2230   }
2231 
2232  private:
2233   Thread* self_;
2234   const char* old_cause_;
2235 };
2236 
2237 
2238 class ScopedStackedShadowFramePusher {
2239  public:
ScopedStackedShadowFramePusher(Thread * self,ShadowFrame * sf)2240   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf) : self_(self), sf_(sf) {
2241     DCHECK_EQ(sf->GetLink(), nullptr);
2242     self_->PushStackedShadowFrame(sf, StackedShadowFrameType::kShadowFrameUnderConstruction);
2243   }
~ScopedStackedShadowFramePusher()2244   ~ScopedStackedShadowFramePusher() {
2245     ShadowFrame* sf = self_->PopStackedShadowFrame();
2246     DCHECK_EQ(sf, sf_);
2247   }
2248 
2249  private:
2250   Thread* const self_;
2251   ShadowFrame* const sf_;
2252 
2253   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
2254 };
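// Example (sketch): ScopedStackedShadowFramePusher keeps a shadow frame visible to the GC
// while it is still being constructed, and pops it again when the scope exits. Illustrative
// only; `shadow_frame` is assumed to be a freshly created, unlinked frame.
//
//   {
//     ScopedStackedShadowFramePusher pusher(self, shadow_frame);
//     // Populate the frame's vregs; the GC can see them via the stacked shadow frame record.
//   }  // The destructor pops the frame and DCHECKs it is the one that was pushed.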
2255 
2256 // Only works for debug builds.
2257 class ScopedDebugDisallowReadBarriers {
2258  public:
ScopedDebugDisallowReadBarriers(Thread * self)2259   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
2260     self_->ModifyDebugDisallowReadBarrier(1);
2261   }
~ScopedDebugDisallowReadBarriers()2262   ~ScopedDebugDisallowReadBarriers() {
2263     self_->ModifyDebugDisallowReadBarrier(-1);
2264   }
2265 
2266  private:
2267   Thread* const self_;
2268 };
2269 
2270 class ScopedTransitioningToRunnable : public ValueObject {
2271  public:
ScopedTransitioningToRunnable(Thread * self)2272   explicit ScopedTransitioningToRunnable(Thread* self)
2273       : self_(self) {
2274     DCHECK_EQ(self, Thread::Current());
2275     self_->SetIsTransitioningToRunnable(true);
2276   }
2277 
~ScopedTransitioningToRunnable()2278   ~ScopedTransitioningToRunnable() { self_->SetIsTransitioningToRunnable(false); }
2279 
2280  private:
2281   Thread* const self_;
2282 };
2283 
2284 class ThreadLifecycleCallback {
2285  public:
~ThreadLifecycleCallback()2286   virtual ~ThreadLifecycleCallback() {}
2287 
2288   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2289   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2290 };
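// Example (sketch): a minimal ThreadLifecycleCallback implementation. Illustrative only;
// "ExampleLoggingCallback" is a hypothetical name, and callbacks are normally registered
// with the runtime's callback list rather than used standalone.
//
//   class ExampleLoggingCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread exiting: " << *self;
//     }
//   };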
2291 
2292 // Store an exception from the thread and suppress it for the duration of this object.
2293 class ScopedExceptionStorage {
2294  public:
2295   explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2296   void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
2297   ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
2298 
2299  private:
2300   Thread* self_;
2301   StackHandleScope<1> hs_;
2302   MutableHandle<mirror::Throwable> excp_;
2303 };
2304 
2305 std::ostream& operator<<(std::ostream& os, const Thread& thread);
2306 std::ostream& operator<<(std::ostream& os, StackedShadowFrameType thread);
2307 
2308 }  // namespace art
2309 
2310 #endif  // ART_RUNTIME_THREAD_H_
2311