1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/bit_field.h"
30 #include "base/bit_utils.h"
31 #include "base/locks.h"
32 #include "base/macros.h"
33 #include "base/pointer_size.h"
34 #include "base/safe_map.h"
35 #include "base/value_object.h"
36 #include "entrypoints/jni/jni_entrypoints.h"
37 #include "entrypoints/quick/quick_entrypoints.h"
38 #include "handle.h"
39 #include "handle_scope.h"
40 #include "interpreter/interpreter_cache.h"
41 #include "interpreter/shadow_frame.h"
42 #include "javaheapprof/javaheapsampler.h"
43 #include "jvalue.h"
44 #include "managed_stack.h"
45 #include "offsets.h"
46 #include "read_barrier_config.h"
47 #include "reflective_handle_scope.h"
48 #include "runtime_globals.h"
49 #include "runtime_stats.h"
50 #include "suspend_reason.h"
51 #include "thread_state.h"
52 
53 namespace unwindstack {
54 class AndroidLocalUnwinder;
55 }  // namespace unwindstack
56 
57 namespace art HIDDEN {
58 
59 namespace gc {
60 namespace accounting {
61 template<class T> class AtomicStack;
62 }  // namespace accounting
63 namespace collector {
64 class SemiSpace;
65 }  // namespace collector
66 }  // namespace gc
67 
68 namespace instrumentation {
69 struct InstrumentationStackFrame;
70 }  // namespace instrumentation
71 
72 namespace mirror {
73 class Array;
74 class Class;
75 class ClassLoader;
76 class Object;
77 template<class T> class ObjectArray;
78 template<class T> class PrimitiveArray;
79 using IntArray = PrimitiveArray<int32_t>;
80 class StackTraceElement;
81 class String;
82 class Throwable;
83 }  // namespace mirror
84 
85 namespace verifier {
86 class VerifierDeps;
87 }  // namespace verifier
88 
89 class ArtMethod;
90 class BaseMutex;
91 class ClassLinker;
92 class Closure;
93 class Context;
94 class DeoptimizationContextRecord;
95 class DexFile;
96 class FrameIdToShadowFrame;
97 class IsMarkedVisitor;
98 class JavaVMExt;
99 class JNIEnvExt;
100 class Monitor;
101 class RootVisitor;
102 class ScopedObjectAccessAlreadyRunnable;
103 class ShadowFrame;
104 class StackedShadowFrameRecord;
105 class Thread;
106 class ThreadList;
107 enum VisitRootFlags : uint8_t;
108 
109 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
110 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
111 // on.
112 class TLSData {
113  public:
114   virtual ~TLSData() {}
115 };
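// Illustrative sketch (not part of this header) of how a TLSData subclass is typically used,
// assuming the SetCustomTLS()/GetCustomTLS() accessors that Thread declares further down:
//
//   class MyCounters : public TLSData {
//    public:
//     ~MyCounters() override {}  // Runs during thread shutdown, possibly on a different thread.
//     uint64_t allocations = 0;
//   };
//   ...
//   self->SetCustomTLS("my-counters", new MyCounters());
//   auto* counters = static_cast<MyCounters*>(self->GetCustomTLS("my-counters"));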
116 
117 // Thread priorities. These must match the Thread.MIN_PRIORITY,
118 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
119 enum ThreadPriority {
120   kMinThreadPriority = 1,
121   kNormThreadPriority = 5,
122   kMaxThreadPriority = 10,
123 };
124 
125 enum class ThreadFlag : uint32_t {
126   // If set, implies that suspend_count_ > 0 and the Thread should enter the safepoint handler.
127   kSuspendRequest = 1u << 0,
128 
129   // Request that the thread do some checkpoint work and then continue.
130   kCheckpointRequest = 1u << 1,
131 
132   // Request that the thread do empty checkpoint and then continue.
133   kEmptyCheckpointRequest = 1u << 2,
134 
135   // Register that at least 1 suspend barrier needs to be passed.
136   // Changes to this flag are guarded by suspend_count_lock_ .
137   kActiveSuspendBarrier = 1u << 3,
138 
139   // Marks that a "flip function" needs to be executed on this thread.
140   // Set only while holding thread_list_lock_.
141   kPendingFlipFunction = 1u << 4,
142 
143   // Marks that the "flip function" is being executed by another thread.
144   //
145   // This is used to guard against multiple threads trying to run the
146   // "flip function" for the same thread while the thread is suspended.
147   //
148   // Set when we have some way to ensure that the thread cannot disappear out from under us,
149   // either:
150   //   1) set by the thread itself,
151   //   2) set by a thread holding thread_list_lock_, or
152   //   3) set while the target has a pending suspension request.
153   // Once set, prevents a thread from exiting.
154   kRunningFlipFunction = 1u << 5,
155 
156   // We are responsible for resuming all other threads. We ignore suspension requests,
157   // but not checkpoint requests, until a more opportune time. GC code should
158   // in any case not check for such requests; other clients of SuspendAll might.
159   // Prevents a situation in which we are asked to suspend just before we suspend all
160   // other threads, and then notice the suspension request and suspend ourselves,
161   // leading to deadlock. Guarded by suspend_count_lock_ .
162   // Should not ever be set when we try to transition to kRunnable.
163   // TODO(b/296639267): Generalize use to prevent SuspendAll from blocking
164   // in-progress GC.
165   kSuspensionImmune = 1u << 6,
166 
167   // Request that compiled JNI stubs do not transition to Native or Runnable with
168   // inlined code, but take a slow path for monitoring method entry and exit events.
169   kMonitorJniEntryExit = 1u << 7,
170 
171   // Indicates the last flag. Used for checking that the flags do not overlap thread state.
172   kLastFlag = kMonitorJniEntryExit
173 };
174 
175 enum class StackedShadowFrameType {
176   kShadowFrameUnderConstruction,
177   kDeoptimizationShadowFrame,
178 };
179 
180 // The type of method that triggers deoptimization. It contains info on whether
181 // the deoptimized method should advance dex_pc.
182 enum class DeoptimizationMethodType {
183   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
184   kDefault     // dex pc may or may not advance depending on other conditions.
185 };
186 
187 // For the CC collector, normal weak reference access can be disabled on a per-thread basis while
188 // processing references.  After finishing, the reference processor asynchronously sets the
189 // per-thread flags back to kEnabled with release memory ordering semantics. Each mutator thread
190 // should check its flag with acquire semantics before assuming that it is enabled. However,
191 // that is often too expensive, so the reading thread sets it to kVisiblyEnabled after seeing it
192 // kEnabled.  The Reference.get() intrinsic can thus read it in relaxed mode, and reread (by
193 // resorting to the slow path) with acquire semantics if it sees a value of kEnabled rather than
194 // kVisiblyEnabled.
195 enum class WeakRefAccessState : int32_t {
196   kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
197   kEnabled,
198   kDisabled
199 };
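// The reader-side protocol described above, as an illustrative sketch only (the real accessors
// are Thread::GetWeakRefAccessEnabled()/SetWeakRefAccessEnabled() declared later in this file):
//
//   bool CanAccessWeakRefsFast(std::atomic<WeakRefAccessState>& state) {
//     WeakRefAccessState s = state.load(std::memory_order_relaxed);
//     if (s == WeakRefAccessState::kVisiblyEnabled) {
//       return true;  // Already confirmed with acquire ordering by this thread.
//     }
//     if (s == WeakRefAccessState::kEnabled &&
//         state.load(std::memory_order_acquire) == WeakRefAccessState::kEnabled) {
//       state.store(WeakRefAccessState::kVisiblyEnabled, std::memory_order_relaxed);
//       return true;  // Subsequent reads on this thread can stay relaxed.
//     }
//     return false;  // kDisabled (or not yet visibly enabled): fall back to the slow path.
//   }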
200 
201 // See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
202 struct WrappedSuspend1Barrier {
203   // TODO(b/23668816): At least weaken CHECKs to DCHECKs once the bug is fixed.
204   static constexpr int kMagic = 0xba8;
205   WrappedSuspend1Barrier() : magic_(kMagic), barrier_(1), next_(nullptr) {}
206   int magic_;
207   AtomicInteger barrier_;
208   struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
209 };
210 
211 // Mostly opaque structure allocated by the client of NotifyOnThreadExit.  Allows a client to
212 // check whether the thread still exists after temporarily releasing thread_list_lock_, usually
213 // because we need to wait for something.
214 class ThreadExitFlag {
215  public:
216   ThreadExitFlag() : exited_(false) {}
217   bool HasExited() REQUIRES(Locks::thread_list_lock_) { return exited_; }
218 
219  private:
220   // All ThreadExitFlags associated with a thread and with exited_ == false are in a doubly linked
221   // list.  tlsPtr_.thread_exit_flags points to the first element.  first.prev_ and last.next_ are
222   // null. This list contains no ThreadExitFlags with exited_ == true.
223   ThreadExitFlag* next_ GUARDED_BY(Locks::thread_list_lock_);
224   ThreadExitFlag* prev_ GUARDED_BY(Locks::thread_list_lock_);
225   bool exited_ GUARDED_BY(Locks::thread_list_lock_);
226   friend class Thread;
227 };
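// Illustrative sketch of the intended ThreadExitFlag protocol; this mirrors the pattern used by
// Thread::LockedGetPeerFromOtherThread() further down in this file:
//
//   ThreadExitFlag tef;
//   thread->NotifyOnThreadExit(&tef);          // Under Locks::thread_list_lock_.
//   // ... work that may temporarily release and re-acquire thread_list_lock_ ...
//   bool exited = tef.HasExited();             // Re-checked under thread_list_lock_.
//   thread->UnregisterThreadExitFlag(&tef);
//   // Only after unregistering (or after the thread has exited) may `tef` be deallocated.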
228 
229 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
230 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
231 
232 static constexpr size_t kSharedMethodHotnessThreshold = 0x1fff;
233 
234 // Thread's stack layout for implicit stack overflow checks:
235 //
236 //   +---------------------+  <- highest address of stack memory
237 //   |                     |
238 //   .                     .  <- SP
239 //   |                     |
240 //   |                     |
241 //   +---------------------+  <- stack_end
242 //   |                     |
243 //   |  Gap                |
244 //   |                     |
245 //   +---------------------+  <- stack_begin
246 //   |                     |
247 //   | Protected region    |
248 //   |                     |
249 //   +---------------------+  <- lowest address of stack memory
250 //
251 // The stack always grows down in memory.  At the lowest address is a region of memory
252 // that is protected with mprotect(PROT_NONE).  Any attempt to read/write to this region will
253 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
254 // between the stack_end and the highest address in stack memory.  An implicit stack
255 // overflow check is a read of memory at a certain offset below the current SP (8K typically).
256 // If the thread's SP is below the stack_end address this will be a read into the protected
257 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
258 // at least 8K of space.  Because stack overflow checks are only performed in generated code,
259 // if the thread makes a call out to a native function (through JNI), that native function
260 // might only have 4K of memory (if the SP is adjacent to stack_end).
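// A minimal sketch (not ART's generated code) of what an implicit stack overflow check amounts
// to: a load at a fixed distance below SP, which faults in the protected region if the thread is
// too close to the end of its usable stack. Names below are illustrative only.
//
//   inline void ProbeStackBelowSp(const uint8_t* sp, size_t probe_distance) {
//     // If sp - probe_distance lands in the mprotect(PROT_NONE) region, this read raises
//     // SIGSEGV, which the runtime's fault handler turns into a StackOverflowError.
//     volatile uint8_t probe = *(sp - probe_distance);
//     (void)probe;
//   }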
261 
262 class EXPORT Thread {
263  public:
264   static const size_t kStackOverflowImplicitCheckSize;
265   static constexpr bool kVerifyStack = kIsDebugBuild;
266 
267   // Creates a new native thread corresponding to the given managed peer.
268   // Used to implement Thread.start.
269   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
270 
271   // Attaches the calling native thread to the runtime, returning the new native peer.
272   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
273   static Thread* Attach(const char* thread_name,
274                         bool as_daemon,
275                         jobject thread_group,
276                         bool create_peer,
277                         bool should_run_callbacks);
278   // Attaches the calling native thread to the runtime, returning the new native peer.
279   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
280 
281   // Reset internal state of child thread after fork.
282   void InitAfterFork();
283 
284   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
285   // high cost and so we favor passing self around when possible.
286   // TODO: mark as PURE so the compiler may coalesce and remove?
287   static Thread* Current();
288 
289   // Get the thread from the JNI environment.
290   static Thread* ForEnv(JNIEnv* env);
291 
292   // For implicit overflow checks we reserve an extra piece of memory at the bottom of the stack
293   // (lowest memory). The higher portion of the memory is protected against reads and the lower is
294   // available for use while throwing the StackOverflow exception.
295   ALWAYS_INLINE static size_t GetStackOverflowProtectedSize();
296 
297   // On a runnable thread, check for pending thread suspension request and handle if pending.
298   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
299 
300   // Process pending thread suspension request and handle if pending.
301   void CheckSuspend(bool implicit = false) REQUIRES_SHARED(Locks::mutator_lock_);
302 
303   // Process a pending empty checkpoint if pending.
304   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
305   void CheckEmptyCheckpointFromMutex();
306 
307   static Thread* FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer)
308       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
309       REQUIRES_SHARED(Locks::mutator_lock_);
310   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
311       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
312       REQUIRES_SHARED(Locks::mutator_lock_);
313 
314   // Translates 172 to pAllocArrayFromCode and so on.
315   template<PointerSize size_of_pointers>
316   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
317 
318   // Dumps a one-line summary of thread state (used for operator<<).
319   void ShortDump(std::ostream& os) const;
320 
321   // Order of threads for ANRs (ANRs can be trimmed, so we print important ones first).
322   enum class DumpOrder : uint8_t {
323     kMain,     // Always print the main thread first (there might not be one).
324     kBlocked,  // Then print all threads that are blocked waiting on a lock.
325     kLocked,   // Then print all threads that are holding some lock already.
326     kDefault,  // Print all other threads which might not be interesting for ANR.
327   };
328 
329   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
330   DumpOrder Dump(std::ostream& os,
331                  bool dump_native_stack = true,
332                  bool force_dump_stack = false) const
333       REQUIRES_SHARED(Locks::mutator_lock_);
334   DumpOrder Dump(std::ostream& os,
335                  unwindstack::AndroidLocalUnwinder& unwinder,
336                  bool dump_native_stack = true,
337                  bool force_dump_stack = false) const
338       REQUIRES_SHARED(Locks::mutator_lock_);
339 
340   DumpOrder DumpJavaStack(std::ostream& os,
341                           bool check_suspended = true,
342                           bool dump_locks = true) const
343       REQUIRES_SHARED(Locks::mutator_lock_);
344 
345   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
346   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
347   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
348       REQUIRES_SHARED(Locks::mutator_lock_);
349 
350   ThreadState GetState() const {
351     return GetStateAndFlags(std::memory_order_relaxed).GetState();
352   }
353 
354   ThreadState SetState(ThreadState new_state);
355 
356   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
357     return tls32_.suspend_count;
358   }
359 
360   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
361                                                Locks::user_code_suspension_lock_) {
362     return tls32_.user_code_suspend_count;
363   }
364 
365   bool IsSuspended() const {
366     // We need to ensure that once we return true, all prior accesses to the Java data by "this"
367     // thread are complete. Hence we need "acquire" ordering here, and "release" when the flags
368     // are set.
369     StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_acquire);
370     return state_and_flags.GetState() != ThreadState::kRunnable &&
371            state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
372   }
373 
374   void DecrDefineClassCount() {
375     tls32_.define_class_counter--;
376   }
377 
378   void IncrDefineClassCount() {
379     tls32_.define_class_counter++;
380   }
381   uint32_t GetDefineClassCount() const {
382     return tls32_.define_class_counter;
383   }
384 
385   // Increment suspend count and optionally install at most one suspend barrier.
386   // Must hold thread_list_lock, OR be called with self == this, so that the Thread cannot
387   // disappear while we're running. If it's known that this == self, and thread_list_lock_
388   // is not held, FakeMutexLock should be used to fake-acquire thread_list_lock_ for
389   // static checking purposes.
390   ALWAYS_INLINE
391   void IncrementSuspendCount(Thread* self,
392                              AtomicInteger* suspendall_barrier,
393                              WrappedSuspend1Barrier* suspend1_barrier,
394                              SuspendReason reason) REQUIRES(Locks::thread_suspend_count_lock_)
395       REQUIRES(Locks::thread_list_lock_);
396 
397   // The same, but default reason to kInternal, and barriers to nullptr.
398   ALWAYS_INLINE void IncrementSuspendCount(Thread* self) REQUIRES(Locks::thread_suspend_count_lock_)
399       REQUIRES(Locks::thread_list_lock_);
400 
401   // Follows one of the above calls. For_user_code indicates if SuspendReason was kForUserCode.
402   // Generally will need to be closely followed by Thread::resume_cond_->Broadcast(self);
403   // since there may be waiters. DecrementSuspendCount() itself does not do this, since we often
404   // wake more than a single thread.
405   ALWAYS_INLINE void DecrementSuspendCount(Thread* self, bool for_user_code = false)
406       REQUIRES(Locks::thread_suspend_count_lock_);
407 
408  private:
409   NO_RETURN static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread);
410 
411  public:
412   // Requests a checkpoint closure to run on another thread. The closure will be run when the
413   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
414   // originating from a compiler generated suspend point check. This returns true if the closure
415   // was added and will (eventually) be executed. It returns false if this was impossible
416   // because the thread was suspended, and we thus did nothing.
417   //
418   // Since multiple closures can be queued and some closures can delay other threads from running,
419   // no closure should attempt to suspend another thread while running.
420   // TODO We should add some debug option that verifies this.
421   //
422   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
423   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
424   // acquires it.
425   bool RequestCheckpoint(Closure* function)
426       REQUIRES(Locks::thread_suspend_count_lock_);
427 
428   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
429   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
430   // execute the checkpoint for us if it is Runnable. The wait_state is the state that the thread
431   // will go into while it is awaiting the checkpoint to be run.
432   // The closure may be run on Thread::Current() on behalf of "this" thread.
433   // Thus for lock ordering purposes, the closure should be runnable by the caller. This also
434   // sometimes makes it reasonable to pass ThreadState::kRunnable as wait_state: We may wait on
435   // a condition variable for the "this" thread to act, but for lock ordering purposes, this is
436   // exactly as though Thread::Current() had run the closure.
437   // NB: Since multiple closures can be queued and some closures can delay other threads from running,
438   // no closure should attempt to suspend another thread while running.
439   bool RequestSynchronousCheckpoint(Closure* function,
440                                     ThreadState wait_state = ThreadState::kWaiting)
441       REQUIRES_SHARED(Locks::mutator_lock_) RELEASE(Locks::thread_list_lock_)
442           REQUIRES(!Locks::thread_suspend_count_lock_);
443 
444   bool RequestEmptyCheckpoint()
445       REQUIRES(Locks::thread_suspend_count_lock_);
446 
447   Closure* GetFlipFunction() { return tlsPtr_.flip_function.load(std::memory_order_relaxed); }
448 
449   // Set the flip function. This is done with all threads suspended, except for the calling thread.
450   void SetFlipFunction(Closure* function) REQUIRES(Locks::thread_suspend_count_lock_)
451       REQUIRES(Locks::thread_list_lock_);
452 
453   // Wait for the flip function to complete if still running on another thread. Assumes the "this"
454   // thread remains live.
455   void WaitForFlipFunction(Thread* self) const REQUIRES(!Locks::thread_suspend_count_lock_);
456 
457   // An enhanced version of the above that uses tef to safely return if the thread exited in the
458   // meantime.
459   void WaitForFlipFunctionTestingExited(Thread* self, ThreadExitFlag* tef)
460       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
461 
462   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
463     CHECK(gUseReadBarrier);
464     return tlsPtr_.thread_local_mark_stack;
465   }
466   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
467     CHECK(gUseReadBarrier);
468     tlsPtr_.thread_local_mark_stack = stack;
469   }
470 
471   uint8_t* GetThreadLocalGcBuffer() {
472     DCHECK(gUseUserfaultfd);
473     return tlsPtr_.thread_local_gc_buffer;
474   }
475   void SetThreadLocalGcBuffer(uint8_t* buf) {
476     DCHECK(gUseUserfaultfd);
477     tlsPtr_.thread_local_gc_buffer = buf;
478   }
479 
480   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
481   // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
482   // Should be called only when the kSuspensionImmune flag is clear. Requires this == Current();
483   void FullSuspendCheck(bool implicit = false)
484       REQUIRES(!Locks::thread_suspend_count_lock_)
485       REQUIRES_SHARED(Locks::mutator_lock_);
486 
487   // Transition from non-runnable to runnable state acquiring share on mutator_lock_. Returns the
488   // old state, or kInvalidState if we failed because allow_failure and kSuspensionImmune were set.
489   // Should not be called with an argument except by the next function below.
490   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable(bool fail_on_suspend_req = false)
491       REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
492 
493   // A version that does not return the old ThreadState, and fails by returning false if it would
494   // have needed to handle a pending suspension request.
495   ALWAYS_INLINE bool TryTransitionFromSuspendedToRunnable()
496       REQUIRES(!Locks::thread_suspend_count_lock_)
497       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
498     // The above function does not really acquire the lock when we pass true and it returns
499     // kInvalidState. We lie in both places, but clients see correct behavior.
500     return TransitionFromSuspendedToRunnable(true) != ThreadState::kInvalidState;
501   }
502 
503   // Transition from runnable into a state where mutator privileges are denied. Releases share of
504   // mutator lock.
505   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
506       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
507       UNLOCK_FUNCTION(Locks::mutator_lock_);
508 
509   // Once called thread suspension will cause an assertion failure.
510   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
511     Roles::uninterruptible_.Acquire();  // No-op.
512     if (kIsDebugBuild) {
513       CHECK(cause != nullptr);
514       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
515       tls32_.no_thread_suspension++;
516       tlsPtr_.last_no_thread_suspension_cause = cause;
517       return previous_cause;
518     } else {
519       return nullptr;
520     }
521   }
522 
523   // End region where no thread suspension is expected.
524   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
525     if (kIsDebugBuild) {
526       CHECK_IMPLIES(old_cause == nullptr, tls32_.no_thread_suspension == 1);
527       CHECK_GT(tls32_.no_thread_suspension, 0U);
528       tls32_.no_thread_suspension--;
529       tlsPtr_.last_no_thread_suspension_cause = old_cause;
530     }
531     Roles::uninterruptible_.Release();  // No-op.
532   }
533 
534   // End region where no thread suspension is expected. Returns the current open region in case we
535   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
536   // is larger than one.
537   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
538     const char* ret = nullptr;
539     if (kIsDebugBuild) {
540       CHECK_EQ(tls32_.no_thread_suspension, 1u);
541       tls32_.no_thread_suspension--;
542       ret = tlsPtr_.last_no_thread_suspension_cause;
543       tlsPtr_.last_no_thread_suspension_cause = nullptr;
544     }
545     Roles::uninterruptible_.Release();  // No-op.
546     return ret;
547   }
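  // Illustrative use of the assertion region above (ART wraps this pattern in
  // ScopedAssertNoThreadSuspension; the cause string here is just an example):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   // ... code that must not suspend; any thread suspension here is an assertion
  //   // failure in debug builds ...
  //   self->EndAssertNoThreadSuspension(old_cause);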
548 
549   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
550 
551   // Return true if thread suspension is allowable.
552   bool IsThreadSuspensionAllowable() const;
553 
554   bool IsDaemon() const {
555     return tls32_.daemon;
556   }
557 
558   size_t NumberOfHeldMutexes() const;
559 
560   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
561 
562   /*
563    * Changes the priority of this thread to match that of the java.lang.Thread object.
564    *
565    * We map a priority value from 1-10 to Linux "nice" values, where lower
566    * numbers indicate higher priority.
567    */
568   void SetNativePriority(int newPriority);
569 
570   /*
571    * Returns the priority of this thread by querying the system.
572    * This is useful when attaching a thread through JNI.
573    *
574    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
575    */
576   int GetNativePriority() const;
577 
578   // Guaranteed to be non-zero.
579   uint32_t GetThreadId() const {
580     return tls32_.thin_lock_thread_id;
581   }
582 
583   pid_t GetTid() const {
584     return tls32_.tid;
585   }
586 
587   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
588   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
589 
590   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
591   // allocation, or locking.
592   void GetThreadName(std::string& name) const;
593 
594   // Sets the thread's name.
595   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
596 
597   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
598   uint64_t GetCpuMicroTime() const;
599 
600   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
601     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
602     CHECK(tlsPtr_.jpeer == nullptr);
603     return tlsPtr_.opeer;
604   }
605   // GetPeer is not safe if called on another thread in the middle of the thread flip: the
606   // thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
607   // This function will force a flip for the other thread if necessary.
608   // Since we hold a shared mutator lock, a new flip function cannot be concurrently installed.
609   // The target thread must be suspended, so that it cannot disappear during the call.
610   // We should ideally not hold thread_list_lock_ . GetReferenceKind in ti_heap.cc, currently does
611   // hold it, but in a context in which we do not invoke EnsureFlipFunctionStarted().
612   mirror::Object* GetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_);
613 
614   // A version of the above that requires thread_list_lock_, but does not require the thread to
615   // be suspended. This may temporarily release thread_list_lock_. It thus needs a ThreadExitFlag
616   // describing the thread's status, so we can tell if it exited in the interim. Returns null if
617   // the thread exited.
618   mirror::Object* LockedGetPeerFromOtherThread(ThreadExitFlag* tef)
619       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::thread_list_lock_);
620 
621   // A convenience version of the above that creates the ThreadExitFlag locally. This is often
622   // unsafe if more than one thread is being processed. A prior call may have released
623   // thread_list_lock_, and thus the NotifyOnThreadExit() call here could see a deallocated
624   // Thread. We must hold the thread_list_lock continuously between obtaining the Thread*
625   // and calling NotifyOnThreadExit().
626   mirror::Object* LockedGetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_)
627       REQUIRES(Locks::thread_list_lock_) {
628     ThreadExitFlag tef;
629     NotifyOnThreadExit(&tef);
630     mirror::Object* result = LockedGetPeerFromOtherThread(&tef);
631     UnregisterThreadExitFlag(&tef);
632     return result;
633   }
634 
635   bool HasPeer() const {
636     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
637   }
638 
639   RuntimeStats* GetStats() {
640     return &tls64_.stats;
641   }
642 
643   bool IsStillStarting() const;
644 
645   bool IsExceptionPending() const {
646     return tlsPtr_.exception != nullptr;
647   }
648 
649   bool IsAsyncExceptionPending() const {
650     return tlsPtr_.async_exception != nullptr;
651   }
652 
653   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
654     return tlsPtr_.exception;
655   }
656 
657   void AssertPendingException() const;
658   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
659   void AssertNoPendingException() const;
660   void AssertNoPendingExceptionForNewException(const char* msg) const;
661 
662   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
663 
664   // Set an exception that is asynchronously thrown from a different thread. This will be checked
665   // periodically and might overwrite the current 'Exception'. This can only be called from a
666   // checkpoint.
667   //
668   // The caller should also make sure that the thread has been deoptimized so that the exception
669   // could be detected on back-edges.
670   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
671       REQUIRES_SHARED(Locks::mutator_lock_);
672 
673   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
674     tlsPtr_.exception = nullptr;
675   }
676 
677   // Move the current async-exception to the main exception. This should be called when the current
678   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
679   // that needs to be dealt with, false otherwise.
680   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
681 
682   // Find the catch block and perform a long jump to the appropriate exception handler. When
683   // is_method_exit_exception is true, the exception was thrown by the method exit callback and we
684   // should not send a method unwind event for the method on top of the stack, since the method
685   // exit callback was already called.
686   NO_RETURN void QuickDeliverException(bool is_method_exit_exception = false)
687       REQUIRES_SHARED(Locks::mutator_lock_);
688 
689   Context* GetLongJumpContext();
690   void ReleaseLongJumpContext(Context* context) {
691     if (tlsPtr_.long_jump_context != nullptr) {
692       ReleaseLongJumpContextInternal();
693     }
694     tlsPtr_.long_jump_context = context;
695   }
696 
697   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
698   // abort the runtime iff abort_on_error is true.
699   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
700                               bool check_suspended = true,
701                               bool abort_on_error = true) const
702       REQUIRES_SHARED(Locks::mutator_lock_);
703 
704   // Returns whether the given exception was thrown by the current Java method being executed
705   // (Note that this includes native Java methods).
706   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
707       REQUIRES_SHARED(Locks::mutator_lock_);
708 
709   void SetTopOfStack(ArtMethod** top_method) {
710     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
711   }
712 
713   void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
714     tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
715   }
716 
717   void SetTopOfShadowStack(ShadowFrame* top) {
718     tlsPtr_.managed_stack.SetTopShadowFrame(top);
719   }
720 
721   bool HasManagedStack() const {
722     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
723   }
724 
725   // If 'msg' is null, no detail message is set.
726   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
727       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
728 
729   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
730   // used as the new exception's cause.
731   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
732       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
733 
734   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
735       __attribute__((format(printf, 3, 4)))
736       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
737 
738   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
739       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
740 
741   // OutOfMemoryError is special, because we need to pre-allocate an instance.
742   // Only the GC should call this.
743   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
744       REQUIRES(!Roles::uninterruptible_);
745 
746   static void Startup();
747   static void FinishStartup();
748   static void Shutdown();
749 
750   // Notify this thread's thread-group that this thread has started.
751   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
752   //       is null, the thread's thread-group is loaded from the peer.
753   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
754       REQUIRES_SHARED(Locks::mutator_lock_);
755 
756   // Request notification when this thread is unregistered, typically because it has exited.
757   //
758   // The ThreadExitFlag status is only changed when we remove the thread from the thread list,
759   // which we only do once no suspend requests are outstanding, and no flip-functions are still
760   // running.
761   //
762   // The caller must allocate a fresh ThreadExitFlag, and pass it in. The caller is responsible
763   // for either waiting until the thread has exited, or unregistering the ThreadExitFlag, and
764   // then, and only then, deallocating the ThreadExitFlag.  (This scheme avoids an allocation and
765   // questions about what to do if the allocation fails. Allows detection of thread exit after
766   // temporary release of thread_list_lock_)
767   void NotifyOnThreadExit(ThreadExitFlag* tef) REQUIRES(Locks::thread_list_lock_);
768   void UnregisterThreadExitFlag(ThreadExitFlag* tef) REQUIRES(Locks::thread_list_lock_);
769 
770   // Is the ThreadExitFlag currently registered in this thread, which has not yet terminated?
771   // Intended only for testing.
772   bool IsRegistered(ThreadExitFlag* query_tef) REQUIRES(Locks::thread_list_lock_);
773 
774   // For debuggable builds, CHECK that neither first nor last, nor any ThreadExitFlag with an
775   // address in-between, is currently registered with any thread.
776   static void DCheckUnregisteredEverywhere(ThreadExitFlag* first, ThreadExitFlag* last)
777       REQUIRES(!Locks::thread_list_lock_);
778 
779   // Called when thread is unregistered. May be called repeatedly, in which case only newly
780   // registered clients are processed.
781   void SignalExitFlags() REQUIRES(Locks::thread_list_lock_);
782 
783   // JNI methods
784   JNIEnvExt* GetJniEnv() const {
785     return tlsPtr_.jni_env;
786   }
787 
788   // Convert a jobject into an Object*
789   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
790   // Checks if the weak global ref has been cleared by the GC without decoding it.
791   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
792 
793   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
794     return tlsPtr_.monitor_enter_object;
795   }
796 
797   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
798     tlsPtr_.monitor_enter_object = obj;
799   }
800 
801   // Implements java.lang.Thread.interrupted.
802   bool Interrupted();
803   // Implements java.lang.Thread.isInterrupted.
804   bool IsInterrupted();
805   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
806   void SetInterrupted(bool i) {
807     tls32_.interrupted.store(i, std::memory_order_seq_cst);
808   }
809   void Notify() REQUIRES(!wait_mutex_);
810 
811   ALWAYS_INLINE void PoisonObjectPointers() {
812     ++poison_object_cookie_;
813   }
814 
815   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
816 
817   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
818     return poison_object_cookie_;
819   }
820 
821   // Parking for 0ns of relative time means an untimed park; a negative time (though it
822   // should be handled in Java code) returns immediately.
823   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
824   void Unpark();
825 
826  private:
827   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
828 
829  public:
830   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
831     return wait_mutex_;
832   }
833 
834   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
835     return wait_cond_;
836   }
837 
838   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
839     return wait_monitor_;
840   }
841 
842   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
843     wait_monitor_ = mon;
844   }
845 
846   // Waiter linked-list support.
847   Thread* GetWaitNext() const {
848     return tlsPtr_.wait_next;
849   }
850 
851   void SetWaitNext(Thread* next) {
852     tlsPtr_.wait_next = next;
853   }
854 
855   jobject GetClassLoaderOverride() {
856     return tlsPtr_.class_loader_override;
857   }
858 
859   void SetClassLoaderOverride(jobject class_loader_override);
860 
861   // Create the internal representation of a stack trace, which is more time-
862   // and space-efficient to compute than the StackTraceElement[].
863   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
864       REQUIRES_SHARED(Locks::mutator_lock_);
865 
866   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
867   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
868   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
869   // with the number of valid frames in the returned array.
870   static jobjectArray InternalStackTraceToStackTraceElementArray(
871       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
872       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
873       REQUIRES_SHARED(Locks::mutator_lock_);
874 
875   static jint InternalStackTraceToStackFrameInfoArray(
876       const ScopedObjectAccessAlreadyRunnable& soa,
877       jlong mode,  // See java.lang.StackStreamFactory for the mode flags
878       jobject internal,
879       jint startLevel,
880       jint batchSize,
881       jint startIndex,
882       jobjectArray output_array)  // java.lang.StackFrameInfo[]
883       REQUIRES_SHARED(Locks::mutator_lock_);
884 
885   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
886       REQUIRES_SHARED(Locks::mutator_lock_);
887 
888   bool HasDebuggerShadowFrames() const {
889     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
890   }
891 
892   // This is done by GC using a checkpoint (or in a stop-the-world pause).
893   void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
894 
895   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
896       REQUIRES_SHARED(Locks::mutator_lock_);
897 
898   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
899       REQUIRES(Locks::mutator_lock_);
900 
901   // Check that the thread state is valid. Try to fail if the thread has erroneously terminated.
902   // Note that once the thread has been terminated, it can also be deallocated.  But even if the
903   // thread state has been overwritten, the value is unlikely to be in the correct range.
904   void VerifyState() {
905     if (kIsDebugBuild) {
906       ThreadState state = GetState();
907       StateAndFlags::ValidateThreadState(state);
908       DCHECK_NE(state, ThreadState::kTerminated);
909     }
910   }
911 
912   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
913     if (kVerifyStack) {
914       VerifyStackImpl();
915     }
916   }
917 
918   //
919   // Offsets of various members of native Thread class, used by compiled code.
920   //
921 
922   template<PointerSize pointer_size>
923   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
924     return ThreadOffset<pointer_size>(
925         OFFSETOF_MEMBER(Thread, tls32_) +
926         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
927   }
928 
929   template<PointerSize pointer_size>
930   static constexpr ThreadOffset<pointer_size> TidOffset() {
931     return ThreadOffset<pointer_size>(
932         OFFSETOF_MEMBER(Thread, tls32_) +
933         OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
934   }
935 
936   template<PointerSize pointer_size>
937   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
938     return ThreadOffset<pointer_size>(
939         OFFSETOF_MEMBER(Thread, tls32_) +
940         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
941   }
942 
943   template<PointerSize pointer_size>
944   static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
945     return ThreadOffset<pointer_size>(
946         OFFSETOF_MEMBER(Thread, tls32_) +
947         OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
948   }
949 
950   template<PointerSize pointer_size>
951   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
952     return ThreadOffset<pointer_size>(
953         OFFSETOF_MEMBER(Thread, tls32_) +
954         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
955   }
956 
957   template<PointerSize pointer_size>
958   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
959     return ThreadOffset<pointer_size>(
960         OFFSETOF_MEMBER(Thread, tls32_) +
961         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
962   }
963 
964   template <PointerSize pointer_size>
965   static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
966     return ThreadOffset<pointer_size>(
967         OFFSETOF_MEMBER(Thread, tls32_) +
968         OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
969   }
970 
971   static constexpr size_t IsGcMarkingSize() {
972     return sizeof(tls32_.is_gc_marking);
973   }
974 
975   template<PointerSize pointer_size>
976   static constexpr ThreadOffset<pointer_size> SharedMethodHotnessOffset() {
977     return ThreadOffset<pointer_size>(
978         OFFSETOF_MEMBER(Thread, tls32_) +
979         OFFSETOF_MEMBER(tls_32bit_sized_values, shared_method_hotness));
980   }
981 
982   // Deoptimize the Java stack.
983   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
984 
985  private:
986   template<PointerSize pointer_size>
987   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
988     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
989     size_t scale = (pointer_size > kRuntimePointerSize) ?
990       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
991     size_t shrink = (kRuntimePointerSize > pointer_size) ?
992       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
993     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
994   }
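  // Worked example (illustrative): with kRuntimePointerSize == PointerSize::k64 and
  // pointer_size == PointerSize::k32, scale is 1 and shrink is 2, so a tlsPtr_ offset of 16 in
  // the 64-bit layout maps to 16 / 2 = 8 in the 32-bit layout; in the opposite configuration the
  // offset is scaled up by 2 instead.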
995 
996  public:
997   template<PointerSize pointer_size>
998   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
999       size_t quick_entrypoint_offset) {
1000     return ThreadOffsetFromTlsPtr<pointer_size>(
1001         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
1002   }
1003 
1004   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
1005                                                           PointerSize pointer_size) {
1006     if (pointer_size == PointerSize::k32) {
1007       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
1008           Uint32Value();
1009     } else {
1010       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
1011           Uint32Value();
1012     }
1013   }
1014 
1015   template<PointerSize pointer_size>
1016   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
1017     return ThreadOffsetFromTlsPtr<pointer_size>(
1018         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
1019   }
1020 
1021   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
1022   template <PointerSize pointer_size>
1023   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
1024     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
1025     DCHECK_LT(reg, 30u);
1026     // The ReadBarrierMarkRegX entry points are ordered by increasing
1027     // register number in Thread::tlsPtr_.quick_entrypoints.
1028     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
1029         + static_cast<size_t>(pointer_size) * reg;
1030   }
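  // Worked example (illustrative): for pointer_size == PointerSize::k64 and reg == 3, the result
  // is the offset of pReadBarrierMarkReg00 plus 8 * 3, i.e. the offset of pReadBarrierMarkReg03,
  // since the entries are laid out consecutively by register number.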
1031 
1032   template<PointerSize pointer_size>
1033   static constexpr ThreadOffset<pointer_size> SelfOffset() {
1034     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
1035   }
1036 
1037   template<PointerSize pointer_size>
1038   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
1039     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
1040   }
1041 
1042   template<PointerSize pointer_size>
1043   static constexpr ThreadOffset<pointer_size> PeerOffset() {
1044     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
1045   }
1046 
1047 
1048   template<PointerSize pointer_size>
1049   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
1050     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
1051   }
1052 
1053   template<PointerSize pointer_size>
1054   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
1055     return ThreadOffsetFromTlsPtr<pointer_size>(
1056         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
1057   }
1058 
1059   template<PointerSize pointer_size>
1060   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
1061     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1062                                                                 thread_local_pos));
1063   }
1064 
1065   template<PointerSize pointer_size>
1066   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
1067     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1068                                                                 thread_local_end));
1069   }
1070 
1071   template<PointerSize pointer_size>
1072   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
1073     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1074                                                                 thread_local_objects));
1075   }
1076 
1077   template<PointerSize pointer_size>
1078   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
1079     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1080                                                                 rosalloc_runs));
1081   }
1082 
1083   template<PointerSize pointer_size>
1084   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
1085     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1086                                                                 thread_local_alloc_stack_top));
1087   }
1088 
1089   template<PointerSize pointer_size>
1090   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
1091     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1092                                                                 thread_local_alloc_stack_end));
1093   }
1094 
1095   template <PointerSize pointer_size>
1096   static constexpr ThreadOffset<pointer_size> TraceBufferIndexOffset() {
1097     return ThreadOffsetFromTlsPtr<pointer_size>(
1098         OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_index));
1099   }
1100 
1101   template <PointerSize pointer_size>
1102   static constexpr ThreadOffset<pointer_size> TraceBufferPtrOffset() {
1103     return ThreadOffsetFromTlsPtr<pointer_size>(
1104         OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer));
1105   }
1106 
1107   // Size of stack less any space reserved for stack overflow
1108   size_t GetStackSize() const {
1109     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
1110   }
1111 
1112   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
1113 
1114   uint8_t* GetStackEnd() const {
1115     return tlsPtr_.stack_end;
1116   }
1117 
1118   // Set the stack end to the value to be used during a stack overflow.
1119   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
1120 
1121   // Set the stack end to the value to be used during regular execution.
1122   ALWAYS_INLINE void ResetDefaultStackEnd();
1123 
1124   bool IsHandlingStackOverflow() const {
1125     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
1126   }
1127 
1128   template<PointerSize pointer_size>
1129   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
1130     return ThreadOffsetFromTlsPtr<pointer_size>(
1131         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
1132   }
1133 
1134   template<PointerSize pointer_size>
1135   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
1136     return ThreadOffsetFromTlsPtr<pointer_size>(
1137         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
1138   }
1139 
1140   template<PointerSize pointer_size>
1141   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
1142     return ThreadOffsetFromTlsPtr<pointer_size>(
1143         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1144         ManagedStack::TaggedTopQuickFrameOffset());
1145   }
1146 
1147   const ManagedStack* GetManagedStack() const {
1148     return &tlsPtr_.managed_stack;
1149   }
1150 
1151   // Linked list recording fragments of managed stack.
1152   void PushManagedStackFragment(ManagedStack* fragment) {
1153     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
1154   }
1155   void PopManagedStackFragment(const ManagedStack& fragment) {
1156     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
1157   }
1158 
1159   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
1160   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
1161 
1162   template<PointerSize pointer_size>
1163   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
1164     return ThreadOffsetFromTlsPtr<pointer_size>(
1165         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1166         ManagedStack::TopShadowFrameOffset());
1167   }
1168 
1169   // Is the given obj in one of this thread's JNI transition frames?
1170   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
1171 
1172   // Convert a global (or weak global) jobject into an Object*
1173   ObjPtr<mirror::Object> DecodeGlobalJObject(jobject obj) const
1174       REQUIRES_SHARED(Locks::mutator_lock_);
1175 
1176   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
1177       REQUIRES_SHARED(Locks::mutator_lock_);
1178 
1179   BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1180     return tlsPtr_.top_handle_scope;
1181   }
1182 
1183   void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
1184     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
1185     tlsPtr_.top_handle_scope = handle_scope;
1186   }
1187 
1188   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1189     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
1190     DCHECK(handle_scope != nullptr);
1191     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
1192     return handle_scope;
1193   }
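
  // Illustrative sketch (not part of the API): the pushes and pops above form a LIFO chain, so
  // a hypothetical caller with a scope object whose link was initialized to the current top
  // might do, assuming `self == Thread::Current()`:
  //
  //   self->PushHandleScope(&scope);            // scope.GetLink() must equal the old top.
  //   ...                                       // create handles inside `scope`.
  //   BaseHandleScope* popped = self->PopHandleScope();
  //   DCHECK_EQ(popped, &scope);                // pops mirror pushes in reverse order.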
1194 
1195   template<PointerSize pointer_size>
1196   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
1197     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1198                                                                 top_handle_scope));
1199   }
1200 
1201   template<PointerSize pointer_size>
1202   static constexpr ThreadOffset<pointer_size> MutatorLockOffset() {
1203     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1204                                                                 mutator_lock));
1205   }
1206 
1207   template<PointerSize pointer_size>
1208   static constexpr ThreadOffset<pointer_size> HeldMutexOffset(LockLevel level) {
1209     DCHECK_LT(enum_cast<size_t>(level), arraysize(tlsPtr_.held_mutexes));
1210     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1211                                                                 held_mutexes[level]));
1212   }
1213 
1214   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
1215     return tlsPtr_.top_reflective_handle_scope;
1216   }
1217 
1218   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
1219     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
1220     DCHECK_EQ(scope->GetThread(), this);
1221     tlsPtr_.top_reflective_handle_scope = scope;
1222   }
1223 
1224   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
1225     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
1226     DCHECK(handle_scope != nullptr);
1227     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
1228     return handle_scope;
1229   }
1230 
1231   bool GetIsGcMarking() const {
1232     DCHECK(gUseReadBarrier);
1233     return tls32_.is_gc_marking;
1234   }
1235 
1236   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
1237 
1238   bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
1239 
1240   void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
1241 
1242   bool GetWeakRefAccessEnabled() const;  // Only safe for current thread.
1243 
1244   void SetWeakRefAccessEnabled(bool enabled) {
1245     DCHECK(gUseReadBarrier);
1246     WeakRefAccessState new_state = enabled ?
1247         WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
1248     tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
1249   }
1250 
1251   uint32_t GetDisableThreadFlipCount() const {
1252     return tls32_.disable_thread_flip_count;
1253   }
1254 
1255   void IncrementDisableThreadFlipCount() {
1256     ++tls32_.disable_thread_flip_count;
1257   }
1258 
1259   void DecrementDisableThreadFlipCount() {
1260     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1261     --tls32_.disable_thread_flip_count;
1262   }
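
  // Illustrative sketch: a hypothetical caller entering a JNI critical section would bracket
  // the region with the counter accessors above, assuming `self == Thread::Current()`:
  //
  //   self->IncrementDisableThreadFlipCount();
  //   ...  // region during which a thread flip must not start for this thread.
  //   self->DecrementDisableThreadFlipCount();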
1263 
1264   // Returns true if the thread is a runtime thread (e.g. from a ThreadPool).
1265   bool IsRuntimeThread() const {
1266     return is_runtime_thread_;
1267   }
1268 
1269   void SetIsRuntimeThread(bool is_runtime_thread) {
1270     is_runtime_thread_ = is_runtime_thread;
1271   }
1272 
1273   uint32_t CorePlatformApiCookie() {
1274     return core_platform_api_cookie_;
1275   }
1276 
1277   void SetCorePlatformApiCookie(uint32_t cookie) {
1278     core_platform_api_cookie_ = cookie;
1279   }
1280 
1281   // Returns true if the thread is allowed to load java classes.
1282   bool CanLoadClasses() const;
1283 
1284   // Returns the fake exception used to activate deoptimization.
1285   static mirror::Throwable* GetDeoptimizationException() {
1286     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1287     // represented by ObjPtr.
1288     return reinterpret_cast<mirror::Throwable*>(0x100);
1289   }
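
  // Illustrative sketch: exception-delivery code can test a pending exception against this
  // sentinel instead of treating it as a real object (assuming a GetException() accessor as
  // declared elsewhere in this class):
  //
  //   if (self->GetException() == Thread::GetDeoptimizationException()) {
  //     ...  // take the deoptimization path rather than normal exception delivery.
  //   }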
1290 
1291   // Currently deoptimization invokes verifier which can trigger class loading
1292   // and execute Java code, so there might be nested deoptimizations happening.
1293   // We need to save the ongoing deoptimization shadow frames and return
1294   // values on stacks.
1295   // 'from_code' denotes whether the deoptimization was explicitly made from
1296   // compiled code.
1297   // 'method_type' contains info on whether deoptimization should advance
1298   // dex_pc.
1299   void PushDeoptimizationContext(const JValue& return_value,
1300                                  bool is_reference,
1301                                  ObjPtr<mirror::Throwable> exception,
1302                                  bool from_code,
1303                                  DeoptimizationMethodType method_type)
1304       REQUIRES_SHARED(Locks::mutator_lock_);
1305   void PopDeoptimizationContext(JValue* result,
1306                                 ObjPtr<mirror::Throwable>* exception,
1307                                 bool* from_code,
1308                                 DeoptimizationMethodType* method_type)
1309       REQUIRES_SHARED(Locks::mutator_lock_);
1310   void AssertHasDeoptimizationContext()
1311       REQUIRES_SHARED(Locks::mutator_lock_);
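
  // Illustrative sketch of how the push/pop pair above is used (argument values are
  // hypothetical; the DeoptimizationMethodType enumerator shown is only an example):
  //
  //   self->PushDeoptimizationContext(result, /*is_reference=*/false, exception,
  //                                   /*from_code=*/true, DeoptimizationMethodType::kDefault);
  //   ...  // later, when the interpreter resumes the deoptimized frames:
  //   JValue value;
  //   ObjPtr<mirror::Throwable> pending;
  //   bool from_code;
  //   DeoptimizationMethodType method_type;
  //   self->PopDeoptimizationContext(&value, &pending, &from_code, &method_type);
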
1312   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1313   ShadowFrame* PopStackedShadowFrame();
1314   ShadowFrame* MaybePopDeoptimizedStackedShadowFrame();
1315 
1316   // For debugger, find the shadow frame that corresponds to a frame id.
1317   // Or return null if there is none.
1318   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1319       REQUIRES_SHARED(Locks::mutator_lock_);
1320   // For debugger, find the bool array that keeps track of the updated vreg set
1321   // for a frame id.
1322   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1323   // For debugger, find the shadow frame that corresponds to a frame id. If
1324   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1325   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1326                                                uint32_t num_vregs,
1327                                                ArtMethod* method,
1328                                                uint32_t dex_pc)
1329       REQUIRES_SHARED(Locks::mutator_lock_);
1330 
1331   // Delete the entry that maps from frame_id to shadow_frame.
1332   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1333       REQUIRES_SHARED(Locks::mutator_lock_);
1334 
1335   std::vector<ArtMethod*>* GetStackTraceSample() const {
1336     DCHECK(!IsAotCompiler());
1337     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1338   }
1339 
1340   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1341     DCHECK(!IsAotCompiler());
1342     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1343   }
1344 
1345   verifier::VerifierDeps* GetVerifierDeps() const {
1346     DCHECK(IsAotCompiler());
1347     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1348   }
1349 
1350   // It is the responsibility of the caller to make sure the verifier_deps
1351   // entry in the thread is cleared before destruction of the actual VerifierDeps
1352   // object, or of the thread.
1353   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1354     DCHECK(IsAotCompiler());
1355     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1356     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1357   }
1358 
1359   uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
1360 
1361   size_t* GetMethodTraceIndexPtr() { return &tlsPtr_.method_trace_buffer_index; }
1362 
1363   uintptr_t* SetMethodTraceBuffer(uintptr_t* buffer) {
1364     return tlsPtr_.method_trace_buffer = buffer;
1365   }
1366 
1367   uint64_t GetTraceClockBase() const {
1368     return tls64_.trace_clock_base;
1369   }
1370 
1371   void SetTraceClockBase(uint64_t clock_base) {
1372     tls64_.trace_clock_base = clock_base;
1373   }
1374 
1375   BaseMutex* GetHeldMutex(LockLevel level) const {
1376     return tlsPtr_.held_mutexes[level];
1377   }
1378 
1379   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1380     tlsPtr_.held_mutexes[level] = mutex;
1381   }
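
  // Illustrative sketch: a lock implementation could record acquisitions through the two
  // accessors above to support the hierarchy checks (hypothetical, error paths omitted):
  //
  //   DCHECK(self->GetHeldMutex(level) == nullptr);  // nothing recorded yet at this level.
  //   self->SetHeldMutex(level, mutex);              // record for lock-ordering checks.
  //   ...                                            // critical section.
  //   self->SetHeldMutex(level, nullptr);            // clear on release.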
1382 
1383   // Possibly check that no mutexes at level kMonitorLock or above are subsequently acquired.
1384   // Only invoked by the thread itself.
1385   void DisallowPreMonitorMutexes();
1386 
1387   // Undo the effect of the previous call. Again only invoked by the thread itself.
1388   void AllowPreMonitorMutexes();
1389 
1390   bool ReadFlag(ThreadFlag flag) const {
1391     return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag);
1392   }
1393 
1394   void AtomicSetFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1395     // Since we discard the returned value, memory_order_release will often suffice.
1396     tls32_.state_and_flags.fetch_or(enum_cast<uint32_t>(flag), order);
1397   }
1398 
1399   void AtomicClearFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1400     // Since we discard the returned value, memory_order_release will often suffice.
1401     tls32_.state_and_flags.fetch_and(~enum_cast<uint32_t>(flag), order);
1402   }
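
  // Illustrative sketch of the flag helpers above (the memory orders shown are examples, not
  // the orders any particular caller uses):
  //
  //   self->AtomicSetFlag(ThreadFlag::kCheckpointRequest, std::memory_order_release);
  //   if (self->ReadFlag(ThreadFlag::kCheckpointRequest)) {
  //     ...  // a checkpoint is pending for this thread.
  //   }
  //   self->AtomicClearFlag(ThreadFlag::kCheckpointRequest, std::memory_order_relaxed);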
1403 
1404   void ResetQuickAllocEntryPointsForThread();
1405 
1406   // Returns the remaining space in the TLAB.
1407   size_t TlabSize() const {
1408     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1409   }
1410 
1411   // Returns the offset of thread_local_pos from thread_local_start.
1412   size_t GetTlabPosOffset() const {
1413     return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
1414   }
1415 
1416   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1417   size_t TlabRemainingCapacity() const {
1418     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1419   }
1420 
1421   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1422   void ExpandTlab(size_t bytes) {
1423     tlsPtr_.thread_local_end += bytes;
1424     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1425   }
1426 
1427   // Called from Concurrent mark-compact GC to slide the TLAB pointers backwards
1428   // to adjust to post-compact addresses.
1429   void AdjustTlab(size_t slide_bytes);
1430 
1431   // Doesn't check that there is room.
1432   mirror::Object* AllocTlab(size_t bytes);
1433   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1434   bool HasTlab() const;
1435   void ResetTlab();
1436   uint8_t* GetTlabStart() {
1437     return tlsPtr_.thread_local_start;
1438   }
1439   uint8_t* GetTlabPos() {
1440     return tlsPtr_.thread_local_pos;
1441   }
1442   uint8_t* GetTlabEnd() {
1443     return tlsPtr_.thread_local_end;
1444   }
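
  // Illustrative sketch of the pointer arithmetic behind the TLAB accessors above, assuming
  // thread_local_start <= thread_local_pos <= thread_local_end <= thread_local_limit:
  //
  //   size_t used       = self->GetTlabPosOffset();       // pos - start
  //   size_t remaining  = self->TlabSize();                // end - pos
  //   size_t expandable = self->TlabRemainingCapacity();   // limit - pos
  //   DCHECK_LE(remaining, expandable);
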
1445   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1446   // equal to a valid pointer.
1447   void RemoveSuspendTrigger() {
1448     tlsPtr_.suspend_trigger.store(reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger),
1449                                   std::memory_order_relaxed);
1450   }
1451 
1452   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1453   // The next time a suspend check is done, it will load from the value at this address
1454   // and trigger a SIGSEGV.
1455   // Only needed if Runtime::implicit_suspend_checks_ is true. On some platforms, and in the
1456   // interpreter, client code currently just looks at the thread flags directly to determine
1457   // whether we should suspend, so this call is not always necessary.
1458   void TriggerSuspend() { tlsPtr_.suspend_trigger.store(nullptr, std::memory_order_release); }
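
  // Illustrative sketch: with implicit suspend checks, generated code periodically loads
  // through the trigger; once TriggerSuspend() has stored nullptr that load faults and the
  // fault handler turns the SIGSEGV into a suspend check. Conceptually:
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger.load(std::memory_order_relaxed);
  //   uintptr_t unused = *trigger;  // faults once the trigger has been cleared to nullptr.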
1459 
1460   // Push an object onto the allocation stack.
1461   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1462       REQUIRES_SHARED(Locks::mutator_lock_);
1463 
1464   // Set the thread local allocation pointers to the given pointers.
1465   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1466                                      StackReference<mirror::Object>* end);
1467 
1468   // Resets the thread local allocation pointers.
1469   void RevokeThreadLocalAllocationStack();
1470 
1471   size_t GetThreadLocalBytesAllocated() const {
1472     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1473   }
1474 
1475   size_t GetThreadLocalObjectsAllocated() const {
1476     return tlsPtr_.thread_local_objects;
1477   }
1478 
1479   void* GetRosAllocRun(size_t index) const {
1480     return tlsPtr_.rosalloc_runs[index];
1481   }
1482 
1483   void SetRosAllocRun(size_t index, void* run) {
1484     tlsPtr_.rosalloc_runs[index] = run;
1485   }
1486 
1487   bool ProtectStack(bool fatal_on_error = true);
1488   bool UnprotectStack();
1489 
1490   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1491     return --tls32_.force_interpreter_count;
1492   }
1493 
1494   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1495     return ++tls32_.force_interpreter_count;
1496   }
1497 
1498   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1499     tls32_.force_interpreter_count = value;
1500   }
1501 
1502   uint32_t ForceInterpreterCount() const {
1503     return tls32_.force_interpreter_count;
1504   }
1505 
1506   bool IsForceInterpreter() const {
1507     return tls32_.force_interpreter_count != 0;
1508   }
1509 
1510   bool IncrementMakeVisiblyInitializedCounter() {
1511     tls32_.make_visibly_initialized_counter += 1u;
1512     DCHECK_LE(tls32_.make_visibly_initialized_counter, kMakeVisiblyInitializedCounterTriggerCount);
1513     if (tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount) {
1514       tls32_.make_visibly_initialized_counter = 0u;
1515       return true;
1516     }
1517     return false;
1518   }
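
  // Illustrative sketch: a hypothetical class-initialization path calls the helper above and
  // only pays for a batch flush every kMakeVisiblyInitializedCounterTriggerCount calls:
  //
  //   if (self->IncrementMakeVisiblyInitializedCounter()) {
  //     ...  // ask the runtime to make initialized classes visibly initialized.
  //   }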
1519 
1520   void InitStringEntryPoints();
1521 
1522   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1523     if (kCheckDebugDisallowReadBarrierCount) {
1524       debug_disallow_read_barrier_ += delta;
1525     }
1526   }
1527 
1528   uint8_t GetDebugDisallowReadBarrierCount() const {
1529     return kCheckDebugDisallowReadBarrierCount ? debug_disallow_read_barrier_ : 0u;
1530   }
1531 
1532   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1533   // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1534   // it from being deleted.
1535   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1536 
1537   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1538   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1539   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
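
  // Illustrative sketch: a plugin could stash per-thread data under a string key (the key and
  // the MyCounter type below are hypothetical):
  //
  //   class MyCounter : public TLSData {
  //    public:
  //     int value = 0;
  //   };
  //   self->SetCustomTLS("my-plugin-counter", new MyCounter());  // thread takes ownership.
  //   TLSData* data = self->GetCustomTLS("my-plugin-counter");   // may return nullptr.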
1540 
1541   // Returns true if the current thread is the jit sensitive thread.
1542   bool IsJitSensitiveThread() const {
1543     return this == jit_sensitive_thread_;
1544   }
1545 
1546   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1547 
1548   // Cause 'this' thread to abort the process by sending SIGABRT, so we get an
1549   // asynchronous stack trace for 'this' thread rather than waiting for it to process a
1550   // checkpoint. Useful mostly to discover why a thread isn't responding to a suspend request or
1551   // checkpoint. The caller should "suspend" (in the Java sense) 'this' thread before invoking
1552   // this, so the Thread object can't get deallocated before we access it.
1553   NO_RETURN void AbortInThis(const std::string& message);
1554 
1555   // Returns true if StrictMode events are traced for the current thread.
1556   static bool IsSensitiveThread() {
1557     if (is_sensitive_thread_hook_ != nullptr) {
1558       return (*is_sensitive_thread_hook_)();
1559     }
1560     return false;
1561   }
1562 
1563   // Set the read barrier marking entrypoints to be non-null.
1564   void SetReadBarrierEntrypoints();
1565 
1566   ObjPtr<mirror::Object> CreateCompileTimePeer(const char* name,
1567                                                bool as_daemon,
1568                                                jobject thread_group)
1569       REQUIRES_SHARED(Locks::mutator_lock_);
1570 
1571   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1572     return &interpreter_cache_;
1573   }
1574 
1575   // Clear all thread-local interpreter caches.
1576   //
1577   // Since the caches are keyed by memory pointer to dex instructions, this must be
1578   // called when any dex code is unloaded (before different code gets loaded at the
1579   // same memory location).
1580   //
1581   // If the presence of a cache entry implies some pre-conditions, this must also be
1582   // called if the pre-conditions might no longer hold true.
1583   static void ClearAllInterpreterCaches();
1584 
1585   template<PointerSize pointer_size>
1586   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1587     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1588   }
1589 
1590   static constexpr int InterpreterCacheSizeLog2() {
1591     return WhichPowerOf2(InterpreterCache::kSize);
1592   }
1593 
1594   static constexpr uint32_t AllThreadFlags() {
1595     return enum_cast<uint32_t>(ThreadFlag::kLastFlag) |
1596            (enum_cast<uint32_t>(ThreadFlag::kLastFlag) - 1u);
1597   }
1598 
1599   static constexpr uint32_t SuspendOrCheckpointRequestFlags() {
1600     return enum_cast<uint32_t>(ThreadFlag::kSuspendRequest) |
1601            enum_cast<uint32_t>(ThreadFlag::kCheckpointRequest) |
1602            enum_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest);
1603   }
1604 
1605   static constexpr uint32_t FlipFunctionFlags() {
1606     return enum_cast<uint32_t>(ThreadFlag::kPendingFlipFunction) |
1607            enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction);
1608   }
1609 
1610   static constexpr uint32_t StoredThreadStateValue(ThreadState state) {
1611     return StateAndFlags::EncodeState(state);
1612   }
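
  // Illustrative sketch: the masks above are typically tested against the packed
  // state-and-flags word, e.g. (hypothetical caller):
  //
  //   StateAndFlags sf = GetStateAndFlags(std::memory_order_relaxed);
  //   if (sf.IsAnyOfFlagsSet(SuspendOrCheckpointRequestFlags())) {
  //     ...  // a suspend or checkpoint request is pending.
  //   }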
1613 
1614   void ResetSharedMethodHotness() {
1615     tls32_.shared_method_hotness = kSharedMethodHotnessThreshold;
1616   }
1617 
1618   uint32_t GetSharedMethodHotness() const {
1619     return tls32_.shared_method_hotness;
1620   }
1621 
1622   uint32_t DecrementSharedMethodHotness() {
1623     tls32_.shared_method_hotness = (tls32_.shared_method_hotness - 1) & 0xffff;
1624     return tls32_.shared_method_hotness;
1625   }
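
  // Illustrative sketch: the interpreter counts down the hotness and notifies the runtime when
  // it reaches zero, then resets it (hypothetical caller):
  //
  //   if (self->DecrementSharedMethodHotness() == 0) {
  //     ...  // report the hot shared-memory method, e.g. to the JIT.
  //     self->ResetSharedMethodHotness();
  //   }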
1626 
1627  private:
1628   // We pretend to acquire this while running a checkpoint to detect lock ordering issues.
1629   // Initialized lazily.
1630   static std::atomic<Mutex*> cp_placeholder_mutex_;
1631 
1632   explicit Thread(bool daemon);
1633 
1634   // A successfully started thread is only deleted by the thread itself.
1635   // Threads are deleted after they have been removed from the thread list while holding
1636   // suspend_count_lock_ and thread_list_lock_. We refuse to do this while either kSuspendRequest
1637   // or kRunningFlipFunction is set. We can prevent Thread destruction by holding either of those
1638   // locks, ensuring that either of those flags is set, or possibly by registering and checking a
1639   // ThreadExitFlag.
1640   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1641 
1642   // Thread destruction actions that do not invalidate the thread. Checkpoints and flip_functions
1643   // may still be called on this Thread object, though not by this thread, during and after the
1644   // Destroy() call.
1645   void Destroy(bool should_run_callbacks);
1646 
1647   // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1648   // observed to be set at the same time by instrumentation.
1649   void DeleteJPeer(JNIEnv* env);
1650 
1651   // Attaches the calling native thread to the runtime, returning the new native peer.
1652   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1653   template <typename PeerAction>
1654   static Thread* Attach(const char* thread_name,
1655                         bool as_daemon,
1656                         PeerAction p,
1657                         bool should_run_callbacks);
1658 
1659   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1660 
1661   template<bool kTransactionActive>
1662   static void InitPeer(ObjPtr<mirror::Object> peer,
1663                        bool as_daemon,
1664                        ObjPtr<mirror::Object> thread_group,
1665                        ObjPtr<mirror::String> thread_name,
1666                        jint thread_priority)
1667       REQUIRES_SHARED(Locks::mutator_lock_);
1668 
1669   // Avoid use; callers should use SetState.
1670   // Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
1671   // disabled by `kStoreStackTraces = false`). May not be called on a runnable thread other
1672   // than Thread::Current().
1673   // NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
1674   // different states, so clang cannot perform the thread safety analysis.
1675   ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
1676     StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1677     ThreadState old_state = old_state_and_flags.GetState();
1678     if (old_state == new_state) {
1679       // Nothing to do.
1680     } else if (old_state == ThreadState::kRunnable) {
1681       DCHECK_EQ(this, Thread::Current());
1682       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1683       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1684       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1685       TransitionToSuspendedAndRunCheckpoints(new_state);
1686       // Since we transitioned to a suspended state, check for pending suspend barriers and pass them.
1687       CheckActiveSuspendBarriers();
1688     } else {
1689       while (true) {
1690         StateAndFlags new_state_and_flags = old_state_and_flags;
1691         new_state_and_flags.SetState(new_state);
1692         if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(
1693                 old_state_and_flags.GetValue(), new_state_and_flags.GetValue()))) {
1694           break;
1695         }
1696         // Reload state and flags.
1697         old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1698         DCHECK_EQ(old_state, old_state_and_flags.GetState());
1699       }
1700     }
1701     return old_state;
1702   }
1703 
1704   MutatorMutex* GetMutatorLock() RETURN_CAPABILITY(Locks::mutator_lock_) {
1705     DCHECK_EQ(tlsPtr_.mutator_lock, Locks::mutator_lock_);
1706     return tlsPtr_.mutator_lock;
1707   }
1708 
1709   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1710 
1711   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1712   DumpOrder DumpStack(std::ostream& os,
1713                       bool dump_native_stack = true,
1714                       bool force_dump_stack = false) const
1715       REQUIRES_SHARED(Locks::mutator_lock_);
1716   DumpOrder DumpStack(std::ostream& os,
1717                       unwindstack::AndroidLocalUnwinder& unwinder,
1718                       bool dump_native_stack = true,
1719                       bool force_dump_stack = false) const
1720       REQUIRES_SHARED(Locks::mutator_lock_);
1721 
1722   // Out-of-line conveniences for debugging in gdb.
1723   static Thread* CurrentFromGdb();  // Like Thread::Current.
1724   // Like Thread::Dump(std::cerr).
1725   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1726 
1727   // A wrapper around CreateCallback used when userfaultfd GC is used to
1728   // identify the GC by stacktrace.
1729   static NO_INLINE void* CreateCallbackWithUffdGc(void* arg);
1730   static void* CreateCallback(void* arg);
1731 
1732   void HandleUncaughtExceptions() REQUIRES_SHARED(Locks::mutator_lock_);
1733   void RemoveFromThreadGroup() REQUIRES_SHARED(Locks::mutator_lock_);
1734 
1735   // Initialize a thread.
1736   //
1737   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1738   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1739   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1740   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1741   // of false).
1742   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1743       REQUIRES(Locks::runtime_shutdown_lock_);
1744   void InitCardTable();
1745   void InitCpu();
1746   void CleanupCpu();
1747   void InitTlsEntryPoints();
1748   void InitTid();
1749   void InitPthreadKeySelf();
1750   bool InitStackHwm();
1751 
1752   void SetUpAlternateSignalStack();
1753   void TearDownAlternateSignalStack();
1754   void MadviseAwayAlternateSignalStack();
1755 
1756   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1757       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
1758       REQUIRES_SHARED(Locks::mutator_lock_);
1759 
1760   // Call PassActiveSuspendBarriers() if there are active barriers. Only called on current thread.
1761   ALWAYS_INLINE void CheckActiveSuspendBarriers()
1762       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::mutator_lock_, !Roles::uninterruptible_);
1763 
1764   // Decrement all "suspend barriers" for the current thread, notifying threads that requested our
1765   // suspension. Only called on current thread, when suspended. If suspend_count_ > 0 then we
1766   // promise that we are and will remain "suspended" until the suspend count is decremented.
1767   bool PassActiveSuspendBarriers()
1768       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::mutator_lock_);
1769 
1770   // Add an entry to active_suspend1_barriers.
1771   ALWAYS_INLINE void AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1772       REQUIRES(Locks::thread_suspend_count_lock_);
1773 
1774   // Remove last-added entry from active_suspend1_barriers.
1775   // Only makes sense if we're still holding thread_suspend_count_lock_ since insertion.
1776   // We redundantly pass in the barrier to be removed in order to enable a DCHECK.
1777   ALWAYS_INLINE void RemoveFirstSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1778       REQUIRES(Locks::thread_suspend_count_lock_);
1779 
1780   // Remove the "barrier" from the list no matter where it appears. Called only under exceptional
1781   // circumstances. The barrier must be in the list.
1782   ALWAYS_INLINE void RemoveSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1783       REQUIRES(Locks::thread_suspend_count_lock_);
1784 
1785   ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
1786 
1787   // CHECK that the given barrier is no longer on our list.
1788   ALWAYS_INLINE void CheckBarrierInactive(WrappedSuspend1Barrier* suspend1_barrier)
1789       REQUIRES(Locks::thread_suspend_count_lock_);
1790 
1791   // Registers the current thread as the jit sensitive thread. Should be called just once.
1792   static void SetJitSensitiveThread() {
1793     if (jit_sensitive_thread_ == nullptr) {
1794       jit_sensitive_thread_ = Thread::Current();
1795     } else {
1796       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1797           << Thread::Current()->GetTid();
1798     }
1799   }
1800 
1801   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1802     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1803   }
1804 
1805   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1806   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1807   // the kCheckpointRequest flag is cleared.
1808   void RunCheckpointFunction()
1809       REQUIRES(!Locks::thread_suspend_count_lock_)
1810       REQUIRES_SHARED(Locks::mutator_lock_);
1811   void RunEmptyCheckpoint();
1812 
1813   // Install the protected region for implicit stack checks.
1814   void InstallImplicitProtection();
1815 
1816   template <bool kPrecise>
1817   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1818 
1819   static bool IsAotCompiler();
1820 
1821   void ReleaseLongJumpContextInternal();
1822 
1823   void SetCachedThreadName(const char* name);
1824 
1825   // Helper class for manipulating the 32 bits of atomically changed state and flags.
1826   class StateAndFlags {
1827    public:
1828     explicit StateAndFlags(uint32_t value) : value_(value) {}
1829 
1830     uint32_t GetValue() const {
1831       return value_;
1832     }
1833 
1834     void SetValue(uint32_t value) {
1835       value_ = value;
1836     }
1837 
1838     bool IsAnyOfFlagsSet(uint32_t flags) const {
1839       DCHECK_EQ(flags & ~AllThreadFlags(), 0u);
1840       return (value_ & flags) != 0u;
1841     }
1842 
1843     bool IsFlagSet(ThreadFlag flag) const {
1844       return (value_ & enum_cast<uint32_t>(flag)) != 0u;
1845     }
1846 
1847     void SetFlag(ThreadFlag flag) {
1848       value_ |= enum_cast<uint32_t>(flag);
1849     }
1850 
1851     StateAndFlags WithFlag(ThreadFlag flag) const {
1852       StateAndFlags result = *this;
1853       result.SetFlag(flag);
1854       return result;
1855     }
1856 
1857     StateAndFlags WithoutFlag(ThreadFlag flag) const {
1858       StateAndFlags result = *this;
1859       result.ClearFlag(flag);
1860       return result;
1861     }
1862 
1863     void ClearFlag(ThreadFlag flag) {
1864       value_ &= ~enum_cast<uint32_t>(flag);
1865     }
1866 
1867     ThreadState GetState() const {
1868       ThreadState state = ThreadStateField::Decode(value_);
1869       ValidateThreadState(state);
1870       return state;
1871     }
1872 
1873     void SetState(ThreadState state) {
1874       ValidateThreadState(state);
1875       value_ = ThreadStateField::Update(state, value_);
1876     }
1877 
1878     StateAndFlags WithState(ThreadState state) const {
1879       StateAndFlags result = *this;
1880       result.SetState(state);
1881       return result;
1882     }
1883 
1884     static constexpr uint32_t EncodeState(ThreadState state) {
1885       ValidateThreadState(state);
1886       return ThreadStateField::Encode(state);
1887     }
1888 
1889     static constexpr void ValidateThreadState(ThreadState state) {
1890       if (kIsDebugBuild && state != ThreadState::kRunnable) {
1891         CHECK_GE(state, ThreadState::kTerminated);
1892         CHECK_LE(state, ThreadState::kSuspended);
1893         CHECK_NE(state, ThreadState::kObsoleteRunnable);
1894       }
1895     }
1896 
1897     // The value holds thread flags and thread state.
1898     uint32_t value_;
1899 
1900     static constexpr size_t kThreadStateBitSize = BitSizeOf<std::underlying_type_t<ThreadState>>();
1901     static constexpr size_t kThreadStatePosition = BitSizeOf<uint32_t>() - kThreadStateBitSize;
1902     using ThreadStateField = BitField<ThreadState, kThreadStatePosition, kThreadStateBitSize>;
1903     static_assert(
1904         WhichPowerOf2(enum_cast<uint32_t>(ThreadFlag::kLastFlag)) < kThreadStatePosition);
1905   };
1906   static_assert(sizeof(StateAndFlags) == sizeof(uint32_t), "Unexpected StateAndFlags size");
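
  // Illustrative sketch: StateAndFlags packs the ThreadState into the top bits and the
  // ThreadFlag bits below it, so a transition can be prepared value-wise before a CAS
  // (hypothetical; real transitions add checks and loops):
  //
  //   StateAndFlags old_sf = GetStateAndFlags(std::memory_order_relaxed);
  //   StateAndFlags new_sf =
  //       old_sf.WithState(ThreadState::kSuspended).WithoutFlag(ThreadFlag::kSuspendRequest);
  //   tls32_.state_and_flags.CompareAndSetWeakAcquire(old_sf.GetValue(), new_sf.GetValue());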
1907 
1908   StateAndFlags GetStateAndFlags(std::memory_order order) const {
1909     return StateAndFlags(tls32_.state_and_flags.load(order));
1910   }
1911 
1912   // Format state and flags as a hex string. For diagnostic output.
1913   std::string StateAndFlagsAsHexString() const;
1914 
1915   // Run the flip function and notify other threads that may have tried
1916   // to do that concurrently.
1917   void RunFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
1918 
1919   // Ensure that the thread flip function for thread `target` has started running. If no other thread is
1920   // executing it, the calling thread shall run the flip function and then notify other threads
1921   // that have tried to do that concurrently. After this function returns, the
1922   // `ThreadFlag::kPendingFlipFunction` is cleared but another thread may still be running the
1923   // flip function as indicated by the `ThreadFlag::kRunningFlipFunction`. Optional arguments:
1924   //  - old_state_and_flags indicates the current state and flags value for the thread, with
1925   //    at least kPendingFlipFunction set. The thread should logically acquire the
1926   //    mutator lock before running the flip function.  A special zero value indicates that the
1927   //    thread already holds the mutator lock, and the actual state_and_flags must be read.
1928   //    A non-zero value implies this == Current().
1929   //  - If tef is non-null, we check that the target thread has not yet exited, as indicated by
1930   //    tef. In that case, we acquire thread_list_lock_ as needed.
1931   //  - If finished is non-null, we assign to *finished to indicate whether the flip was known to
1932   //    be completed when we returned.
1933   //  Returns true if and only if we acquired the mutator lock (which implies that we ran the flip
1934   //  function after finding old_state_and_flags unchanged).
1935   static bool EnsureFlipFunctionStarted(Thread* self,
1936                                         Thread* target,
1937                                         StateAndFlags old_state_and_flags = StateAndFlags(0),
1938                                         ThreadExitFlag* tef = nullptr,
1939                                         /*out*/ bool* finished = nullptr)
1940       TRY_ACQUIRE_SHARED(true, Locks::mutator_lock_);
1941 
1942   static void ThreadExitCallback(void* arg);
1943 
1944   // Maximum number of suspend barriers.
1945   static constexpr uint32_t kMaxSuspendBarriers = 3;
1946 
1947   // Has Thread::Startup been called?
1948   static bool is_started_;
1949 
1950   // TLS key used to retrieve the Thread*.
1951   static pthread_key_t pthread_key_self_;
1952 
1953   // Used to notify threads that they should attempt to resume; they will suspend again if
1954   // their suspend count is > 0.
1955   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1956 
1957   // Hook passed by framework which returns true
1958   // when StrictMode events are traced for the current thread.
1959   static bool (*is_sensitive_thread_hook_)();
1960   // Stores the jit sensitive thread (which for now is the UI thread).
1961   static Thread* jit_sensitive_thread_;
1962 
1963   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
1964 
1965   /***********************************************************************************************/
1966   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1967   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1968   // first if possible.
1969   /***********************************************************************************************/
1970 
1971   struct alignas(4) tls_32bit_sized_values {
1972     // We have no control over the size of 'bool', but want our boolean fields
1973     // to be 4-byte quantities.
1974     using bool32_t = uint32_t;
1975 
1976     explicit tls_32bit_sized_values(bool is_daemon)
1977         : state_and_flags(0u),
1978           suspend_count(0),
1979           thin_lock_thread_id(0),
1980           tid(0),
1981           daemon(is_daemon),
1982           throwing_OutOfMemoryError(false),
1983           no_thread_suspension(0),
1984           thread_exit_check_count(0),
1985           is_gc_marking(false),
1986           is_deopt_check_required(false),
1987           weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
1988           disable_thread_flip_count(0),
1989           user_code_suspend_count(0),
1990           force_interpreter_count(0),
1991           make_visibly_initialized_counter(0),
1992           define_class_counter(0),
1993           num_name_readers(0),
1994           shared_method_hotness(kSharedMethodHotnessThreshold) {}
1995 
1996     // The state and flags field must be changed atomically so that flag values aren't lost.
1997     // See `StateAndFlags` for bit assignments of `ThreadFlag` and `ThreadState` values.
1998     // Keeping the state and flags together allows an atomic CAS to change from being
1999     // Suspended to Runnable without a suspend request occurring.
2000     Atomic<uint32_t> state_and_flags;
2001     static_assert(sizeof(state_and_flags) == sizeof(uint32_t),
2002                   "Size of state_and_flags and uint32 are different");
2003 
2004     // A non-zero value is used to tell the current thread to enter a safe point
2005     // at the next poll.
2006     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
2007 
2008     // Thin lock thread id. This is a small integer used by the thin lock implementation.
2009     // This is not to be confused with the native thread's tid, nor is it the value returned
2010     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
2011     // important difference between this id and the ids visible to managed code is that these
2012     // ones get reused (to ensure that they fit in the number of bits available).
2013     uint32_t thin_lock_thread_id;
2014 
2015     // System thread id.
2016     uint32_t tid;
2017 
2018     // Is the thread a daemon?
2019     const bool32_t daemon;
2020 
2021     // A boolean telling us whether we're recursively throwing OOME.
2022     bool32_t throwing_OutOfMemoryError;
2023 
2024     // A positive value implies we're in a region where thread suspension isn't expected.
2025     uint32_t no_thread_suspension;
2026 
2027     // How many times has our pthread key's destructor been called?
2028     uint32_t thread_exit_check_count;
2029 
2030     // True if the GC is in the marking phase. This is used for the CC collector only. This is
2031     // thread local so that we can simplify the logic to check for the fast path of read barriers of
2032     // GC roots.
2033     bool32_t is_gc_marking;
2034 
2035     // True if we need to check for deoptimization when returning from the runtime functions. This
2036     // is required only when a class is redefined to prevent executing code that has field offsets
2037     // embedded. For non-debuggable apps redefinition is not allowed and this flag should always be
2038     // set to false.
2039     bool32_t is_deopt_check_required;
2040 
2041     // Thread "interrupted" status; stays raised until queried or thrown.
2042     Atomic<bool32_t> interrupted;
2043 
2044     AtomicInteger park_state_;
2045 
2046     // Determines whether the thread is allowed to directly access a weak ref
2047     // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
2048     // This is used for concurrent reference processing of the CC collector only. This is thread
2049     // local so that we can enable/disable weak ref access by using a checkpoint and avoid a race
2050     // around the time weak ref access gets disabled and concurrent reference processing begins
2051     // (if weak ref access is disabled during a pause, this is not an issue.) Other collectors use
2052     // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath().  Can be
2053     // concurrently accessed by GetReferent() and set (by iterating over threads).
2054     // Can be changed from kEnabled to kVisiblyEnabled by readers. No other concurrent access is
2055     // possible when that happens.
2056     mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;
2057 
2058     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
2059     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
2060     // critical section enter.
2061     uint32_t disable_thread_flip_count;
2062 
2063     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
2064     // suspended by the runtime from those suspended by user code.
2065     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
2066     // told that AssertHeld should be good enough.
2067     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
2068 
2069     // Count of how many times this thread has been forced to interpreter. If this is not 0 the
2070     // thread must remain in interpreted code as much as possible.
2071     uint32_t force_interpreter_count;
2072 
2073     // Counter for calls to initialize a class that's initialized but not visibly initialized.
2074     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
2075     // make initialized classes visibly initialized. This is needed because we usually make
2076     // classes visibly initialized in batches but we do not want to be stuck with a class
2077     // initialized but not visibly initialized for a long time even if no more classes are
2078     // being initialized anymore.
2079     uint32_t make_visibly_initialized_counter;
2080 
2081     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
2082     // for threads to be done with class-definition work.
2083     uint32_t define_class_counter;
2084 
2085     // A count of the number of readers of tlsPtr_.name that may still be looking at a string they
2086     // retrieved.
2087     mutable std::atomic<uint32_t> num_name_readers;
2088     static_assert(std::atomic<uint32_t>::is_always_lock_free);
2089 
2090     // Thread-local hotness counter for shared memory methods. Initialized with
2091     // `kSharedMethodHotnessThreshold`. The interpreter decrements it and goes
2092     // into the runtime when hitting zero. Note that all previous decrements
2093     // could have been executed by another method than the one seeing zero.
2094     // There is a second level counter in `Jit::shared_method_counters_` to make
2095     // sure we at least have a few samples before compiling a method.
2096     uint32_t shared_method_hotness;
2097   } tls32_;
2098 
2099   struct alignas(8) tls_64bit_sized_values {
2100     tls_64bit_sized_values() : trace_clock_base(0) {
2101     }
2102 
2103     // The clock base used for tracing.
2104     uint64_t trace_clock_base;
2105 
2106     RuntimeStats stats;
2107   } tls64_;
2108 
2109   struct alignas(sizeof(void*)) tls_ptr_sized_values {
2110       tls_ptr_sized_values() : card_table(nullptr),
2111                                exception(nullptr),
2112                                stack_end(nullptr),
2113                                managed_stack(),
2114                                suspend_trigger(nullptr),
2115                                jni_env(nullptr),
2116                                tmp_jni_env(nullptr),
2117                                self(nullptr),
2118                                opeer(nullptr),
2119                                jpeer(nullptr),
2120                                stack_begin(nullptr),
2121                                stack_size(0),
2122                                deps_or_stack_trace_sample(),
2123                                wait_next(nullptr),
2124                                monitor_enter_object(nullptr),
2125                                top_handle_scope(nullptr),
2126                                class_loader_override(nullptr),
2127                                long_jump_context(nullptr),
2128                                stacked_shadow_frame_record(nullptr),
2129                                deoptimization_context_stack(nullptr),
2130                                frame_id_to_shadow_frame(nullptr),
2131                                name(nullptr),
2132                                pthread_self(0),
2133                                last_no_thread_suspension_cause(nullptr),
2134                                active_suspendall_barrier(nullptr),
2135                                active_suspend1_barriers(nullptr),
2136                                thread_local_pos(nullptr),
2137                                thread_local_end(nullptr),
2138                                thread_local_start(nullptr),
2139                                thread_local_limit(nullptr),
2140                                thread_local_objects(0),
2141                                checkpoint_function(nullptr),
2142                                thread_local_alloc_stack_top(nullptr),
2143                                thread_local_alloc_stack_end(nullptr),
2144                                mutator_lock(nullptr),
2145                                flip_function(nullptr),
2146                                thread_local_mark_stack(nullptr),
2147                                async_exception(nullptr),
2148                                top_reflective_handle_scope(nullptr),
2149                                method_trace_buffer(nullptr),
2150                                method_trace_buffer_index(0),
2151                                thread_exit_flags(nullptr) {
2152       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
2153     }
2154 
2155     // The biased card table, see CardTable for details.
2156     uint8_t* card_table;
2157 
2158     // The pending exception or null.
2159     mirror::Throwable* exception;
2160 
2161     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
2162     // We leave extra space so there's room for the code that throws StackOverflowError.
2163     uint8_t* stack_end;
2164 
2165     // The top of the managed stack often manipulated directly by compiler generated code.
2166     ManagedStack managed_stack;
2167 
2168     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
2169     // normally set to the address of itself. It should be cleared with release semantics to ensure
2170     // that prior state changes etc. are visible to any thread that faults as a result.
2171     // We assume that the kernel ensures that such changes are then visible to the faulting
2172     // thread, even if it is not an acquire load that faults. (Indeed, it seems unlikely that the
2173     // ordering semantics associated with the faulting load has any impact.)
2174     std::atomic<uintptr_t*> suspend_trigger;
2175 
2176     // Every thread may have an associated JNI environment
2177     JNIEnvExt* jni_env;
2178 
2179     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
2180     // created thread.
2181     JNIEnvExt* tmp_jni_env;
2182 
2183     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
2184     // is easy but getting the address of Thread::Current is hard. This field can be read off of
2185     // Thread::Current to give the address.
2186     Thread* self;
2187 
2188     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
2189     // start up, until the thread is registered and the local opeer_ is used.
2190     mirror::Object* opeer;
2191     jobject jpeer;
2192 
2193     // The "lowest addressable byte" of the stack.
2194     uint8_t* stack_begin;
2195 
2196     // Size of the stack.
2197     size_t stack_size;
2198 
2199     // Sampling profiler and AOT verification cannot happen on the same run, so we share
2200     // the same entry for the stack trace and the verifier deps.
2201     union DepsOrStackTraceSample {
2202       DepsOrStackTraceSample() {
2203         verifier_deps = nullptr;
2204         stack_trace_sample = nullptr;
2205       }
2206       // Pointer to previous stack trace captured by sampling profiler.
2207       std::vector<ArtMethod*>* stack_trace_sample;
2208       // When doing AOT verification, per-thread VerifierDeps.
2209       verifier::VerifierDeps* verifier_deps;
2210     } deps_or_stack_trace_sample;
2211 
2212     // The next thread in the wait set this thread is part of or null if not waiting.
2213     Thread* wait_next;
2214 
2215     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
2216     mirror::Object* monitor_enter_object;
2217 
2218     // Top of linked list of handle scopes or null for none.
2219     BaseHandleScope* top_handle_scope;
2220 
2221     // Needed to get the right ClassLoader in JNI_OnLoad, but also
2222     // useful for testing.
2223     jobject class_loader_override;
2224 
2225     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
2226     Context* long_jump_context;
2227 
2228     // For gc purpose, a shadow frame record stack that keeps track of:
2229     // 1) shadow frames under construction.
2230     // 2) deoptimization shadow frames.
2231     StackedShadowFrameRecord* stacked_shadow_frame_record;
2232 
2233     // Deoptimization return value record stack.
2234     DeoptimizationContextRecord* deoptimization_context_stack;
2235 
2236     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
2237     // Shadow frames may be created before deoptimization happens so that the debugger can
2238     // set local values there first.
2239     FrameIdToShadowFrame* frame_id_to_shadow_frame;
2240 
2241     // A cached copy of the java.lang.Thread's (modified UTF-8) name.
2242     // If this is not null or kThreadNameDuringStartup, then it owns the malloc memory holding
2243     // the string. Updated in an RCU-like manner.
2244     std::atomic<const char*> name;
2245     static_assert(std::atomic<const char*>::is_always_lock_free);
2246 
2247     // A cached pthread_t for the pthread underlying this Thread*.
2248     pthread_t pthread_self;
2249 
2250     // If no_thread_suspension_ is > 0, what is causing that assertion.
2251     const char* last_no_thread_suspension_cause;
2252 
2253     // After a thread observes a suspend request and enters a suspended state,
2254     // it notifies the requestor by arriving at a "suspend barrier". This consists of decrementing
2255     // the atomic integer representing the barrier. (This implementation was introduced in 2015 to
2256     // minimize cost. There may be other options.) These atomic integer barriers are always
2257     // stored on the requesting thread's stack. They are referenced from the target thread's
2258     // data structure in one of two ways; in either case the data structure referring to these
2259     // barriers is guarded by suspend_count_lock:
2260     // 1. A SuspendAll barrier is directly referenced from the target thread. Only one of these
2261     // can be active at a time:
2262     AtomicInteger* active_suspendall_barrier GUARDED_BY(Locks::thread_suspend_count_lock_);
2263     // 2. For individual thread suspensions, active barriers are embedded in a struct that is used
2264     // to link together all suspend requests for this thread. Unlike the SuspendAll case, each
2265     // barrier is referenced by a single target thread, and thus can appear only on a single list.
2266     // The struct as a whole is still stored on the requesting thread's stack.
2267     WrappedSuspend1Barrier* active_suspend1_barriers GUARDED_BY(Locks::thread_suspend_count_lock_);
2268 
2269     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
2270     // potentially better performance.
2271     uint8_t* thread_local_pos;
2272     uint8_t* thread_local_end;
2273 
2274     // Thread-local allocation pointer. Can be moved above the preceding two to correct alignment.
2275     uint8_t* thread_local_start;
2276 
2277   // Thread local limit is how much we can expand the thread local buffer to; it is greater than
2278   // or equal to thread_local_end.
2279     uint8_t* thread_local_limit;
2280 
2281     size_t thread_local_objects;
2282 
2283     // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
2284     // requests another checkpoint, it goes to the checkpoint overflow list.
2285     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Pointer to the mutator lock.
    // This is the same as `Locks::mutator_lock_` but cached for faster state transitions.
    MutatorMutex* mutator_lock;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // The function used for thread flip.  Set while holding Locks::thread_suspend_count_lock_ and
    // with all other threads suspended.  May be cleared while being read.
    std::atomic<Closure*> flip_function;

    union {
      // Thread-local mark stack for the concurrent copying collector.
      gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
      // Thread-local page-sized buffer for userfaultfd GC.
      uint8_t* thread_local_gc_buffer;
    };

    // The pending async-exception or null.
    mirror::Throwable* async_exception;

    // Top of the linked-list for reflective-handle scopes or null if none.
    BaseReflectiveHandleScope* top_reflective_handle_scope;

    // Pointer to a thread-local buffer for method tracing.
    uintptr_t* method_trace_buffer;

    // The index of the next free entry in method_trace_buffer.
    size_t method_trace_buffer_index;

    // Pointer to the first node of an intrusively doubly-linked list of ThreadExitFlags.
    ThreadExitFlag* thread_exit_flags GUARDED_BY(Locks::thread_list_lock_);
  } tlsPtr_;

  // Small thread-local cache to be used from the interpreter.
  // It is keyed by dex instruction pointer.
  // The value is opcode-dependent (e.g. field offset).
  InterpreterCache interpreter_cache_;

  // All fields below this line should not be accessed by native code. This means these fields can
  // be modified, rearranged, added or removed without having to modify asm_support.h.

  // Guards the 'wait_cond_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Debug disallow-read-barrier count; only checked in debug builds, and only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  // Counters used only for debugging and error reporting.  Likely to wrap.  Small to avoid
  // increasing Thread size.
  // We currently maintain these unconditionally, since it doesn't cost much, and we seem to have
  // persistent issues with suspension timeouts, which these should help to diagnose.
  // TODO: Reconsider this.
  std::atomic<uint8_t> suspended_count_ = 0;   // Number of times we entered a suspended state after
                                               // running checkpoints.
  std::atomic<uint8_t> checkpoint_count_ = 0;  // Number of checkpoints we started running.

  // Note that this is not in the packed struct; it may not be accessed for cross compilation.
  uintptr_t poison_object_cookie_ = 0;

  // Pending extra checkpoints if tlsPtr_.checkpoint_function is already in use.
  std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
  // compiled code or entrypoints.
  SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
      GUARDED_BY(Locks::custom_tls_lock_);

#if !defined(__BIONIC__)
#if !defined(ANDROID_HOST_MUSL)
    __attribute__((tls_model("initial-exec")))
#endif
  static thread_local Thread* self_tls_;
#endif

  // True if the thread is some form of runtime thread (e.g. GC or JIT).
  bool is_runtime_thread_;

  // Set during execution of JNI methods that get field and method IDs as part of determining if
  // the caller is allowed to access all fields and methods in the Core Platform API.
  uint32_t core_platform_api_cookie_ = 0;

  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread, Destroy and EnsureFlipFunctionStarted.
  friend class EntrypointsOrderTest;  // To test the order of tls entries.
  friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
                                               bool enabled = true)
      ACQUIRE(Roles::uninterruptible_)
      : enabled_(enabled) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->StartAssertNoThreadSuspension(cause);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_->EndAssertNoThreadSuspension(old_cause_);
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }

 private:
  Thread* self_;
  const bool enabled_;
  const char* old_cause_;
};
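
// Usage sketch (illustrative; the enclosing function is hypothetical):
//
//   void VisitSomething(Thread* self) {
//     ScopedAssertNoThreadSuspension ants("Visiting something");
//     // Code here must not suspend; in debug builds an attempt to suspend aborts with the
//     // cause string given above.
//   }  // The assertion is retired when `ants` goes out of scope.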

class ScopedAllowThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->EndAssertNoThreadSuspension();
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }

 private:
  Thread* self_;
  const char* old_cause_;
};
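
// Usage sketch (illustrative): temporarily re-allow suspension in the middle of a
// ScopedAssertNoThreadSuspension region, e.g. around an operation that may block.
//
//   {
//     ScopedAssertNoThreadSuspension ants("Mostly uninterruptible work");
//     // ...
//     {
//       ScopedAllowThreadSuspension ats;  // Suspension is permitted again in here.
//       // ... e.g. something that may block or suspend.
//     }  // The no-suspension assertion is re-established on exit.
//   }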


class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf) : self_(self), sf_(sf) {
    DCHECK_EQ(sf->GetLink(), nullptr);
    self_->PushStackedShadowFrame(sf, StackedShadowFrameType::kShadowFrameUnderConstruction);
  }
  ~ScopedStackedShadowFramePusher() {
    ShadowFrame* sf = self_->PopStackedShadowFrame();
    DCHECK_EQ(sf, sf_);
  }

 private:
  Thread* const self_;
  ShadowFrame* const sf_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};
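
// Usage sketch (illustrative; `frame` is a hypothetical ShadowFrame being set up):
//
//   {
//     ScopedStackedShadowFramePusher pusher(self, frame);
//     // While in scope, `frame` is recorded as a shadow frame under construction.
//   }  // `frame` is popped here and must still be the top stacked shadow frame.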

// Only has an effect in debug builds.
class ScopedDebugDisallowReadBarriers {
 public:
  explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
    self_->ModifyDebugDisallowReadBarrier(1);
  }
  ~ScopedDebugDisallowReadBarriers() {
    self_->ModifyDebugDisallowReadBarrier(-1);
  }

 private:
  Thread* const self_;
};
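
// Usage sketch (illustrative): in debug builds, read barriers executed by `self` while the
// scope is active are expected to trip a debug check.
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(self);
//     // Code that is expected not to perform any read barriers.
//   }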

class ThreadLifecycleCallback {
 public:
  virtual ~ThreadLifecycleCallback() {}

  virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
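
// Implementation sketch (illustrative; `MyThreadObserver` is hypothetical). A concrete callback
// overrides both hooks and is registered with the runtime's callback list so that it runs as
// threads are started and torn down:
//
//   class MyThreadObserver : public ThreadLifecycleCallback {
//    public:
//     // Per the declarations above, both hooks run with the mutator lock held (shared).
//     void ThreadStart(Thread* self) override {
//       // Invoked for a newly started, registered thread.
//     }
//     void ThreadDeath(Thread* self) override {
//       // Invoked for a thread that is about to be unregistered.
//     }
//   };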

// Store an exception from the thread and suppress it for the duration of this object.
class ScopedExceptionStorage {
 public:
  EXPORT explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
  EXPORT ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  StackHandleScope<1> hs_;
  MutableHandle<mirror::Throwable> excp_;
};
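
// Usage sketch (illustrative; `DoCleanup` is a hypothetical helper). The pending exception is
// stored and cleared for the duration of the scope and re-set on destruction, unless
// SuppressOldException() discards it in favor of a newer one:
//
//   {
//     ScopedExceptionStorage ses(self);  // Stores and clears the pending exception.
//     DoCleanup(self);                   // May need to run with no exception pending.
//     if (self->IsExceptionPending()) {
//       ses.SuppressOldException("while cleaning up: ");
//     }
//   }  // The stored exception becomes the thread's pending exception again.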

EXPORT std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, StackedShadowFrameType thread);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_