• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/bit_field.h"
30 #include "base/bit_utils.h"
31 #include "base/locks.h"
32 #include "base/macros.h"
33 #include "base/pointer_size.h"
34 #include "base/safe_map.h"
35 #include "base/value_object.h"
36 #include "entrypoints/jni/jni_entrypoints.h"
37 #include "entrypoints/quick/quick_entrypoints.h"
38 #include "handle.h"
39 #include "handle_scope.h"
40 #include "interpreter/interpreter_cache.h"
41 #include "interpreter/shadow_frame.h"
42 #include "javaheapprof/javaheapsampler.h"
43 #include "jvalue.h"
44 #include "managed_stack.h"
45 #include "offsets.h"
46 #include "read_barrier_config.h"
47 #include "reflective_handle_scope.h"
48 #include "runtime_globals.h"
49 #include "runtime_stats.h"
50 #include "suspend_reason.h"
51 #include "thread_state.h"
52 
53 namespace unwindstack {
54 class AndroidLocalUnwinder;
55 }  // namespace unwindstack
56 
57 namespace art HIDDEN {
58 
59 namespace gc {
60 namespace accounting {
61 template<class T> class AtomicStack;
62 }  // namespace accounting
63 namespace collector {
64 class SemiSpace;
65 }  // namespace collector
66 }  // namespace gc
67 
68 namespace instrumentation {
69 struct InstrumentationStackFrame;
70 }  // namespace instrumentation
71 
72 namespace mirror {
73 class Array;
74 class Class;
75 class ClassLoader;
76 class Object;
77 template<class T> class ObjectArray;
78 template<class T> class PrimitiveArray;
79 using IntArray = PrimitiveArray<int32_t>;
80 class StackTraceElement;
81 class String;
82 class Throwable;
83 }  // namespace mirror
84 
85 namespace verifier {
86 class VerifierDeps;
87 }  // namespace verifier
88 
89 class ArtMethod;
90 class BaseMutex;
91 class ClassLinker;
92 class Closure;
93 class Context;
94 class DeoptimizationContextRecord;
95 class DexFile;
96 class FrameIdToShadowFrame;
97 class IsMarkedVisitor;
98 class JavaVMExt;
99 class JNIEnvExt;
100 class Monitor;
101 class RootVisitor;
102 class ScopedObjectAccessAlreadyRunnable;
103 class ShadowFrame;
104 class StackedShadowFrameRecord;
105 class Thread;
106 class ThreadList;
107 enum VisitRootFlags : uint8_t;
108 enum class LowOverheadTraceType;
109 
110 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
111 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
112 // on.
113 class TLSData {
114  public:
~TLSData()115   virtual ~TLSData() {}
116 };
117 
118 // Thread priorities. These must match the Thread.MIN_PRIORITY,
119 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
120 enum ThreadPriority {
121   kMinThreadPriority = 1,
122   kNormThreadPriority = 5,
123   kMaxThreadPriority = 10,
124 };
125 
126 enum class ThreadFlag : uint32_t {
127   // If set, implies that suspend_count_ > 0 and the Thread should enter the safepoint handler.
128   kSuspendRequest = 1u << 0,
129 
130   // Request that the thread do some checkpoint work and then continue.
131   // Only modified while holding thread_suspend_count_lock_ .
132   kCheckpointRequest = 1u << 1,
133 
134   // Request that the thread do empty checkpoint and then continue.
135   kEmptyCheckpointRequest = 1u << 2,
136 
137   // Register that at least 1 suspend barrier needs to be passed.
138   // Changes to this flag are guarded by suspend_count_lock_ .
139   kActiveSuspendBarrier = 1u << 3,
140 
141   // Marks that a "flip function" needs to be executed on this thread.
142   // Set only while holding thread_list_lock_.
143   kPendingFlipFunction = 1u << 4,
144 
145   // Marks that the "flip function" is being executed by another thread.
146   //
147   // This is used to guard against multiple threads trying to run the
148   // "flip function" for the same thread while the thread is suspended.
149   //
150   // Set when we have some way to ensure that the thread cannot disappear out from under us,
151   // Either:
152   //   1) Set by the thread itself,
153   //   2) by a thread holding thread_list_lock_, or
154   //   3) while the target has a pending suspension request.
155   // Once set, prevents a thread from exiting.
156   kRunningFlipFunction = 1u << 5,
157 
158   // We are responsible for resuming all other threads. We ignore suspension requests,
159   // but not checkpoint requests, until a more opportune time. GC code should
160   // in any case not check for such requests; other clients of SuspendAll might.
161   // Prevents a situation in which we are asked to suspend just before we suspend all
162   // other threads, and then notice the suspension request and suspend ourselves,
163   // leading to deadlock. Guarded by thread_suspend_count_lock_ .
164   // Should not ever be set when we try to transition to kRunnable.
165   // TODO(b/296639267): Generalize use to prevent SuspendAll from blocking
166   // in-progress GC.
167   kSuspensionImmune = 1u << 6,
168 
169   // Request that compiled JNI stubs do not transition to Native or Runnable with
170   // inlined code, but take a slow path for monitoring method entry and exit events.
171   kMonitorJniEntryExit = 1u << 7,
172 
173   // Indicates the last flag. Used for checking that the flags do not overlap thread state.
174   kLastFlag = kMonitorJniEntryExit
175 };
176 
177 enum class StackedShadowFrameType {
178   kShadowFrameUnderConstruction,
179   kDeoptimizationShadowFrame,
180 };
181 
182 // The type of method that triggers deoptimization. It contains info on whether
183 // the deoptimized method should advance dex_pc.
184 enum class DeoptimizationMethodType {
185   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
186   kDefault     // dex pc may or may not advance depending on other conditions.
187 };
188 
189 // For the CC colector, normal weak reference access can be disabled on a per-thread basis, while
190 // processing references.  After finishing, the reference processor asynchronously sets the
191 // per-thread flags back to kEnabled with release memory ordering semantics. Each mutator thread
192 // should check its flag with acquire semantics before assuming that it is enabled. However,
193 // that is often too expensive, so the reading thread sets it to kVisiblyEnabled after seeing it
194 // kEnabled.  The Reference.get() intrinsic can thus read it in relaxed mode, and reread (by
195 // resorting to the slow path) with acquire semantics if it sees a value of kEnabled rather than
196 // kVisiblyEnabled.
197 enum class WeakRefAccessState : int32_t {
198   kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
199   kEnabled,
200   kDisabled
201 };
202 
203 // ART uses two types of ABI/code: quick and native.
204 //
205 // Quick code includes:
206 // - The code that ART compiles to, e.g: Java/dex code compiled to Arm64.
207 // - Quick assembly entrypoints.
208 //
209 // Native code includes:
210 // - Interpreter.
211 // - GC.
212 // - JNI.
213 // - Runtime methods, i.e.: all ART C++ code.
214 //
215 // In regular (non-simulator) mode, both native and quick code are of the same ISA and will operate
216 // on the hardware stack. The hardware stack is allocated by the kernel to ART and grows down in
217 // memory.
218 //
219 // In simulator mode, native and quick code use different ISA's and will use different stacks.
220 // Native code will use the hardware stack while quick code will use the simulated stack. The
221 // simulated stack is a simple buffer in the native heap owned by the Simulator class.
222 //
223 // The StackType enum reflects the underlying type of stack in use by any given function while two
224 // constexpr StackTypes (kNativeStackType and kQuickStackType) indicate which type of stack is used
225 // for native and quick code. Whenever possible kNativeStackType and kQuickStackType should be used
226 // instead of using the StackType directly.
227 enum class StackType {
228   kHardware,
229   kSimulated
230 };
231 
232 // The type of stack used when executing native code, i.e.: runtime helpers, interpreter, JNI, etc.
233 // This stack is the native machine's call stack and so should be used when comparing against
234 // values returned from builtin functions such as __builtin_frame_address.
235 static constexpr StackType kNativeStackType = StackType::kHardware;
236 
237 // The type of stack used when executing quick code, i.e.: compiled dex code and quick entrypoints.
238 // For simulator builds this is the kSimulated stack and for non-simulator builds this is the
239 // kHardware stack.
240 static constexpr StackType kQuickStackType = StackType::kHardware;
241 
242 // See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
243 struct WrappedSuspend1Barrier {
244   // TODO(b/323668816): At least weaken CHECKs to DCHECKs once the bug is fixed.
245   static constexpr int kMagic = 0xba8;
WrappedSuspend1BarrierWrappedSuspend1Barrier246   WrappedSuspend1Barrier() : magic_(kMagic), barrier_(1), next_(nullptr) {}
247   int magic_;
248   AtomicInteger barrier_;
249   struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
250 };
251 
252 // Mostly opaque structure allocated by the client of NotifyOnThreadExit.  Allows a client to
253 // check whether the thread still exists after temporarily releasing thread_list_lock_, usually
254 // because we need to wait for something.
255 class ThreadExitFlag {
256  public:
ThreadExitFlag()257   ThreadExitFlag() : exited_(false) {}
HasExited()258   bool HasExited() REQUIRES(Locks::thread_list_lock_) { return exited_; }
259 
260  private:
261   // All ThreadExitFlags associated with a thread and with exited_ == false are in a doubly linked
262   // list.  tlsPtr_.thread_exit_flags points to the first element.  first.prev_ and last.next_ are
263   // null. This list contains no ThreadExitFlags with exited_ == true;
264   ThreadExitFlag* next_ GUARDED_BY(Locks::thread_list_lock_);
265   ThreadExitFlag* prev_ GUARDED_BY(Locks::thread_list_lock_);
266   bool exited_ GUARDED_BY(Locks::thread_list_lock_);
267   friend class Thread;
268 };
269 
270 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
271 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
272 
273 static constexpr size_t kSharedMethodHotnessThreshold = 0x1fff;
274 
275 // Thread's stack layout for implicit stack overflow checks:
276 //
277 //   +---------------------+  <- highest address of stack memory
278 //   |                     |
279 //   .                     .  <- SP
280 //   |                     |
281 //   |                     |
282 //   +---------------------+  <- stack_end
283 //   |                     |
284 //   |  Gap                |
285 //   |                     |
286 //   +---------------------+  <- stack_begin
287 //   |                     |
288 //   | Protected region    |
289 //   |                     |
290 //   +---------------------+  <- lowest address of stack memory
291 //
292 // The stack always grows down in memory.  At the lowest address is a region of memory
293 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
294 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
295 // between the stack_end and the highest address in stack memory.  An implicit stack
296 // overflow check is a read of memory at a certain offset below the current SP (8K typically).
297 // If the thread's SP is below the stack_end address this will be a read into the protected
298 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
299 // at least 8K of space.  Because stack overflow checks are only performed in generated code,
300 // if the thread makes a call out to a native function (through JNI), that native function
301 // might only have 4K of memory (if the SP is adjacent to stack_end).
302 
303 class EXPORT Thread {
304  public:
305   static const size_t kStackOverflowImplicitCheckSize;
306   static constexpr bool kVerifyStack = kIsDebugBuild;
307 
308   // Creates a new native thread corresponding to the given managed peer.
309   // Used to implement Thread.start.
310   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
311 
312   // Attaches the calling native thread to the runtime, returning the new native peer.
313   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
314   static Thread* Attach(const char* thread_name,
315                         bool as_daemon,
316                         jobject thread_group,
317                         bool create_peer,
318                         bool should_run_callbacks);
319   // Attaches the calling native thread to the runtime, returning the new native peer.
320   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
321 
322   // Reset internal state of child thread after fork.
323   void InitAfterFork();
324 
325   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
326   // high cost and so we favor passing self around when possible.
327   // TODO: mark as PURE so the compiler may coalesce and remove?
328   static Thread* Current();
329 
330   // Get the thread from the JNI environment.
331   static Thread* ForEnv(JNIEnv* env);
332 
333   // For implicit overflow checks we reserve an extra piece of memory at the bottom of the stack
334   // (lowest memory). The higher portion of the memory is protected against reads and the lower is
335   // available for use while throwing the StackOverflow exception.
336   ALWAYS_INLINE static size_t GetStackOverflowProtectedSize();
337 
338   // On a runnable thread, check for pending thread suspension request and handle if pending.
339   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
340 
341   // Process pending thread suspension request and handle if pending.
342   void CheckSuspend(bool implicit = false) REQUIRES_SHARED(Locks::mutator_lock_);
343 
344   // Process a pending empty checkpoint if pending.
345   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
346   void CheckEmptyCheckpointFromMutex();
347 
348   static Thread* FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer)
349       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
350       REQUIRES_SHARED(Locks::mutator_lock_);
351   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
352       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
353       REQUIRES_SHARED(Locks::mutator_lock_);
354 
355   // Translates 172 to pAllocArrayFromCode and so on.
356   template<PointerSize size_of_pointers>
357   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
358 
359   // Dumps a one-line summary of thread state (used for operator<<).
360   void ShortDump(std::ostream& os) const;
361 
362   // Order of threads for ANRs (ANRs can be trimmed, so we print important ones first).
363   enum class DumpOrder : uint8_t {
364     kMain,     // Always print the main thread first (there might not be one).
365     kBlocked,  // Then print all threads that are blocked due to waiting on lock.
366     kLocked,   // Then print all threads that are holding some lock already.
367     kDefault,  // Print all other threads which might not be interesting for ANR.
368   };
369 
370   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
371   DumpOrder Dump(std::ostream& os,
372                  bool dump_native_stack = true,
373                  bool force_dump_stack = false) const
374       REQUIRES_SHARED(Locks::mutator_lock_);
375   DumpOrder Dump(std::ostream& os,
376                  unwindstack::AndroidLocalUnwinder& unwinder,
377                  bool dump_native_stack = true,
378                  bool force_dump_stack = false) const
379       REQUIRES_SHARED(Locks::mutator_lock_);
380 
381   DumpOrder DumpJavaStack(std::ostream& os,
382                           bool check_suspended = true,
383                           bool dump_locks = true) const
384       REQUIRES_SHARED(Locks::mutator_lock_);
385 
386   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
387   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
388   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
389       REQUIRES_SHARED(Locks::mutator_lock_);
390 
GetState()391   ThreadState GetState() const {
392     return GetStateAndFlags(std::memory_order_relaxed).GetState();
393   }
394 
395   ThreadState SetState(ThreadState new_state);
396 
GetSuspendCount()397   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
398     return tls32_.suspend_count;
399   }
400 
GetUserCodeSuspendCount()401   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
402                                                Locks::user_code_suspension_lock_) {
403     return tls32_.user_code_suspend_count;
404   }
405 
IsSuspended()406   bool IsSuspended() const {
407     // We need to ensure that once we return true, all prior accesses to the Java data by "this"
408     // thread are complete. Hence we need "acquire" ordering here, and "release" when the flags
409     // are set.
410     StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_acquire);
411     return state_and_flags.GetState() != ThreadState::kRunnable &&
412            state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
413   }
414 
DecrDefineClassCount()415   void DecrDefineClassCount() {
416     tls32_.define_class_counter--;
417   }
418 
IncrDefineClassCount()419   void IncrDefineClassCount() {
420     tls32_.define_class_counter++;
421   }
GetDefineClassCount()422   uint32_t GetDefineClassCount() const {
423     return tls32_.define_class_counter;
424   }
425 
426   // Increment suspend count and optionally install at most one suspend barrier.
427   // Must hold thread_list_lock, OR be called with self == this, so that the Thread cannot
428   // disappear while we're running. If it's known that this == self, and thread_list_lock_
429   // is not held, FakeMutexLock should be used to fake-acquire thread_list_lock_ for
430   // static checking purposes.
431   ALWAYS_INLINE
432   void IncrementSuspendCount(Thread* self,
433                              AtomicInteger* suspendall_barrier,
434                              WrappedSuspend1Barrier* suspend1_barrier,
435                              SuspendReason reason) REQUIRES(Locks::thread_suspend_count_lock_)
436       REQUIRES(Locks::thread_list_lock_);
437 
438   // The same, but default reason to kInternal, and barriers to nullptr.
439   ALWAYS_INLINE void IncrementSuspendCount(Thread* self) REQUIRES(Locks::thread_suspend_count_lock_)
440       REQUIRES(Locks::thread_list_lock_);
441 
442   // Follows one of the above calls. For_user_code indicates if SuspendReason was kForUserCode.
443   // Generally will need to be closely followed by Thread::resume_cond_->Broadcast(self);
444   // since there may be waiters. DecrementSuspendCount() itself does not do this, since we often
445   // wake more than a single thread.
446   ALWAYS_INLINE void DecrementSuspendCount(Thread* self, bool for_user_code = false)
447       REQUIRES(Locks::thread_suspend_count_lock_);
448 
449  private:
450   NO_RETURN static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread);
451 
452  public:
453   // Requests a checkpoint closure to run on another thread. The closure will be run when the
454   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
455   // originating from a compiler generated suspend point check. This returns true if the closure
456   // was added and will (eventually) be executed. It returns false if this was impossible
457   // because the thread was suspended, and we thus did nothing.
458   //
459   // Since multiple closures can be queued and some closures can delay other threads from running,
460   // no closure should attempt to suspend another thread while running.
461   // TODO We should add some debug option that verifies this.
462   //
463   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
464   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
465   // acquires it.
466   bool RequestCheckpoint(Closure* function)
467       REQUIRES(Locks::thread_suspend_count_lock_);
468 
469   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
470   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
471   // execute the checkpoint for us if it is Runnable. The wait_state is the state that the thread
472   // will go into while it is awaiting the checkpoint to be run.
473   // The closure may be run on Thread::Current() on behalf of "this" thread.
474   // Thus for lock ordering purposes, the closure should be runnable by the caller. This also
475   // sometimes makes it reasonable to pass ThreadState::kRunnable as wait_state: We may wait on
476   // a condition variable for the "this" thread to act, but for lock ordering purposes, this is
477   // exactly as though Thread::Current() had run the closure.
478   // NB Since multiple closures can be queued and some closures can delay other threads from running
479   // no closure should attempt to suspend another thread while running.
480   bool RequestSynchronousCheckpoint(Closure* function,
481                                     ThreadState wait_state = ThreadState::kWaiting)
482       REQUIRES_SHARED(Locks::mutator_lock_) RELEASE(Locks::thread_list_lock_)
483           REQUIRES(!Locks::thread_suspend_count_lock_);
484 
485   bool RequestEmptyCheckpoint()
486       REQUIRES(Locks::thread_suspend_count_lock_);
487 
GetFlipFunction()488   Closure* GetFlipFunction() { return tlsPtr_.flip_function.load(std::memory_order_relaxed); }
489 
490   // Set the flip function. This is done with all threads suspended, except for the calling thread.
491   void SetFlipFunction(Closure* function) REQUIRES(Locks::thread_suspend_count_lock_)
492       REQUIRES(Locks::thread_list_lock_);
493 
494   // Wait for the flip function to complete if still running on another thread. Assumes the "this"
495   // thread remains live.
496   void WaitForFlipFunction(Thread* self) const REQUIRES(!Locks::thread_suspend_count_lock_);
497 
498   // An enhanced version of the above that uses tef to safely return if the thread exited in the
499   // meantime.
500   void WaitForFlipFunctionTestingExited(Thread* self, ThreadExitFlag* tef)
501       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
502 
GetThreadLocalMarkStack()503   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
504     CHECK(gUseReadBarrier);
505     return tlsPtr_.thread_local_mark_stack;
506   }
SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object> * stack)507   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
508     CHECK(gUseReadBarrier);
509     tlsPtr_.thread_local_mark_stack = stack;
510   }
511 
GetThreadLocalGcBuffer()512   uint8_t* GetThreadLocalGcBuffer() {
513     DCHECK(gUseUserfaultfd);
514     return tlsPtr_.thread_local_gc_buffer;
515   }
SetThreadLocalGcBuffer(uint8_t * buf)516   void SetThreadLocalGcBuffer(uint8_t* buf) {
517     DCHECK(gUseUserfaultfd);
518     tlsPtr_.thread_local_gc_buffer = buf;
519   }
520 
521   // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of
522   // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
523   // Should be called only when the kSuspensionImmune flag is clear. Requires this == Current();
524   void FullSuspendCheck(bool implicit = false)
525       REQUIRES(!Locks::thread_suspend_count_lock_)
526       REQUIRES_SHARED(Locks::mutator_lock_);
527 
528   // Transition from non-runnable to runnable state acquiring share on mutator_lock_. Returns the
529   // old state, or kInvalidState if we failed because allow_failure and kSuspensionImmune were set.
530   // Should not be called with an argument except by the next function below.
531   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable(bool fail_on_suspend_req = false)
532       REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
533 
534   // A version that does not return the old ThreadState, and fails by returning false if it would
535   // have needed to handle a pending suspension request.
TryTransitionFromSuspendedToRunnable()536   ALWAYS_INLINE bool TryTransitionFromSuspendedToRunnable()
537       REQUIRES(!Locks::thread_suspend_count_lock_)
538       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
539     // The above function does not really acquire the lock when we pass true and it returns
540     // kInvalidState. We lie in both places, but clients see correct behavior.
541     return TransitionFromSuspendedToRunnable(true) != ThreadState::kInvalidState;
542   }
543 
544   // Transition from runnable into a state where mutator privileges are denied. Releases share of
545   // mutator lock.
546   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
547       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
548       UNLOCK_FUNCTION(Locks::mutator_lock_);
549 
550   // Once called thread suspension will cause an assertion failure.
StartAssertNoThreadSuspension(const char * cause)551   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
552     Roles::uninterruptible_.Acquire();  // No-op.
553     if (kIsDebugBuild) {
554       CHECK(cause != nullptr);
555       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
556       tls32_.no_thread_suspension++;
557       tlsPtr_.last_no_thread_suspension_cause = cause;
558       return previous_cause;
559     } else {
560       return nullptr;
561     }
562   }
563 
564   // End region where no thread suspension is expected.
EndAssertNoThreadSuspension(const char * old_cause)565   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
566     if (kIsDebugBuild) {
567       CHECK_IMPLIES(old_cause == nullptr, tls32_.no_thread_suspension == 1);
568       CHECK_GT(tls32_.no_thread_suspension, 0U);
569       tls32_.no_thread_suspension--;
570       tlsPtr_.last_no_thread_suspension_cause = old_cause;
571     }
572     Roles::uninterruptible_.Release();  // No-op.
573   }
574 
575   // End region where no thread suspension is expected. Returns the current open region in case we
576   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
577   // is larger than one.
EndAssertNoThreadSuspension()578   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
579     const char* ret = nullptr;
580     if (kIsDebugBuild) {
581       CHECK_EQ(tls32_.no_thread_suspension, 1u);
582       tls32_.no_thread_suspension--;
583       ret = tlsPtr_.last_no_thread_suspension_cause;
584       tlsPtr_.last_no_thread_suspension_cause = nullptr;
585     }
586     Roles::uninterruptible_.Release();  // No-op.
587     return ret;
588   }
589 
590   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
591 
AssertNoTransactionCheckAllowed()592   void AssertNoTransactionCheckAllowed() const {
593     CHECK(tlsPtr_.last_no_transaction_checks_cause == nullptr)
594         << tlsPtr_.last_no_transaction_checks_cause;
595   }
596 
597   // Return true if thread suspension is allowable.
598   bool IsThreadSuspensionAllowable() const;
599 
IsDaemon()600   bool IsDaemon() const {
601     return tls32_.daemon;
602   }
603 
604   size_t NumberOfHeldMutexes() const;
605 
606   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
607 
608   /*
609    * Changes the priority of this thread to match that of the java.lang.Thread object.
610    *
611    * We map a priority value from 1-10 to Linux "nice" values, where lower
612    * numbers indicate higher priority.
613    */
614   void SetNativePriority(int newPriority);
615 
616   /*
617    * Returns the priority of this thread by querying the system.
618    * This is useful when attaching a thread through JNI.
619    *
620    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
621    */
622   int GetNativePriority() const;
623 
624   // Guaranteed to be non-zero.
GetThreadId()625   uint32_t GetThreadId() const {
626     return tls32_.thin_lock_thread_id;
627   }
628 
GetTid()629   pid_t GetTid() const {
630     return tls32_.tid;
631   }
632 
633   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
634   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
635 
636   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
637   // allocation, or locking.
638   void GetThreadName(std::string& name) const;
639 
640   // Sets the thread's name.
641   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
642 
643   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
644   uint64_t GetCpuMicroTime() const;
645 
646   // Returns the thread-specific CPU-time clock in nanoseconds or -1 if unavailable.
647   uint64_t GetCpuNanoTime() const;
648 
GetPeer()649   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
650     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
651     CHECK(tlsPtr_.jpeer == nullptr);
652     return tlsPtr_.opeer;
653   }
654   // GetPeer is not safe if called on another thread in the middle of the thread flip and
655   // the thread's stack may have not been flipped yet and peer may be a from-space (stale) ref.
656   // This function will force a flip for the other thread if necessary.
657   // Since we hold a shared mutator lock, a new flip function cannot be concurrently installed.
658   // The target thread must be suspended, so that it cannot disappear during the call.
659   // We should ideally not hold thread_list_lock_ . GetReferenceKind in ti_heap.cc, currently does
660   // hold it, but in a context in which we do not invoke EnsureFlipFunctionStarted().
661   mirror::Object* GetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_);
662 
663   // A version of the above that requires thread_list_lock_, but does not require the thread to
664   // be suspended. This may temporarily release thread_list_lock_. It thus needs a ThreadExitFlag
665   // describing the thread's status, so we can tell if it exited in the interim. Returns null if
666   // the thread exited.
667   mirror::Object* LockedGetPeerFromOtherThread(ThreadExitFlag* tef)
668       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::thread_list_lock_);
669 
670   // A convenience version of the above that creates the ThreadExitFlag locally. This is often
671   // unsafe if more than one thread is being processed. A prior call may have released
672   // thread_list_lock_, and thus the NotifyOnThreadExit() call here could see a deallocated
673   // Thread. We must hold the thread_list_lock continuously between obtaining the Thread*
674   // and calling NotifyOnThreadExit().
LockedGetPeerFromOtherThread()675   mirror::Object* LockedGetPeerFromOtherThread() REQUIRES_SHARED(Locks::mutator_lock_)
676       REQUIRES(Locks::thread_list_lock_) {
677     ThreadExitFlag tef;
678     NotifyOnThreadExit(&tef);
679     mirror::Object* result = LockedGetPeerFromOtherThread(&tef);
680     UnregisterThreadExitFlag(&tef);
681     return result;
682   }
683 
HasPeer()684   bool HasPeer() const {
685     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
686   }
687 
GetStats()688   RuntimeStats* GetStats() {
689     return &tls64_.stats;
690   }
691 
692   bool IsStillStarting() const;
693 
IsExceptionPending()694   bool IsExceptionPending() const {
695     return tlsPtr_.exception != nullptr;
696   }
697 
IsAsyncExceptionPending()698   bool IsAsyncExceptionPending() const {
699     return tlsPtr_.async_exception != nullptr;
700   }
701 
GetException()702   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
703     return tlsPtr_.exception;
704   }
705 
706   void AssertPendingException() const;
707   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
708   void AssertNoPendingException() const;
709   void AssertNoPendingExceptionForNewException(const char* msg) const;
710 
711   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
712 
713   // Set an exception that is asynchronously thrown from a different thread. This will be checked
714   // periodically and might overwrite the current 'Exception'. This can only be called from a
715   // checkpoint.
716   //
717   // The caller should also make sure that the thread has been deoptimized so that the exception
718   // could be detected on back-edges.
719   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
720       REQUIRES_SHARED(Locks::mutator_lock_);
721 
ClearException()722   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
723     tlsPtr_.exception = nullptr;
724   }
725 
726   // Move the current async-exception to the main exception. This should be called when the current
727   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
728   // that needs to be dealt with, false otherwise.
729   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
730 
731   // Find catch block then prepare and return the long jump context to the appropriate exception
732   // handler. When is_method_exit_exception is true, the exception was thrown by the method exit
733   // callback and we should not send method unwind for the method on top of the stack since method
734   // exit callback was already called.
735   std::unique_ptr<Context> QuickDeliverException(bool is_method_exit_exception = false)
736       REQUIRES_SHARED(Locks::mutator_lock_);
737 
738   // Perform deoptimization. Return a `Context` prepared for a long jump.
739   std::unique_ptr<Context> Deoptimize(DeoptimizationKind kind,
740                                       bool single_frame,
741                                       bool skip_method_exit_callbacks)
742       REQUIRES_SHARED(Locks::mutator_lock_);
743 
744   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
745   // abort the runtime iff abort_on_error is true.
746   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
747                               bool check_suspended = true,
748                               bool abort_on_error = true) const
749       REQUIRES_SHARED(Locks::mutator_lock_);
750 
751   // Returns whether the given exception was thrown by the current Java method being executed
752   // (Note that this includes native Java methods).
753   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
754       REQUIRES_SHARED(Locks::mutator_lock_);
755 
SetTopOfStack(ArtMethod ** top_method)756   void SetTopOfStack(ArtMethod** top_method) {
757     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
758   }
759 
SetTopOfStackGenericJniTagged(ArtMethod ** top_method)760   void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
761     tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
762   }
763 
SetTopOfShadowStack(ShadowFrame * top)764   void SetTopOfShadowStack(ShadowFrame* top) {
765     tlsPtr_.managed_stack.SetTopShadowFrame(top);
766   }
767 
HasManagedStack()768   bool HasManagedStack() const {
769     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
770   }
771 
772   // If 'msg' is null, no detail message is set.
773   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
774       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
775 
776   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
777   // used as the new exception's cause.
778   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
779       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
780 
781   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
782       __attribute__((format(printf, 3, 4)))
783       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
784 
785   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
786       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
787 
788   // OutOfMemoryError is special, because we need to pre-allocate an instance.
789   // Only the GC should call this.
790   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
791       REQUIRES(!Roles::uninterruptible_);
792 
793   static void Startup();
794   static void FinishStartup();
795   static void Shutdown();
796 
797   // Notify this thread's thread-group that this thread has started.
798   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
799   //       is null, the thread's thread-group is loaded from the peer.
800   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
801       REQUIRES_SHARED(Locks::mutator_lock_);
802 
803   // Request notification when this thread is unregistered, typically because it has exited.
804   //
805   // The ThreadExitFlag status is only changed when we remove the thread from the thread list,
806   // which we only do once no suspend requests are outstanding, and no flip-functions are still
807   // running.
808   //
809   // The caller must allocate a fresh ThreadExitFlag, and pass it in. The caller is responsible
810   // for either waiting until the thread has exited, or unregistering the ThreadExitFlag, and
811   // then, and only then, deallocating the ThreadExitFlag.  (This scheme avoids an allocation and
812   // questions about what to do if the allocation fails. Allows detection of thread exit after
813   // temporary release of thread_list_lock_)
814   void NotifyOnThreadExit(ThreadExitFlag* tef) REQUIRES(Locks::thread_list_lock_);
815   void UnregisterThreadExitFlag(ThreadExitFlag* tef) REQUIRES(Locks::thread_list_lock_);
816 
817   // Is the ThreadExitFlag currently registered in this thread, which has not yet terminated?
818   // Intended only for testing.
819   bool IsRegistered(ThreadExitFlag* query_tef) REQUIRES(Locks::thread_list_lock_);
820 
821   // For debuggable builds, CHECK that neither first nor last, nor any ThreadExitFlag with an
822   // address in-between, is currently registered with any thread.
823   static void DCheckUnregisteredEverywhere(ThreadExitFlag* first, ThreadExitFlag* last)
824       REQUIRES(!Locks::thread_list_lock_);
825 
826   // Called when thread is unregistered. May be called repeatedly, in which case only newly
827   // registered clients are processed.
828   void SignalExitFlags() REQUIRES(Locks::thread_list_lock_);
829 
830   // JNI methods
GetJniEnv()831   JNIEnvExt* GetJniEnv() const {
832     return tlsPtr_.jni_env;
833   }
834 
835   // Convert a jobject into a Object*
836   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
837   // Checks if the weak global ref has been cleared by the GC without decoding it.
838   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
839 
GetMonitorEnterObject()840   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
841     return tlsPtr_.monitor_enter_object;
842   }
843 
SetMonitorEnterObject(mirror::Object * obj)844   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
845     tlsPtr_.monitor_enter_object = obj;
846   }
847 
848   // Implements java.lang.Thread.interrupted.
849   bool Interrupted();
850   // Implements java.lang.Thread.isInterrupted.
851   bool IsInterrupted();
852   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
SetInterrupted(bool i)853   void SetInterrupted(bool i) {
854     tls32_.interrupted.store(i, std::memory_order_seq_cst);
855   }
856   void Notify() REQUIRES(!wait_mutex_);
857 
PoisonObjectPointers()858   ALWAYS_INLINE void PoisonObjectPointers() {
859     ++poison_object_cookie_;
860   }
861 
862   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
863 
GetPoisonObjectCookie()864   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
865     return poison_object_cookie_;
866   }
867 
868   // Parking for 0ns of relative time means an untimed park, negative (though
869   // should be handled in java code) returns immediately
870   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
871   void Unpark();
872 
873  private:
874   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
875 
876  public:
GetWaitMutex()877   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
878     return wait_mutex_;
879   }
880 
GetWaitConditionVariable()881   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
882     return wait_cond_;
883   }
884 
GetWaitMonitor()885   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
886     return wait_monitor_;
887   }
888 
SetWaitMonitor(Monitor * mon)889   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
890     wait_monitor_ = mon;
891   }
892 
893   // Waiter link-list support.
GetWaitNext()894   Thread* GetWaitNext() const {
895     return tlsPtr_.wait_next;
896   }
897 
SetWaitNext(Thread * next)898   void SetWaitNext(Thread* next) {
899     tlsPtr_.wait_next = next;
900   }
901 
GetClassLoaderOverride()902   jobject GetClassLoaderOverride() {
903     return tlsPtr_.class_loader_override;
904   }
905 
906   void SetClassLoaderOverride(jobject class_loader_override);
907 
908   // Create the internal representation of a stack trace, that is more time
909   // and space efficient to compute than the StackTraceElement[].
910   ObjPtr<mirror::ObjectArray<mirror::Object>> CreateInternalStackTrace(
911       const ScopedObjectAccessAlreadyRunnable& soa) const
912       REQUIRES_SHARED(Locks::mutator_lock_);
913 
914   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
915   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
916   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
917   // with the number of valid frames in the returned array.
918   static jobjectArray InternalStackTraceToStackTraceElementArray(
919       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
920       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
921       REQUIRES_SHARED(Locks::mutator_lock_);
922 
923   static jint InternalStackTraceToStackFrameInfoArray(
924       const ScopedObjectAccessAlreadyRunnable& soa,
925       jlong mode,  // See java.lang.StackStreamFactory for the mode flags
926       jobject internal,
927       jint startLevel,
928       jint batchSize,
929       jint startIndex,
930       jobjectArray output_array)  // java.lang.StackFrameInfo[]
931       REQUIRES_SHARED(Locks::mutator_lock_);
932 
933   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
934       REQUIRES_SHARED(Locks::mutator_lock_);
935 
HasDebuggerShadowFrames()936   bool HasDebuggerShadowFrames() const {
937     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
938   }
939 
940   // This is done by GC using a checkpoint (or in a stop-the-world pause).
941   void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
942 
943   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
944       REQUIRES_SHARED(Locks::mutator_lock_);
945 
946   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
947       REQUIRES(Locks::mutator_lock_);
948 
949   // Check that the thread state is valid. Try to fail if the thread has erroneously terminated.
950   // Note that once the thread has been terminated, it can also be deallocated.  But even if the
951   // thread state has been overwritten, the value is unlikely to be in the correct range.
VerifyState()952   void VerifyState() {
953     if (kIsDebugBuild) {
954       ThreadState state = GetState();
955       StateAndFlags::ValidateThreadState(state);
956       DCHECK_NE(state, ThreadState::kTerminated);
957     }
958   }
959 
VerifyStack()960   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
961     if (kVerifyStack) {
962       VerifyStackImpl();
963     }
964   }
965 
966   //
967   // Offsets of various members of native Thread class, used by compiled code.
968   //
969 
970   template<PointerSize pointer_size>
ThinLockIdOffset()971   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
972     return ThreadOffset<pointer_size>(
973         OFFSETOF_MEMBER(Thread, tls32_) +
974         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
975   }
976 
977   template<PointerSize pointer_size>
TidOffset()978   static constexpr ThreadOffset<pointer_size> TidOffset() {
979     return ThreadOffset<pointer_size>(
980         OFFSETOF_MEMBER(Thread, tls32_) +
981         OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
982   }
983 
984   template<PointerSize pointer_size>
InterruptedOffset()985   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
986     return ThreadOffset<pointer_size>(
987         OFFSETOF_MEMBER(Thread, tls32_) +
988         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
989   }
990 
991   template<PointerSize pointer_size>
WeakRefAccessEnabledOffset()992   static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
993     return ThreadOffset<pointer_size>(
994         OFFSETOF_MEMBER(Thread, tls32_) +
995         OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
996   }
997 
998   template<PointerSize pointer_size>
ThreadFlagsOffset()999   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
1000     return ThreadOffset<pointer_size>(
1001         OFFSETOF_MEMBER(Thread, tls32_) +
1002         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
1003   }
1004 
1005   template<PointerSize pointer_size>
IsGcMarkingOffset()1006   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
1007     return ThreadOffset<pointer_size>(
1008         OFFSETOF_MEMBER(Thread, tls32_) +
1009         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
1010   }
1011 
1012   template <PointerSize pointer_size>
DeoptCheckRequiredOffset()1013   static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
1014     return ThreadOffset<pointer_size>(
1015         OFFSETOF_MEMBER(Thread, tls32_) +
1016         OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
1017   }
1018 
IsGcMarkingSize()1019   static constexpr size_t IsGcMarkingSize() {
1020     return sizeof(tls32_.is_gc_marking);
1021   }
1022 
1023   template<PointerSize pointer_size>
SharedMethodHotnessOffset()1024   static constexpr ThreadOffset<pointer_size> SharedMethodHotnessOffset() {
1025     return ThreadOffset<pointer_size>(
1026         OFFSETOF_MEMBER(Thread, tls32_) +
1027         OFFSETOF_MEMBER(tls_32bit_sized_values, shared_method_hotness));
1028   }
1029 
1030   // Deoptimize the Java stack.
1031   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
1032 
1033  private:
1034   template<PointerSize pointer_size>
ThreadOffsetFromTlsPtr(size_t tls_ptr_offset)1035   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
1036     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
1037     size_t scale = (pointer_size > kRuntimePointerSize) ?
1038       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
1039     size_t shrink = (kRuntimePointerSize > pointer_size) ?
1040       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
1041     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
1042   }
1043 
1044  public:
1045   template<PointerSize pointer_size>
QuickEntryPointOffset(size_t quick_entrypoint_offset)1046   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
1047       size_t quick_entrypoint_offset) {
1048     return ThreadOffsetFromTlsPtr<pointer_size>(
1049         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
1050   }
1051 
QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,PointerSize pointer_size)1052   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
1053                                                           PointerSize pointer_size) {
1054     if (pointer_size == PointerSize::k32) {
1055       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
1056           Uint32Value();
1057     } else {
1058       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
1059           Uint32Value();
1060     }
1061   }
1062 
1063   template<PointerSize pointer_size>
JniEntryPointOffset(size_t jni_entrypoint_offset)1064   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
1065     return ThreadOffsetFromTlsPtr<pointer_size>(
1066         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
1067   }
1068 
1069   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
1070   template <PointerSize pointer_size>
ReadBarrierMarkEntryPointsOffset(size_t reg)1071   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
1072     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
1073     DCHECK_LT(reg, 30u);
1074     // The ReadBarrierMarkRegX entry points are ordered by increasing
1075     // register number in Thread::tls_Ptr_.quick_entrypoints.
1076     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
1077         + static_cast<size_t>(pointer_size) * reg;
1078   }
1079 
1080   template<PointerSize pointer_size>
SelfOffset()1081   static constexpr ThreadOffset<pointer_size> SelfOffset() {
1082     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
1083   }
1084 
1085   template<PointerSize pointer_size>
ExceptionOffset()1086   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
1087     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
1088   }
1089 
1090   template<PointerSize pointer_size>
PeerOffset()1091   static constexpr ThreadOffset<pointer_size> PeerOffset() {
1092     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
1093   }
1094 
1095 
1096   template<PointerSize pointer_size>
CardTableOffset()1097   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
1098     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
1099   }
1100 
1101   template<PointerSize pointer_size>
ThreadSuspendTriggerOffset()1102   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
1103     return ThreadOffsetFromTlsPtr<pointer_size>(
1104         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
1105   }
1106 
1107   template<PointerSize pointer_size>
ThreadLocalPosOffset()1108   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
1109     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1110                                                                 thread_local_pos));
1111   }
1112 
1113   template<PointerSize pointer_size>
ThreadLocalEndOffset()1114   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
1115     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1116                                                                 thread_local_end));
1117   }
1118 
1119   template<PointerSize pointer_size>
ThreadLocalObjectsOffset()1120   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
1121     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1122                                                                 thread_local_objects));
1123   }
1124 
1125   template<PointerSize pointer_size>
RosAllocRunsOffset()1126   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
1127     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1128                                                                 rosalloc_runs));
1129   }
1130 
1131   template<PointerSize pointer_size>
ThreadLocalAllocStackTopOffset()1132   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
1133     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1134                                                                 thread_local_alloc_stack_top));
1135   }
1136 
1137   template<PointerSize pointer_size>
ThreadLocalAllocStackEndOffset()1138   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
1139     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1140                                                                 thread_local_alloc_stack_end));
1141   }
1142 
1143   template <PointerSize pointer_size>
TraceBufferCurrPtrOffset()1144   static constexpr ThreadOffset<pointer_size> TraceBufferCurrPtrOffset() {
1145     return ThreadOffsetFromTlsPtr<pointer_size>(
1146         OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer_curr_entry));
1147   }
1148 
1149   template <PointerSize pointer_size>
TraceBufferPtrOffset()1150   static constexpr ThreadOffset<pointer_size> TraceBufferPtrOffset() {
1151     return ThreadOffsetFromTlsPtr<pointer_size>(
1152         OFFSETOF_MEMBER(tls_ptr_sized_values, method_trace_buffer));
1153   }
1154 
1155   // Size of stack less any space reserved for stack overflow
1156   template <StackType stack_type>
GetUsableStackSize()1157   size_t GetUsableStackSize() const {
1158     return GetStackSize<stack_type>() - static_cast<size_t>(
1159         GetStackEnd<stack_type>() - GetStackBegin<stack_type>());
1160   }
1161 
1162   template <StackType stack_type>
1163   ALWAYS_INLINE uint8_t* GetStackEnd() const;
1164 
1165   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
1166 
1167   // Set the stack end to that to be used during a stack overflow
1168   template <StackType stack_type>
1169   ALWAYS_INLINE void SetStackEndForStackOverflow()
1170       REQUIRES_SHARED(Locks::mutator_lock_);
1171 
1172   // Set the stack end to that to be used during regular execution
1173   template <StackType stack_type>
1174   ALWAYS_INLINE void ResetDefaultStackEnd();
1175 
1176   template <StackType stack_type>
IsHandlingStackOverflow()1177   bool IsHandlingStackOverflow() const {
1178     return GetStackEnd<stack_type>() == GetStackBegin<stack_type>();
1179   }
1180 
1181   template<PointerSize pointer_size>
1182   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
1183     return ThreadOffsetFromTlsPtr<pointer_size>(
1184         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
1185   }
1186 
1187   template<PointerSize pointer_size>
1188   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
1189     return ThreadOffsetFromTlsPtr<pointer_size>(
1190         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
1191   }
1192 
1193   template<PointerSize pointer_size>
1194   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
1195     return ThreadOffsetFromTlsPtr<pointer_size>(
1196         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1197         ManagedStack::TaggedTopQuickFrameOffset());
1198   }
1199 
1200   const ManagedStack* GetManagedStack() const {
1201     return &tlsPtr_.managed_stack;
1202   }
1203 
1204   // Linked list recording fragments of managed stack.
1205   void PushManagedStackFragment(ManagedStack* fragment) {
1206     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
1207   }
1208   void PopManagedStackFragment(const ManagedStack& fragment) {
1209     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
1210   }
1211 
1212   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
1213   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
1214 
1215   template<PointerSize pointer_size>
1216   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
1217     return ThreadOffsetFromTlsPtr<pointer_size>(
1218         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
1219         ManagedStack::TopShadowFrameOffset());
1220   }
1221 
1222   // Is the given object on the quick stack?
1223   bool IsRawObjOnQuickStack(uint8_t* raw_obj) const;
1224 
1225   // Is the given obj in one of this thread's JNI transition frames?
1226   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
1227 
1228   // Convert a global (or weak global) jobject into an ObjPtr<mirror::Object>
1229   ObjPtr<mirror::Object> DecodeGlobalJObject(jobject obj) const
1230       REQUIRES_SHARED(Locks::mutator_lock_);
1231 
1232   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
1233       REQUIRES_SHARED(Locks::mutator_lock_);
1234 
1235   BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1236     return tlsPtr_.top_handle_scope;
1237   }
1238 
1239   void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
1240     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
1241     tlsPtr_.top_handle_scope = handle_scope;
1242   }
1243 
1244   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
1245     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
1246     DCHECK(handle_scope != nullptr);
1247     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
1248     return handle_scope;
1249   }
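  // Typical usage goes through a scoped helper rather than calling Push/PopHandleScope directly;
  // a minimal sketch, assuming the StackHandleScope helper from handle_scope.h:
  //
  //   StackHandleScope<1> hs(self);                   // pushes itself onto `self`
  //   Handle<mirror::Object> h = hs.NewHandle(obj);   // `obj` is now visible as a GC root
  //   // ... code that may suspend or trigger GC; h.Get() remains valid ...
  //   // The StackHandleScope destructor pops the scope again.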
1250 
1251   template<PointerSize pointer_size>
1252   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
1253     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1254                                                                 top_handle_scope));
1255   }
1256 
1257   template<PointerSize pointer_size>
1258   static constexpr ThreadOffset<pointer_size> MutatorLockOffset() {
1259     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1260                                                                 mutator_lock));
1261   }
1262 
1263   template<PointerSize pointer_size>
1264   static constexpr ThreadOffset<pointer_size> HeldMutexOffset(LockLevel level) {
1265     DCHECK_LT(enum_cast<size_t>(level), arraysize(tlsPtr_.held_mutexes));
1266     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
1267                                                                 held_mutexes[level]));
1268   }
1269 
1270   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
1271     return tlsPtr_.top_reflective_handle_scope;
1272   }
1273 
1274   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
1275     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
1276     DCHECK_EQ(scope->GetThread(), this);
1277     tlsPtr_.top_reflective_handle_scope = scope;
1278   }
1279 
1280   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
1281     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
1282     DCHECK(handle_scope != nullptr);
1283     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
1284     return handle_scope;
1285   }
1286 
1287   bool GetIsGcMarking() const {
1288     DCHECK(gUseReadBarrier);
1289     return tls32_.is_gc_marking;
1290   }
1291 
1292   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
1293 
1294   bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
1295 
1296   void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
1297 
1298   bool GetWeakRefAccessEnabled() const;  // Only safe for current thread.
1299 
1300   void SetWeakRefAccessEnabled(bool enabled) {
1301     DCHECK(gUseReadBarrier);
1302     WeakRefAccessState new_state = enabled ?
1303         WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
1304     tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
1305   }
1306 
1307   uint32_t GetDisableThreadFlipCount() const {
1308     return tls32_.disable_thread_flip_count;
1309   }
1310 
1311   void IncrementDisableThreadFlipCount() {
1312     ++tls32_.disable_thread_flip_count;
1313   }
1314 
1315   void DecrementDisableThreadFlipCount() {
1316     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1317     --tls32_.disable_thread_flip_count;
1318   }
1319 
1320   // Returns true if the thread is a runtime thread (e.g., from a ThreadPool).
1321   bool IsRuntimeThread() const {
1322     return is_runtime_thread_;
1323   }
1324 
1325   void SetIsRuntimeThread(bool is_runtime_thread) {
1326     is_runtime_thread_ = is_runtime_thread;
1327   }
1328 
1329   uint32_t CorePlatformApiCookie() {
1330     return core_platform_api_cookie_;
1331   }
1332 
1333   void SetCorePlatformApiCookie(uint32_t cookie) {
1334     core_platform_api_cookie_ = cookie;
1335   }
1336 
1337   // Returns true if the thread is allowed to load java classes.
1338   bool CanLoadClasses() const;
1339 
1340   // Returns the fake exception used to activate deoptimization.
1341   static mirror::Throwable* GetDeoptimizationException() {
1342     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1343     // represented by ObjPtr.
1344     return reinterpret_cast<mirror::Throwable*>(0x100);
1345   }
1346 
1347   // Currently, deoptimization invokes the verifier, which can trigger class loading
1348   // and execute Java code, so nested deoptimizations may happen.
1349   // We need to save the ongoing deoptimization shadow frames and return
1350   // values on stacks.
1351   // 'from_code' denotes whether the deoptimization was explicitly made from
1352   // compiled code.
1353   // 'method_type' contains info on whether deoptimization should advance
1354   // dex_pc.
1355   void PushDeoptimizationContext(const JValue& return_value,
1356                                  bool is_reference,
1357                                  ObjPtr<mirror::Throwable> exception,
1358                                  bool from_code,
1359                                  DeoptimizationMethodType method_type)
1360       REQUIRES_SHARED(Locks::mutator_lock_);
1361   void PopDeoptimizationContext(JValue* result,
1362                                 ObjPtr<mirror::Throwable>* exception,
1363                                 bool* from_code,
1364                                 DeoptimizationMethodType* method_type)
1365       REQUIRES_SHARED(Locks::mutator_lock_);
1366   void AssertHasDeoptimizationContext()
1367       REQUIRES_SHARED(Locks::mutator_lock_);
1368   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1369   ShadowFrame* PopStackedShadowFrame();
1370   ShadowFrame* MaybePopDeoptimizedStackedShadowFrame();
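  // The push/pop calls above are expected to be balanced. A minimal pairing sketch (hypothetical
  // caller; kDefault is assumed to be one of the DeoptimizationMethodType enumerators):
  //
  //   self->PushDeoptimizationContext(result, /*is_reference=*/ false, exception,
  //                                   /*from_code=*/ false, DeoptimizationMethodType::kDefault);
  //   ...
  //   JValue out;
  //   ObjPtr<mirror::Throwable> pending;
  //   bool from_code;
  //   DeoptimizationMethodType type;
  //   self->PopDeoptimizationContext(&out, &pending, &from_code, &type);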
1371 
1372   // For the debugger: find the shadow frame that corresponds to a frame id,
1373   // or return null if there is none.
1374   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1375       REQUIRES_SHARED(Locks::mutator_lock_);
1376   // For debugger, find the bool array that keeps track of the updated vreg set
1377   // for a frame id.
1378   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1379   // For debugger, find the shadow frame that corresponds to a frame id. If
1380   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1381   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1382                                                uint32_t num_vregs,
1383                                                ArtMethod* method,
1384                                                uint32_t dex_pc)
1385       REQUIRES_SHARED(Locks::mutator_lock_);
1386 
1387   // Delete the entry that maps from frame_id to shadow_frame.
1388   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1389       REQUIRES_SHARED(Locks::mutator_lock_);
1390 
1391   std::vector<ArtMethod*>* GetStackTraceSample() const {
1392     DCHECK(!IsAotCompiler());
1393     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1394   }
1395 
1396   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1397     DCHECK(!IsAotCompiler());
1398     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1399   }
1400 
1401   verifier::VerifierDeps* GetVerifierDeps() const {
1402     DCHECK(IsAotCompiler());
1403     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1404   }
1405 
1406   // It is the responsibility of the caller to make sure the verifier_deps
1407   // entry in the thread is cleared before the actual VerifierDeps object, or
1408   // the thread, is destroyed.
1409   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1410     DCHECK(IsAotCompiler());
1411     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1412     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1413   }
1414 
1415   uintptr_t* GetMethodTraceBuffer() { return tlsPtr_.method_trace_buffer; }
1416 
1417   uintptr_t** GetTraceBufferCurrEntryPtr() { return &tlsPtr_.method_trace_buffer_curr_entry; }
1418 
1419   void SetMethodTraceBuffer(uintptr_t* buffer, int init_index) {
1420     tlsPtr_.method_trace_buffer = buffer;
1421     SetMethodTraceBufferCurrentEntry(init_index);
1422   }
1423 
1424   void SetMethodTraceBufferCurrentEntry(int index) {
1425     uintptr_t* buffer = tlsPtr_.method_trace_buffer;
1426     if (buffer == nullptr) {
1427       tlsPtr_.method_trace_buffer_curr_entry = nullptr;
1428     } else {
1429       DCHECK(buffer != nullptr);
1430       tlsPtr_.method_trace_buffer_curr_entry = buffer + index;
1431     }
1432   }
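  // A small sketch of the indexing performed above (kNumEntries is a hypothetical size):
  //
  //   uintptr_t* buf = ...;                    // backing storage with kNumEntries elements
  //   SetMethodTraceBuffer(buf, 0);            // curr_entry == buf
  //   SetMethodTraceBuffer(buf, kNumEntries);  // curr_entry == buf + kNumEntries (one past the end)
  //
  // TraceBufferPtrOffset()/TraceBufferCurrPtrOffset() expose the two fields at fixed offsets from
  // the Thread object.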
1433 
1434   void UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType type);
1435 
1436   uint64_t GetTraceClockBase() const {
1437     return tls64_.trace_clock_base;
1438   }
1439 
1440   void SetTraceClockBase(uint64_t clock_base) {
1441     tls64_.trace_clock_base = clock_base;
1442   }
1443 
1444   BaseMutex* GetHeldMutex(LockLevel level) const {
1445     return tlsPtr_.held_mutexes[level];
1446   }
1447 
1448   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1449     tlsPtr_.held_mutexes[level] = mutex;
1450   }
1451 
1452   // Possibly check that no mutexes at level kMonitorLock or above are subsequently acquired.
1453   // Only invoked by the thread itself.
1454   void DisallowPreMonitorMutexes();
1455 
1456   // Undo the effect of the previous call. Again only invoked by the thread itself.
1457   void AllowPreMonitorMutexes();
1458 
1459   // Read a flag with the given memory order. See mutator_gc_coord.md for memory ordering
1460   // considerations.
1461   bool ReadFlag(ThreadFlag flag, std::memory_order order) const {
1462     return GetStateAndFlags(order).IsFlagSet(flag);
1463   }
1464 
1465   void AtomicSetFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1466     // Since we discard the returned value, memory_order_release will often suffice.
1467     tls32_.state_and_flags.fetch_or(enum_cast<uint32_t>(flag), order);
1468   }
1469 
1470   void AtomicClearFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) {
1471     // Since we discard the returned value, memory_order_release will often suffice.
1472     tls32_.state_and_flags.fetch_and(~enum_cast<uint32_t>(flag), order);
1473   }
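  // Minimal usage sketch for the flag accessors above (hypothetical caller; real checkpoint code
  // also takes the appropriate locks, which are omitted here):
  //
  //   self->AtomicSetFlag(ThreadFlag::kCheckpointRequest, std::memory_order_release);
  //   ...
  //   if (self->ReadFlag(ThreadFlag::kCheckpointRequest, std::memory_order_acquire)) {
  //     // Observe the request, then clear it.
  //     self->AtomicClearFlag(ThreadFlag::kCheckpointRequest);
  //   }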
1474 
1475   void ResetQuickAllocEntryPointsForThread();
1476 
1477   // Returns the remaining space in the TLAB.
1478   size_t TlabSize() const {
1479     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1480   }
1481 
1482   // Returns pos offset from start.
1483   size_t GetTlabPosOffset() const {
1484     return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
1485   }
1486 
1487   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1488   size_t TlabRemainingCapacity() const {
1489     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1490   }
1491 
1492   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1493   void ExpandTlab(size_t bytes) {
1494     tlsPtr_.thread_local_end += bytes;
1495     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1496   }
1497 
1498   // Called from Concurrent mark-compact GC to slide the TLAB pointers backwards
1499   // to adjust to post-compact addresses.
1500   void AdjustTlab(size_t slide_bytes);
1501 
1502   // Doesn't check that there is room.
1503   mirror::Object* AllocTlab(size_t bytes);
1504   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1505   bool HasTlab() const;
1506   void ResetTlab();
1507   uint8_t* GetTlabStart() {
1508     return tlsPtr_.thread_local_start;
1509   }
1510   uint8_t* GetTlabPos() {
1511     return tlsPtr_.thread_local_pos;
1512   }
1513   uint8_t* GetTlabEnd() {
1514     return tlsPtr_.thread_local_end;
1515   }
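  // The TLAB is a simple bump-pointer region delimited by the fields above, with
  //
  //   thread_local_start <= thread_local_pos <= thread_local_end <= thread_local_limit.
  //
  // A minimal allocation sketch using these accessors (hypothetical caller):
  //
  //   if (self->TlabSize() < bytes && self->TlabRemainingCapacity() >= bytes) {
  //     self->ExpandTlab(bytes - self->TlabSize());   // grow thread_local_end toward the limit
  //   }
  //   if (self->TlabSize() >= bytes) {
  //     mirror::Object* obj = self->AllocTlab(bytes);  // bumps thread_local_pos
  //   }
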
1516   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1517   // equal to a valid pointer.
1518   void RemoveSuspendTrigger() {
1519     tlsPtr_.suspend_trigger.store(reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger),
1520                                   std::memory_order_relaxed);
1521   }
1522 
1523   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1524   // The next time a suspend check is done, it will load from the value at this address
1525   // and trigger a SIGSEGV.
1526   // Only needed if Runtime::implicit_suspend_checks_ is true. On some platforms, and in the
1527   // interpreter, client code currently just looks at the thread flags directly to determine
1528   // whether we should suspend, so this call is not always necessary.
1529   void TriggerSuspend() { tlsPtr_.suspend_trigger.store(nullptr, std::memory_order_release); }
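  // How the implicit check is expected to work (sketch): compiled code periodically performs a
  // load through tlsPtr_.suspend_trigger, roughly
  //
  //   uintptr_t* trigger = <value read from the suspend_trigger slot>;
  //   (void)*trigger;   // harmless while self-pointing; faults after TriggerSuspend() stores null
  //
  // The SIGSEGV handler then runs the real suspend check, and RemoveSuspendTrigger() restores the
  // self-pointing value afterwards.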
1530 
1531   // Push an object onto the allocation stack.
1532   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1533       REQUIRES_SHARED(Locks::mutator_lock_);
1534 
1535   // Set the thread local allocation pointers to the given pointers.
1536   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1537                                      StackReference<mirror::Object>* end);
1538 
1539   // Resets the thread local allocation pointers.
1540   void RevokeThreadLocalAllocationStack();
1541 
1542   size_t GetThreadLocalBytesAllocated() const {
1543     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1544   }
1545 
1546   size_t GetThreadLocalObjectsAllocated() const {
1547     return tlsPtr_.thread_local_objects;
1548   }
1549 
1550   void* GetRosAllocRun(size_t index) const {
1551     return tlsPtr_.rosalloc_runs[index];
1552   }
1553 
1554   void SetRosAllocRun(size_t index, void* run) {
1555     tlsPtr_.rosalloc_runs[index] = run;
1556   }
1557 
1558   template <StackType stack_type>
1559   bool ProtectStack(bool fatal_on_error = true);
1560   template <StackType stack_type>
1561   bool UnprotectStack();
1562 
1563   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1564     return --tls32_.force_interpreter_count;
1565   }
1566 
1567   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1568     return ++tls32_.force_interpreter_count;
1569   }
1570 
1571   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1572     tls32_.force_interpreter_count = value;
1573   }
1574 
1575   uint32_t ForceInterpreterCount() const {
1576     return tls32_.force_interpreter_count;
1577   }
1578 
1579   bool IsForceInterpreter() const {
1580     return tls32_.force_interpreter_count != 0;
1581   }
1582 
1583   bool IncrementMakeVisiblyInitializedCounter() {
1584     tls32_.make_visibly_initialized_counter += 1u;
1585     DCHECK_LE(tls32_.make_visibly_initialized_counter, kMakeVisiblyInitializedCounterTriggerCount);
1586     if (tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount) {
1587       tls32_.make_visibly_initialized_counter = 0u;
1588       return true;
1589     }
1590     return false;
1591   }
1592 
1593   void InitStringEntryPoints();
1594 
1595   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1596     if (kCheckDebugDisallowReadBarrierCount) {
1597       debug_disallow_read_barrier_ += delta;
1598     }
1599   }
1600 
1601   uint8_t GetDebugDisallowReadBarrierCount() const {
1602     return kCheckDebugDisallowReadBarrierCount ? debug_disallow_read_barrier_ : 0u;
1603   }
1604 
1605   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1606   // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1607   // it from being deleted.
1608   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1609 
1610   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1611   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1612   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
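  // Minimal usage sketch (MyTlsData and "my-key" are hypothetical, not part of the runtime):
  //
  //   class MyTlsData : public TLSData {
  //    public:
  //     ~MyTlsData() override { /* runs at thread exit or when the key is overwritten */ }
  //   };
  //   self->SetCustomTLS("my-key", new MyTlsData);   // the thread takes ownership
  //   TLSData* data = self->GetCustomTLS("my-key");  // borrowed pointer, see note above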
1613 
1614   // Returns true if the current thread is the jit sensitive thread.
1615   bool IsJitSensitiveThread() const {
1616     return this == jit_sensitive_thread_;
1617   }
1618 
1619   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1620 
1621   // Cause the 'this' thread to abort the process by sending SIGABRT.  Thus we should get an
1622   // asynchronous stack trace for 'this' thread, rather than waiting for it to process a
1623   // checkpoint. Useful mostly to discover why a thread isn't responding to a suspend request or
1624   // checkpoint. The caller should "suspend" (in the Java sense) this thread before invoking the
1625   // method, so the thread can't get deallocated before we access it.
1626   NO_RETURN void AbortInThis(const std::string& message);
1627 
1628   // Returns true if StrictMode events are traced for the current thread.
1629   static bool IsSensitiveThread() {
1630     if (is_sensitive_thread_hook_ != nullptr) {
1631       return (*is_sensitive_thread_hook_)();
1632     }
1633     return false;
1634   }
1635 
1636   // Set the read barrier marking entrypoints to be non-null.
1637   void SetReadBarrierEntrypoints();
1638 
1639   ObjPtr<mirror::Object> CreateCompileTimePeer(const char* name,
1640                                                bool as_daemon,
1641                                                jobject thread_group)
1642       REQUIRES_SHARED(Locks::mutator_lock_);
1643 
1644   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1645     return &interpreter_cache_;
1646   }
1647 
1648   // Clear all thread-local interpreter caches.
1649   //
1650   // Since the caches are keyed by memory pointer to dex instructions, this must be
1651   // called when any dex code is unloaded (before different code gets loaded at the
1652   // same memory location).
1653   //
1654   // If presence of cache entry implies some pre-conditions, this must also be
1655   // called if the pre-conditions might no longer hold true.
1656   static void ClearAllInterpreterCaches();
1657 
1658   template<PointerSize pointer_size>
1659   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1660     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1661   }
1662 
1663   static constexpr int InterpreterCacheSizeLog2() {
1664     return WhichPowerOf2(InterpreterCache::kSize);
1665   }
1666 
1667   static constexpr uint32_t AllThreadFlags() {
1668     return enum_cast<uint32_t>(ThreadFlag::kLastFlag) |
1669            (enum_cast<uint32_t>(ThreadFlag::kLastFlag) - 1u);
1670   }
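  // Since kLastFlag is a single (power-of-two) flag bit, kLastFlag | (kLastFlag - 1) sets that
  // bit plus every lower bit; e.g. if kLastFlag were 1u << 5, the result would be 0b111111.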
1671 
1672   static constexpr uint32_t SuspendOrCheckpointRequestFlags() {
1673     return enum_cast<uint32_t>(ThreadFlag::kSuspendRequest) |
1674            enum_cast<uint32_t>(ThreadFlag::kCheckpointRequest) |
1675            enum_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest);
1676   }
1677 
1678   static constexpr uint32_t FlipFunctionFlags() {
1679     return enum_cast<uint32_t>(ThreadFlag::kPendingFlipFunction) |
1680            enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction);
1681   }
1682 
1683   static constexpr uint32_t StoredThreadStateValue(ThreadState state) {
1684     return StateAndFlags::EncodeState(state);
1685   }
1686 
1687   void ResetSharedMethodHotness() {
1688     tls32_.shared_method_hotness = kSharedMethodHotnessThreshold;
1689   }
1690 
1691   uint32_t GetSharedMethodHotness() const {
1692     return tls32_.shared_method_hotness;
1693   }
1694 
1695   uint32_t DecrementSharedMethodHotness() {
1696     tls32_.shared_method_hotness = (tls32_.shared_method_hotness - 1) & 0xffff;
1697     return tls32_.shared_method_hotness;
1698   }
1699 
1700  private:
1701   // We pretend to acquire this while running a checkpoint to detect lock ordering issues.
1702   // Initialized lazily.
1703   static std::atomic<Mutex*> cp_placeholder_mutex_;
1704 
1705   explicit Thread(bool daemon);
1706 
1707   // A successfully started thread is only deleted by the thread itself.
1708   // Threads are deleted after they have been removed from the thread list while holding
1709   // suspend_count_lock_ and thread_list_lock_. We refuse to do this while either kSuspendRequest
1710   // or kRunningFlipFunction is set. We can prevent Thread destruction by holding either of those
1711   // locks, by ensuring that either of those flags is set, or possibly by registering and checking a
1712   // ThreadExitFlag.
1713   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1714 
1715   // Thread destruction actions that do not invalidate the thread. Checkpoints and flip_functions
1716   // may still be called on this Thread object, though not by this thread, during and after the
1717   // Destroy() call.
1718   void Destroy(bool should_run_callbacks);
1719 
1720   // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1721   // observed to be set at the same time by instrumentation.
1722   void DeleteJPeer(JNIEnv* env);
1723 
1724   // Attaches the calling native thread to the runtime, returning the new native peer.
1725   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1726   template <typename PeerAction>
1727   static Thread* Attach(const char* thread_name,
1728                         bool as_daemon,
1729                         PeerAction p,
1730                         bool should_run_callbacks);
1731 
1732   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1733 
1734   template<bool kTransactionActive>
1735   static void InitPeer(ObjPtr<mirror::Object> peer,
1736                        bool as_daemon,
1737                        ObjPtr<mirror::Object> thread_group,
1738                        ObjPtr<mirror::String> thread_name,
1739                        jint thread_priority)
1740       REQUIRES_SHARED(Locks::mutator_lock_);
1741 
1742   // Avoid use; callers should use SetState instead.
1743   // Used only by `Thread` destructor and stack trace collection in semi-space GC (currently
1744   // disabled by `kStoreStackTraces = false`). May not be called on a runnable thread other
1745   // than Thread::Current().
1746   // NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in
1747   // different states, so clang cannot perform the thread safety analysis.
1748   ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS {
1749     StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1750     ThreadState old_state = old_state_and_flags.GetState();
1751     if (old_state == new_state) {
1752       // Nothing to do.
1753     } else if (old_state == ThreadState::kRunnable) {
1754       DCHECK_EQ(this, Thread::Current());
1755       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1756       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1757       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1758       TransitionToSuspendedAndRunCheckpoints(new_state);
1759       // Since we transitioned to a suspended state, check the pass barrier requests.
1760       CheckActiveSuspendBarriers();
1761     } else {
1762       while (true) {
1763         StateAndFlags new_state_and_flags = old_state_and_flags;
1764         new_state_and_flags.SetState(new_state);
1765         if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(
1766                 old_state_and_flags.GetValue(), new_state_and_flags.GetValue()))) {
1767           break;
1768         }
1769         // Reload state and flags.
1770         old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
1771         DCHECK_EQ(old_state, old_state_and_flags.GetState());
1772       }
1773     }
1774     return old_state;
1775   }
1776 
1777   MutatorMutex* GetMutatorLock() RETURN_CAPABILITY(Locks::mutator_lock_) {
1778     DCHECK_EQ(tlsPtr_.mutator_lock, Locks::mutator_lock_);
1779     return tlsPtr_.mutator_lock;
1780   }
1781 
1782   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1783 
1784   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1785   DumpOrder DumpStack(std::ostream& os,
1786                       bool dump_native_stack = true,
1787                       bool force_dump_stack = false) const
1788       REQUIRES_SHARED(Locks::mutator_lock_);
1789   DumpOrder DumpStack(std::ostream& os,
1790                       unwindstack::AndroidLocalUnwinder& unwinder,
1791                       bool dump_native_stack = true,
1792                       bool force_dump_stack = false) const
1793       REQUIRES_SHARED(Locks::mutator_lock_);
1794 
1795   // Out-of-line conveniences for debugging in gdb.
1796   static Thread* CurrentFromGdb();  // Like Thread::Current.
1797   // Like Thread::Dump(std::cerr).
1798   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1799 
1800   // A wrapper around CreateCallback used when userfaultfd GC is used to
1801   // identify the GC by stacktrace.
1802   static NO_INLINE void* CreateCallbackWithUffdGc(void* arg);
1803   static void* CreateCallback(void* arg);
1804 
1805   void HandleUncaughtExceptions() REQUIRES_SHARED(Locks::mutator_lock_);
1806   void RemoveFromThreadGroup() REQUIRES_SHARED(Locks::mutator_lock_);
1807 
1808   // Initialize a thread.
1809   //
1810   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1811   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1812   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1813   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1814   // of false).
1815   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1816       REQUIRES(Locks::runtime_shutdown_lock_);
1817   void InitCardTable();
1818   void InitCpu();
1819   void CleanupCpu();
1820   void InitTlsEntryPoints();
1821   void InitTid();
1822   void InitPthreadKeySelf();
1823   template <StackType stack_type>
1824   bool InitStack(uint8_t* read_stack_base, size_t read_stack_size, size_t read_guard_size);
1825 
1826   void SetUpAlternateSignalStack();
1827   void TearDownAlternateSignalStack();
1828   void MadviseAwayAlternateSignalStack();
1829 
1830   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1831       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
1832       REQUIRES_SHARED(Locks::mutator_lock_);
1833 
1834   // Call PassActiveSuspendBarriers() if there are active barriers. Only called on current thread.
1835   ALWAYS_INLINE void CheckActiveSuspendBarriers()
1836       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::mutator_lock_, !Roles::uninterruptible_);
1837 
1838   // Decrement all "suspend barriers" for the current thread, notifying threads that requested our
1839   // suspension. Only called on current thread, when suspended. If suspend_count_ > 0 then we
1840   // promise that we are and will remain "suspended" until the suspend count is decremented.
1841   bool PassActiveSuspendBarriers()
1842       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::mutator_lock_);
1843 
1844   // Add an entry to active_suspend1_barriers.
1845   ALWAYS_INLINE void AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1846       REQUIRES(Locks::thread_suspend_count_lock_);
1847 
1848   // Remove last-added entry from active_suspend1_barriers.
1849   // Only makes sense if we're still holding thread_suspend_count_lock_ since insertion.
1850   // We redundantly pass in the barrier to be removed in order to enable a DCHECK.
1851   ALWAYS_INLINE void RemoveFirstSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1852       REQUIRES(Locks::thread_suspend_count_lock_);
1853 
1854   // Remove the "barrier" from the list no matter where it appears. Called only under exceptional
1855   // circumstances. The barrier must be in the list.
1856   ALWAYS_INLINE void RemoveSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
1857       REQUIRES(Locks::thread_suspend_count_lock_);
1858 
1859   ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
1860 
1861   // CHECK that the given barrier is no longer on our list.
1862   ALWAYS_INLINE void CheckBarrierInactive(WrappedSuspend1Barrier* suspend1_barrier)
1863       REQUIRES(Locks::thread_suspend_count_lock_);
1864 
1865   // Registers the current thread as the jit sensitive thread. Should be called just once.
1866   static void SetJitSensitiveThread() {
1867     if (jit_sensitive_thread_ == nullptr) {
1868       jit_sensitive_thread_ = Thread::Current();
1869     } else {
1870       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1871           << Thread::Current()->GetTid();
1872     }
1873   }
1874 
1875   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1876     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1877   }
1878 
1879   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1880   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1881   // the kCheckpointRequest flag is cleared.
1882   void RunCheckpointFunction()
1883       REQUIRES(!Locks::thread_suspend_count_lock_)
1884       REQUIRES_SHARED(Locks::mutator_lock_);
1885   void RunEmptyCheckpoint();
1886 
1887   // Return the nearest page-aligned address below the current stack top.
1888   template <StackType>
1889   NO_INLINE uint8_t* FindStackTop();
1890 
1891   // Install the protected region for implicit stack checks.
1892   template <StackType>
1893   void InstallImplicitProtection();
1894 
1895   template <bool kPrecise>
1896   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1897 
1898   static bool IsAotCompiler();
1899 
1900   void SetCachedThreadName(const char* name);
1901 
1902   // Helper functions to get/set the tls stack pointer variables.
1903   template <StackType stack_type>
1904   ALWAYS_INLINE void SetStackEnd(uint8_t* new_stack_end);
1905 
1906   template <StackType stack_type>
1907   ALWAYS_INLINE uint8_t* GetStackBegin() const;
1908 
1909   template <StackType stack_type>
1910   ALWAYS_INLINE void SetStackBegin(uint8_t* new_stack_begin);
1911 
1912   template <StackType stack_type>
1913   ALWAYS_INLINE size_t GetStackSize() const;
1914 
1915   template <StackType stack_type>
1916   ALWAYS_INLINE void SetStackSize(size_t new_stack_size);
1917 
1918   // Helper class for manipulating the 32 bits of atomically changed state and flags.
1919   class StateAndFlags {
1920    public:
1921     explicit StateAndFlags(uint32_t value) : value_(value) {}
1922 
1923     uint32_t GetValue() const {
1924       return value_;
1925     }
1926 
1927     void SetValue(uint32_t value) {
1928       value_ = value;
1929     }
1930 
1931     bool IsAnyOfFlagsSet(uint32_t flags) const {
1932       DCHECK_EQ(flags & ~AllThreadFlags(), 0u);
1933       return (value_ & flags) != 0u;
1934     }
1935 
1936     bool IsFlagSet(ThreadFlag flag) const {
1937       return (value_ & enum_cast<uint32_t>(flag)) != 0u;
1938     }
1939 
1940     void SetFlag(ThreadFlag flag) {
1941       value_ |= enum_cast<uint32_t>(flag);
1942     }
1943 
1944     StateAndFlags WithFlag(ThreadFlag flag) const {
1945       StateAndFlags result = *this;
1946       result.SetFlag(flag);
1947       return result;
1948     }
1949 
1950     StateAndFlags WithoutFlag(ThreadFlag flag) const {
1951       StateAndFlags result = *this;
1952       result.ClearFlag(flag);
1953       return result;
1954     }
1955 
1956     void ClearFlag(ThreadFlag flag) {
1957       value_ &= ~enum_cast<uint32_t>(flag);
1958     }
1959 
1960     ThreadState GetState() const {
1961       ThreadState state = ThreadStateField::Decode(value_);
1962       ValidateThreadState(state);
1963       return state;
1964     }
1965 
1966     void SetState(ThreadState state) {
1967       ValidateThreadState(state);
1968       value_ = ThreadStateField::Update(state, value_);
1969     }
1970 
1971     StateAndFlags WithState(ThreadState state) const {
1972       StateAndFlags result = *this;
1973       result.SetState(state);
1974       return result;
1975     }
1976 
1977     static constexpr uint32_t EncodeState(ThreadState state) {
1978       ValidateThreadState(state);
1979       return ThreadStateField::Encode(state);
1980     }
1981 
1982     static constexpr void ValidateThreadState(ThreadState state) {
1983       if (kIsDebugBuild && state != ThreadState::kRunnable) {
1984         CHECK_GE(state, ThreadState::kTerminated);
1985         CHECK_LE(state, ThreadState::kSuspended);
1986         CHECK_NE(state, ThreadState::kObsoleteRunnable);
1987       }
1988     }
1989 
1990     // The value holds thread flags and thread state.
1991     uint32_t value_;
1992 
1993     static constexpr size_t kThreadStateBitSize = BitSizeOf<std::underlying_type_t<ThreadState>>();
1994     static constexpr size_t kThreadStatePosition = BitSizeOf<uint32_t>() - kThreadStateBitSize;
1995     using ThreadStateField = BitField<ThreadState, kThreadStatePosition, kThreadStateBitSize>;
1996     static_assert(
1997         WhichPowerOf2(enum_cast<uint32_t>(ThreadFlag::kLastFlag)) < kThreadStatePosition);
1998   };
1999   static_assert(sizeof(StateAndFlags) == sizeof(uint32_t), "Unexpected StateAndFlags size");
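  // Resulting 32-bit layout, based on the field definitions above: the ThreadState occupies the
  // top kThreadStateBitSize bits and the ThreadFlag bits occupy the low bits, e.g.
  //
  //   value_ = [ ThreadState (high bits) | possibly unused bits | ThreadFlag bits (low bits) ]
  //
  // which is why a single 32-bit CAS can change the state while simultaneously observing any
  // pending flags.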
2000 
2001   StateAndFlags GetStateAndFlags(std::memory_order order) const {
2002     return StateAndFlags(tls32_.state_and_flags.load(order));
2003   }
2004 
2005   // Format state and flags as a hex string. For diagnostic output.
2006   std::string StateAndFlagsAsHexString() const;
2007 
2008   // Run the flip function and notify other threads that may have tried
2009   // to do that concurrently.
2010   void RunFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2011 
2012   // Ensure that thread flip function for thread target started running. If no other thread is
2013   // executing it, the calling thread shall run the flip function and then notify other threads
2014   // that have tried to do that concurrently. After this function returns, the
2015   // `ThreadFlag::kPendingFlipFunction` is cleared but another thread may still be running the
2016   // flip function as indicated by the `ThreadFlag::kRunningFlipFunction`. Optional arguments:
2017   //  - old_state_and_flags indicates the current state and flags value for the thread, with
2018   //    at least kPendingFlipFunction set. The thread should logically acquire the
2019   //    mutator lock before running the flip function.  A special zero value indicates that the
2020   //    thread already holds the mutator lock, and the actual state_and_flags must be read.
2021   //    A non-zero value implies this == Current().
2022   //  - If tef is non-null, we check that the target thread has not yet exited, as indicated by
2023   //    tef. In that case, we acquire thread_list_lock_ as needed.
2024   //  - If finished is non-null, we assign to *finished to indicate whether the flip was known to
2025   //    be completed when we returned.
2026   //  Returns true if and only if we acquired the mutator lock (which implies that we ran the flip
2027   //  function after finding old_state_and_flags unchanged).
2028   static bool EnsureFlipFunctionStarted(Thread* self,
2029                                         Thread* target,
2030                                         StateAndFlags old_state_and_flags = StateAndFlags(0),
2031                                         ThreadExitFlag* tef = nullptr,
2032                                         /*out*/ bool* finished = nullptr)
2033       REQUIRES(!Locks::thread_list_lock_) TRY_ACQUIRE_SHARED(true, Locks::mutator_lock_);
2034 
2035   static void ThreadExitCallback(void* arg);
2036 
2037   // Maximum number of suspend barriers.
2038   static constexpr uint32_t kMaxSuspendBarriers = 3;
2039 
2040   // Has Thread::Startup been called?
2041   static bool is_started_;
2042 
2043   // TLS key used to retrieve the Thread*.
2044   static pthread_key_t pthread_key_self_;
2045 
2046   // Used to notify threads that they should attempt to resume, they will suspend again if
2047   // their suspend count is > 0.
2048   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
2049 
2050   // Hook passed by framework which returns true
2051   // when StrictMode events are traced for the current thread.
2052   static bool (*is_sensitive_thread_hook_)();
2053   // Stores the jit sensitive thread (which for now is the UI thread).
2054   static Thread* jit_sensitive_thread_;
2055 
2056   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
2057 
2058   /***********************************************************************************************/
2059   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
2060   // pointer size differences. To encourage shorter encoding, more frequently used values appear
2061   // first if possible.
2062   /***********************************************************************************************/
2063 
2064   struct alignas(4) tls_32bit_sized_values {
2065     // We have no control over the size of 'bool', but want our boolean fields
2066     // to be 4-byte quantities.
2067     using bool32_t = uint32_t;
2068 
2069     explicit tls_32bit_sized_values(bool is_daemon)
2070         : state_and_flags(0u),
2071           suspend_count(0),
2072           thin_lock_thread_id(0),
2073           tid(0),
2074           daemon(is_daemon),
2075           throwing_OutOfMemoryError(false),
2076           no_thread_suspension(0),
2077           thread_exit_check_count(0),
2078           is_gc_marking(false),
2079           is_deopt_check_required(false),
2080           weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
2081           disable_thread_flip_count(0),
2082           user_code_suspend_count(0),
2083           force_interpreter_count(0),
2084           make_visibly_initialized_counter(0),
2085           define_class_counter(0),
2086           num_name_readers(0),
2087           shared_method_hotness(kSharedMethodHotnessThreshold) {}
2088 
2089     // The state and flags field must be changed atomically so that flag values aren't lost.
2090     // See `StateAndFlags` for bit assignments of `ThreadFlag` and `ThreadState` values.
2091     // Keeping the state and flags together allows an atomic CAS to change from being
2092     // Suspended to Runnable without a suspend request occurring.
2093     Atomic<uint32_t> state_and_flags;
2094     static_assert(sizeof(state_and_flags) == sizeof(uint32_t),
2095                   "Size of state_and_flags and uint32 are different");
2096 
2097     // A non-zero value is used to tell the current thread to enter a safe point
2098     // at the next poll.
2099     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
2100 
2101     // Thin lock thread id. This is a small integer used by the thin lock implementation.
2102     // This is not to be confused with the native thread's tid, nor is it the value returned
2103     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
2104     // important difference between this id and the ids visible to managed code is that these
2105     // ones get reused (to ensure that they fit in the number of bits available).
2106     uint32_t thin_lock_thread_id;
2107 
2108     // System thread id.
2109     uint32_t tid;
2110 
2111     // Is the thread a daemon?
2112     const bool32_t daemon;
2113 
2114     // A boolean telling us whether we're recursively throwing OOME.
2115     bool32_t throwing_OutOfMemoryError;
2116 
2117     // A positive value implies we're in a region where thread suspension isn't expected.
2118     uint32_t no_thread_suspension;
2119 
2120     // How many times has our pthread key's destructor been called?
2121     uint32_t thread_exit_check_count;
2122 
2123     // True if the GC is in the marking phase. This is used for the CC collector only. This is
2124     // thread local so that we can simplify the logic to check for the fast path of read barriers of
2125     // GC roots.
2126     bool32_t is_gc_marking;
2127 
2128     // True if we need to check for deoptimization when returning from the runtime functions. This
2129     // is required only when a class is redefined to prevent executing code that has field offsets
2130     // embedded. For non-debuggable apps redefinition is not allowed and this flag should always be
2131     // set to false.
2132     bool32_t is_deopt_check_required;
2133 
2134     // Thread "interrupted" status; stays raised until queried or thrown.
2135     Atomic<bool32_t> interrupted;
2136 
2137     AtomicInteger park_state_;
2138 
2139     // Determines whether the thread is allowed to directly access a weak ref
2140     // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
2141     // This is used for concurrent reference processing of the CC collector only. This is thread
2142     // local so that we can enable/disable weak ref access by using a checkpoint and avoid a race
2143     // around the time weak ref access gets disabled and concurrent reference processing begins
2144     // (if weak ref access is disabled during a pause, this is not an issue.) Other collectors use
2145     // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath().  Can be
2146     // concurrently accessed by GetReferent() and set (by iterating over threads).
2147     // Can be changed from kEnabled to kVisiblyEnabled by readers. No other concurrent access is
2148     // possible when that happens.
2149     mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;
2150 
2151     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
2152     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
2153     // critical section enter.
2154     uint32_t disable_thread_flip_count;
2155 
2156     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
2157     // suspended by the runtime from those suspended by user code.
2158     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
2159     // told that AssertHeld should be good enough.
2160     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
2161 
2162     // Count of how many times this thread has been forced into the interpreter. If this is not 0 the
2163     // thread must remain in interpreted code as much as possible.
2164     uint32_t force_interpreter_count;
2165 
2166     // Counter for calls to initialize a class that's initialized but not visibly initialized.
2167     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
2168     // make initialized classes visibly initialized. This is needed because we usually make
2169     // classes visibly initialized in batches but we do not want to be stuck with a class
2170     // initialized but not visibly initialized for a long time even if no more classes are
2171     // being initialized anymore.
2172     uint32_t make_visibly_initialized_counter;
2173 
2174     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
2175     // for threads to be done with class-definition work.
2176     uint32_t define_class_counter;
2177 
2178     // A count of the number of readers of tlsPtr_.name that may still be looking at a string they
2179     // retrieved.
2180     mutable std::atomic<uint32_t> num_name_readers;
2181     static_assert(std::atomic<uint32_t>::is_always_lock_free);
2182 
2183     // Thread-local hotness counter for shared memory methods. Initialized with
2184     // `kSharedMethodHotnessThreshold`. The interpreter decrements it and goes
2185     // into the runtime when hitting zero. Note that all previous decrements
2186     // could have been executed by another method than the one seeing zero.
2187     // There is a second level counter in `Jit::shared_method_counters_` to make
2188     // sure we at least have a few samples before compiling a method.
2189     uint32_t shared_method_hotness;
2190   } tls32_;
2191 
2192   struct alignas(8) tls_64bit_sized_values {
2193     tls_64bit_sized_values() : trace_clock_base(0) {
2194     }
2195 
2196     // The clock base used for tracing.
2197     uint64_t trace_clock_base;
2198 
2199     RuntimeStats stats;
2200   } tls64_;
2201 
2202   struct alignas(sizeof(void*)) tls_ptr_sized_values {
2203       tls_ptr_sized_values() : card_table(nullptr),
2204                                exception(nullptr),
2205                                stack_end(nullptr),
2206                                managed_stack(),
2207                                suspend_trigger(nullptr),
2208                                jni_env(nullptr),
2209                                tmp_jni_env(nullptr),
2210                                self(nullptr),
2211                                opeer(nullptr),
2212                                jpeer(nullptr),
2213                                stack_begin(nullptr),
2214                                stack_size(0),
2215                                deps_or_stack_trace_sample(),
2216                                wait_next(nullptr),
2217                                monitor_enter_object(nullptr),
2218                                top_handle_scope(nullptr),
2219                                class_loader_override(nullptr),
2220                                stacked_shadow_frame_record(nullptr),
2221                                deoptimization_context_stack(nullptr),
2222                                frame_id_to_shadow_frame(nullptr),
2223                                name(nullptr),
2224                                pthread_self(0),
2225                                active_suspendall_barrier(nullptr),
2226                                active_suspend1_barriers(nullptr),
2227                                thread_local_pos(nullptr),
2228                                thread_local_end(nullptr),
2229                                thread_local_start(nullptr),
2230                                thread_local_limit(nullptr),
2231                                thread_local_objects(0),
2232                                checkpoint_function(nullptr),
2233                                thread_local_alloc_stack_top(nullptr),
2234                                thread_local_alloc_stack_end(nullptr),
2235                                mutator_lock(nullptr),
2236                                flip_function(nullptr),
2237                                thread_local_mark_stack(nullptr),
2238                                async_exception(nullptr),
2239                                top_reflective_handle_scope(nullptr),
2240                                method_trace_buffer(nullptr),
2241                                method_trace_buffer_curr_entry(nullptr),
2242                                thread_exit_flags(nullptr),
2243                                last_no_thread_suspension_cause(nullptr),
2244                                last_no_transaction_checks_cause(nullptr) {
2245       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
2246     }
2247 
2248     // The biased card table, see CardTable for details.
2249     uint8_t* card_table;
2250 
2251     // The pending exception or null.
2252     mirror::Throwable* exception;
2253 
2254     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
2255     // We leave extra space so there's room for the code that throws StackOverflowError.
2256     // Note: do not use directly; use the GetStackEnd/SetStackEnd template functions instead.
2257     uint8_t* stack_end;
2258 
2259     // The top of the managed stack often manipulated directly by compiler generated code.
2260     ManagedStack managed_stack;
2261 
2262     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
2263     // normally set to the address of itself. It should be cleared with release semantics to ensure
2264     // that prior state changes etc. are visible to any thread that faults as a result.
2265     // We assume that the kernel ensures that such changes are then visible to the faulting
2266     // thread, even if it is not an acquire load that faults. (Indeed, it seems unlikely that the
2267     // ordering semantics associated with the faulting load has any impact.)
2268     std::atomic<uintptr_t*> suspend_trigger;
2269 
2270     // Every thread may have an associated JNI environment
2271     JNIEnvExt* jni_env;
2272 
2273     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
2274     // created thread.
2275     JNIEnvExt* tmp_jni_env;
2276 
2277     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
2278     // is easy but getting the address of Thread::Current is hard. This field can be read off of
2279     // Thread::Current to give the address.
2280     Thread* self;
2281 
2282     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
2283     // start up, until the thread is registered and the local opeer_ is used.
2284     mirror::Object* opeer;
2285     jobject jpeer;
2286 
2287     // The "lowest addressable byte" of the stack.
2288     // Note: do not use directly; use the GetStackBegin/SetStackBegin template functions instead.
2289     uint8_t* stack_begin;
2290 
2291     // Size of the stack.
2292     // Note: do not use directly; use the GetStackSize/SetStackSize template functions instead.
2293     size_t stack_size;
2294 
2295     // Sampling profiler and AOT verification cannot happen in the same run, so we share
2296     // the same entry for the stack trace and the verifier deps.
2297     union DepsOrStackTraceSample {
2298       DepsOrStackTraceSample() {
2299         verifier_deps = nullptr;
2300         stack_trace_sample = nullptr;
2301       }
2302       // Pointer to previous stack trace captured by sampling profiler.
2303       std::vector<ArtMethod*>* stack_trace_sample;
2304       // When doing AOT verification, per-thread VerifierDeps.
2305       verifier::VerifierDeps* verifier_deps;
2306     } deps_or_stack_trace_sample;
2307 
2308     // The next thread in the wait set this thread is part of or null if not waiting.
2309     Thread* wait_next;
2310 
2311     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
2312     mirror::Object* monitor_enter_object;
2313 
2314     // Top of linked list of handle scopes or null for none.
2315     BaseHandleScope* top_handle_scope;
2316 
2317     // Needed to get the right ClassLoader in JNI_OnLoad, but also
2318     // useful for testing.
2319     jobject class_loader_override;
2320 
2321     // For gc purpose, a shadow frame record stack that keeps track of:
2322     // 1) shadow frames under construction.
2323     // 2) deoptimization shadow frames.
2324     StackedShadowFrameRecord* stacked_shadow_frame_record;
2325 
2326     // Deoptimization return value record stack.
2327     DeoptimizationContextRecord* deoptimization_context_stack;
2328 
2329     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
2330     // Shadow frames may be created before deoptimization happens so that the debugger can
2331     // set local values there first.
2332     FrameIdToShadowFrame* frame_id_to_shadow_frame;
2333 
2334     // A cached copy of the java.lang.Thread's (modified UTF-8) name.
2335     // If this is not null or kThreadNameDuringStartup, then it owns the malloc memory holding
2336     // the string. Updated in an RCU-like manner.
2337     std::atomic<const char*> name;
2338     static_assert(std::atomic<const char*>::is_always_lock_free);
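    // Illustrative sketch (assumption, simplified): a reader loads the pointer, e.g.
    //   const char* n = name.load(std::memory_order_acquire);
    // and copies the string while it is known to be live; a writer publishes a freshly
    // malloc'd copy with an atomic store and frees the old pointer only once no reader
    // can still be using it, which is what makes the update "RCU-like".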

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // After a thread observes a suspend request and enters a suspended state,
    // it notifies the requestor by arriving at a "suspend barrier". This consists of decrementing
    // the atomic integer representing the barrier. (This implementation was introduced in 2015 to
    // minimize cost. There may be other options.) These atomic integer barriers are always
    // stored on the requesting thread's stack. They are referenced from the target thread's
    // data structure in one of two ways; in either case the data structure referring to these
    // barriers is guarded by suspend_count_lock:
    // 1. A SuspendAll barrier is directly referenced from the target thread. Only one of these
    // can be active at a time:
    AtomicInteger* active_suspendall_barrier GUARDED_BY(Locks::thread_suspend_count_lock_);
    // 2. For individual thread suspensions, active barriers are embedded in a struct that is used
    // to link together all suspend requests for this thread. Unlike the SuspendAll case, each
    // barrier is referenced by a single target thread, and thus can appear only on a single list.
    // The struct as a whole is still stored on the requesting thread's stack.
    WrappedSuspend1Barrier* active_suspend1_barriers GUARDED_BY(Locks::thread_suspend_count_lock_);
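    // Illustrative sketch of the handshake (assumption, simplified): the requester keeps an
    // AtomicInteger on its own stack, initialized to the number of arrivals it is waiting
    // for (1 for a single-thread suspension), registers it here under
    // Locks::thread_suspend_count_lock_, and then waits for it to reach zero, e.g.
    //   while (barrier.load(std::memory_order_acquire) != 0) { /* futex wait or spin */ }
    // while the target, once it reaches a suspended state, decrements each registered barrier:
    //   barrier->fetch_sub(1, std::memory_order_release);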

    // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
    // potentially better performance.
    uint8_t* thread_local_pos;
    uint8_t* thread_local_end;

    // Thread-local allocation pointer. Can be moved above the preceding two to correct alignment.
    uint8_t* thread_local_start;

    // Thread local limit is how much we can expand the thread local buffer to; it is greater than
    // or equal to thread_local_end.
    uint8_t* thread_local_limit;

    size_t thread_local_objects;
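    // Illustrative sketch (assumption) of the bump-pointer fast path these fields support:
    //   if (thread_local_pos + alloc_size <= thread_local_end) {
    //     void* obj = thread_local_pos;
    //     thread_local_pos += alloc_size;
    //     ++thread_local_objects;
    //   }  // Otherwise take the slow path, which may extend the buffer up to
    //      // thread_local_limit or hand out a new thread-local buffer.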

    // Pending checkpoint function or null if none is pending. If this checkpoint is set and someone
    // requests another checkpoint, it goes to the checkpoint overflow list.
    Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
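    // Illustrative sketch (assumption): a checkpoint request made while holding
    // Locks::thread_suspend_count_lock_ does roughly
    //   if (checkpoint_function == nullptr) { checkpoint_function = closure; }
    //   else { checkpoint_overflow_.push_back(closure); }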

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Pointer to the mutator lock.
    // This is the same as `Locks::mutator_lock_` but cached for faster state transitions.
    MutatorMutex* mutator_lock;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // The function used for thread flip.  Set while holding Locks::thread_suspend_count_lock_ and
    // with all other threads suspended.  May be cleared while being read.
    std::atomic<Closure*> flip_function;

    union {
      // Thread-local mark stack for the concurrent copying collector.
      gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
      // Thread-local page-sized buffer for userfaultfd GC.
      uint8_t* thread_local_gc_buffer;
    };

    // The pending async-exception or null.
    mirror::Throwable* async_exception;

    // Top of the linked-list for reflective-handle scopes or null if none.
    BaseReflectiveHandleScope* top_reflective_handle_scope;

    // Pointer to a thread-local buffer for method tracing.
    uintptr_t* method_trace_buffer;

    // Pointer to the current entry in the buffer.
    uintptr_t* method_trace_buffer_curr_entry;

    // Pointer to the first node of an intrusively doubly-linked list of ThreadExitFlags.
    ThreadExitFlag* thread_exit_flags GUARDED_BY(Locks::thread_list_lock_);

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // If the thread is asserting that there should be no transaction checks,
    // what is causing that assertion (debug builds only).
    const char* last_no_transaction_checks_cause;
  } tlsPtr_;

  // Small thread-local cache to be used from the interpreter.
  // It is keyed by dex instruction pointer.
  // The value is opcode-dependent (e.g. field offset).
  InterpreterCache interpreter_cache_;
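  // Illustrative example (assumption): for an iget bytecode the key is the pointer to that
  // dex instruction and the cached value is the resolved field's offset, letting the
  // interpreter skip re-resolution on the fast path.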

  // All fields below this line should not be accessed by native code. This means these fields can
  // be modified, rearranged, added or removed without having to modify asm_support.h.

  // Guards the 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Debug disable-read-barrier count; only checked in debug builds, and only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  // Counters used only for debugging and error reporting.  Likely to wrap.  Small to avoid
  // increasing Thread size.
  // We currently maintain these unconditionally, since it doesn't cost much, and we seem to have
  // persistent issues with suspension timeouts, which these should help to diagnose.
  // TODO: Reconsider this.
  std::atomic<uint8_t> suspended_count_ = 0;   // Number of times we entered a suspended state after
                                               // running checkpoints.
  std::atomic<uint8_t> checkpoint_count_ = 0;  // Number of checkpoints we started running.

  // Note that it is not in the packed struct; it may not be accessed for cross compilation.
  uintptr_t poison_object_cookie_ = 0;

  // Pending extra checkpoints if checkpoint_function_ is already used.
  std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
  // compiled code or entrypoints.
  SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
      GUARDED_BY(Locks::custom_tls_lock_);

#if !defined(__BIONIC__)
#if !defined(ANDROID_HOST_MUSL)
    __attribute__((tls_model("initial-exec")))
#endif
  static thread_local Thread* self_tls_;
#endif

  // True if the thread is some form of runtime thread (e.g. GC or JIT).
  bool is_runtime_thread_;

  // Set during execution of JNI methods that get field and method IDs as part of determining if
  // the caller is allowed to access all fields and methods in the Core Platform API.
  uint32_t core_platform_api_cookie_ = 0;

  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedAssertNoTransactionChecks;
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread, Destroy and EnsureFlipFunctionStarted.
  friend class EntrypointsOrderTest;  // To test the order of tls entries.
  friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
                                               bool enabled = true)
      ACQUIRE(Roles::uninterruptible_)
      : enabled_(enabled) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->StartAssertNoThreadSuspension(cause);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_->EndAssertNoThreadSuspension(old_cause_);
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }

 private:
  Thread* self_;
  const bool enabled_;
  const char* old_cause_;
};
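// Illustrative usage sketch, not prescriptive:
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting roots");
//     ...  // Code here must not reach a suspension point; in debug builds doing so
//          // trips an assertion that reports the given cause.
//   }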

class ScopedAllowThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->EndAssertNoThreadSuspension();
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }

 private:
  Thread* self_;
  const char* old_cause_;
};
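// Illustrative usage sketch, not prescriptive: temporarily re-allow suspension from inside
// a ScopedAssertNoThreadSuspension region:
//   {
//     ScopedAssertNoThreadSuspension sants("Long no-suspend region");
//     ...
//     {
//       ScopedAllowThreadSuspension sats;  // Suspension is permitted again in here.
//       ...
//     }  // The no-suspension assertion is re-established with the original cause.
//   }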

class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf) : self_(self), sf_(sf) {
    DCHECK_EQ(sf->GetLink(), nullptr);
    self_->PushStackedShadowFrame(sf, StackedShadowFrameType::kShadowFrameUnderConstruction);
  }
  ~ScopedStackedShadowFramePusher() {
    ShadowFrame* sf = self_->PopStackedShadowFrame();
    DCHECK_EQ(sf, sf_);
  }

 private:
  Thread* const self_;
  ShadowFrame* const sf_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};
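// Illustrative usage sketch, not prescriptive:
//   ShadowFrame* sf = ...;  // Frame still under construction, not yet linked anywhere.
//   {
//     ScopedStackedShadowFramePusher pusher(self, sf);
//     ...  // GC can now find and visit the partially built frame's references.
//   }  // Scope exit pops the record and checks it is the same frame.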

// Only works for debug builds.
class ScopedDebugDisallowReadBarriers {
 public:
  explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
    self_->ModifyDebugDisallowReadBarrier(1);
  }
  ~ScopedDebugDisallowReadBarriers() {
    self_->ModifyDebugDisallowReadBarrier(-1);
  }

 private:
  Thread* const self_;
};
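// Illustrative usage sketch, not prescriptive:
//   {
//     ScopedDebugDisallowReadBarriers sddrb(self);
//     ...  // In debug builds, executing a read barrier here trips an assertion.
//   }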

class ThreadLifecycleCallback {
 public:
  virtual ~ThreadLifecycleCallback() {}

  virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
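// Illustrative sketch (assumption) of an implementation; callbacks of this kind are
// registered with the runtime's callback list (see runtime_callbacks.h):
//   class ThreadLogger : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "started: " << *self;
//     }
//     void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "exiting: " << *self;
//     }
//   };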

// Stores an exception from the thread and suppresses it for the duration of this object.
class ScopedExceptionStorage {
 public:
  EXPORT explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
  EXPORT ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  StackHandleScope<1> hs_;
  MutableHandle<mirror::Throwable> excp_;
};
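// Illustrative usage sketch (assumption about the exact restore semantics):
//   {
//     ScopedExceptionStorage saved(self);  // Stashes and clears the pending exception.
//     ...  // Run code that may itself throw and clear its own exceptions.
//     saved.SuppressOldException("while cleaning up");  // Optionally drop the stored one.
//   }  // Otherwise the stored exception is reinstated when the scope ends.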

EXPORT std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, StackedShadowFrameType thread);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_