1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <bitset>
21 #include <deque>
22 #include <iosfwd>
23 #include <list>
24 #include <memory>
25 #include <setjmp.h>
26 #include <string>
27 
28 #include "arch/context.h"
29 #include "arch/instruction_set.h"
30 #include "atomic.h"
31 #include "base/enums.h"
32 #include "base/macros.h"
33 #include "base/mutex.h"
34 #include "entrypoints/jni/jni_entrypoints.h"
35 #include "entrypoints/quick/quick_entrypoints.h"
36 #include "globals.h"
37 #include "handle_scope.h"
38 #include "instrumentation.h"
39 #include "jvalue.h"
40 #include "managed_stack.h"
41 #include "offsets.h"
42 #include "runtime_stats.h"
43 #include "suspend_reason.h"
44 #include "thread_state.h"
45 
46 class BacktraceMap;
47 
48 namespace art {
49 
50 namespace gc {
51 namespace accounting {
52   template<class T> class AtomicStack;
53 }  // namespace accounting
54 namespace collector {
55   class SemiSpace;
56 }  // namespace collector
57 }  // namespace gc
58 
59 namespace mirror {
60   class Array;
61   class Class;
62   class ClassLoader;
63   class Object;
64   template<class T> class ObjectArray;
65   template<class T> class PrimitiveArray;
66   typedef PrimitiveArray<int32_t> IntArray;
67   class StackTraceElement;
68   class String;
69   class Throwable;
70 }  // namespace mirror
71 
72 namespace verifier {
73   class MethodVerifier;
74   class VerifierDeps;
75 }  // namespace verifier
76 
77 class ArtMethod;
78 class BaseMutex;
79 class ClassLinker;
80 class Closure;
81 class Context;
82 struct DebugInvokeReq;
83 class DeoptimizationContextRecord;
84 class DexFile;
85 class FrameIdToShadowFrame;
86 class JavaVMExt;
87 struct JNIEnvExt;
88 class Monitor;
89 class RootVisitor;
90 class ScopedObjectAccessAlreadyRunnable;
91 class ShadowFrame;
92 class SingleStepControl;
93 class StackedShadowFrameRecord;
94 class Thread;
95 class ThreadList;
96 enum VisitRootFlags : uint8_t;
97 
98 // Thread priorities. These must match the Thread.MIN_PRIORITY,
99 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
100 enum ThreadPriority {
101   kMinThreadPriority = 1,
102   kNormThreadPriority = 5,
103   kMaxThreadPriority = 10,
104 };
105 
106 enum ThreadFlag {
107   kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
108                           // safepoint handler.
109   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
110   kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
111   kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
112 };
113 
114 enum class StackedShadowFrameType {
115   kShadowFrameUnderConstruction,
116   kDeoptimizationShadowFrame,
117 };
118 
119 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
120 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
121 
122 // Thread's stack layout for implicit stack overflow checks:
123 //
124 //   +---------------------+  <- highest address of stack memory
125 //   |                     |
126 //   .                     .  <- SP
127 //   |                     |
128 //   |                     |
129 //   +---------------------+  <- stack_end
130 //   |                     |
131 //   |  Gap                |
132 //   |                     |
133 //   +---------------------+  <- stack_begin
134 //   |                     |
135 //   | Protected region    |
136 //   |                     |
137 //   +---------------------+  <- lowest address of stack memory
138 //
139 // The stack always grows down in memory.  At the lowest address is a region of memory
140 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
141 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
142 // between the stack_end and the highest address in stack memory.  An implicit stack
143 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
144 // If the thread's SP is below the stack_end address this will be a read into the protected
145 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
146 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
147 // if the thread makes a call out to a native function (through JNI), that native function
148 // might only have 4K of memory (if the SP is adjacent to stack_end).
149 
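// Illustrative sketch (not part of the original header): conceptually, the implicit
// overflow check emitted by the compiler is a read a fixed distance below the current
// SP; the offset used here is an assumed placeholder, the real value is per-ISA.
//
//   inline void ProbeStackBelowSp(const uint8_t* sp) {
//     constexpr size_t kAssumedProbeOffset = 4 * KB;        // assumption: ~4K, see above
//     volatile uint8_t sink = *(sp - kAssumedProbeOffset);  // faults inside the protected region
//     static_cast<void>(sink);                              // the fault is what signals overflow
//   }
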
150 class Thread {
151  public:
152   static const size_t kStackOverflowImplicitCheckSize;
153   static constexpr bool kVerifyStack = kIsDebugBuild;
154 
155   // Creates a new native thread corresponding to the given managed peer.
156   // Used to implement Thread.start.
157   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
158 
159   // Attaches the calling native thread to the runtime, returning the new native peer.
160   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
161   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
162                         bool create_peer);
163   // Attaches the calling native thread to the runtime, returning the new native peer.
164   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
165 
166   // Reset internal state of child thread after fork.
167   void InitAfterFork();
168 
169   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
170   // high cost and so we favor passing self around when possible.
171   // TODO: mark as PURE so the compiler may coalesce and remove?
172   static Thread* Current();
173 
174   // On a runnable thread, check for pending thread suspension request and handle if pending.
175   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
176 
177   // Process pending thread suspension request and handle if pending.
178   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
179 
180   // Process a pending empty checkpoint if pending.
181   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
182   void CheckEmptyCheckpointFromMutex();
183 
184   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
185                                    ObjPtr<mirror::Object> thread_peer)
186       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
187       REQUIRES_SHARED(Locks::mutator_lock_);
188   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
189       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
190       REQUIRES_SHARED(Locks::mutator_lock_);
191 
192   // Translates 172 to pAllocArrayFromCode and so on.
193   template<PointerSize size_of_pointers>
194   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
195 
196   // Dumps a one-line summary of thread state (used for operator<<).
197   void ShortDump(std::ostream& os) const;
198 
199   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
200   void Dump(std::ostream& os,
201             bool dump_native_stack = true,
202             BacktraceMap* backtrace_map = nullptr,
203             bool force_dump_stack = false) const
204       REQUIRES(!Locks::thread_suspend_count_lock_)
205       REQUIRES_SHARED(Locks::mutator_lock_);
206 
207   void DumpJavaStack(std::ostream& os,
208                      bool check_suspended = true,
209                      bool dump_locks = true) const
210       REQUIRES(!Locks::thread_suspend_count_lock_)
211       REQUIRES_SHARED(Locks::mutator_lock_);
212 
213   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
214   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
215   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
216       REQUIRES(!Locks::thread_suspend_count_lock_)
217       REQUIRES_SHARED(Locks::mutator_lock_);
218 
219   ThreadState GetState() const {
220     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
221     DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
222     return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
223   }
224 
225   ThreadState SetState(ThreadState new_state);
226 
227   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
228     return tls32_.suspend_count;
229   }
230 
231   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
232                                                Locks::user_code_suspension_lock_) {
233     return tls32_.user_code_suspend_count;
234   }
235 
236   int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
237     return tls32_.debug_suspend_count;
238   }
239 
240   bool IsSuspended() const {
241     union StateAndFlags state_and_flags;
242     state_and_flags.as_int = tls32_.state_and_flags.as_int;
243     return state_and_flags.as_struct.state != kRunnable &&
244         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
245   }
246 
247   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
248   // release thread_suspend_count_lock_ internally.
249   ALWAYS_INLINE
250   bool ModifySuspendCount(Thread* self,
251                           int delta,
252                           AtomicInteger* suspend_barrier,
253                           SuspendReason reason)
254       WARN_UNUSED
255       REQUIRES(Locks::thread_suspend_count_lock_);
256 
257   bool RequestCheckpoint(Closure* function)
258       REQUIRES(Locks::thread_suspend_count_lock_);
259   bool RequestSynchronousCheckpoint(Closure* function)
260       REQUIRES_SHARED(Locks::mutator_lock_)
261       REQUIRES(Locks::thread_list_lock_)
262       REQUIRES(!Locks::thread_suspend_count_lock_);
263   bool RequestEmptyCheckpoint()
264       REQUIRES(Locks::thread_suspend_count_lock_);
265 
266   void SetFlipFunction(Closure* function);
267   Closure* GetFlipFunction();
268 
269   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
270     CHECK(kUseReadBarrier);
271     return tlsPtr_.thread_local_mark_stack;
272   }
273   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
274     CHECK(kUseReadBarrier);
275     tlsPtr_.thread_local_mark_stack = stack;
276   }
277 
278   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share
279   // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
280   void FullSuspendCheck()
281       REQUIRES(!Locks::thread_suspend_count_lock_)
282       REQUIRES_SHARED(Locks::mutator_lock_);
283 
284   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
285   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
286       REQUIRES(!Locks::thread_suspend_count_lock_)
287       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
288 
289   // Transition from runnable into a state where mutator privileges are denied. Releases share of
290   // mutator lock.
291   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
292       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
293       UNLOCK_FUNCTION(Locks::mutator_lock_);
294 
295   // Once called, thread suspension will cause an assertion failure.
296   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
297     Roles::uninterruptible_.Acquire();  // No-op.
298     if (kIsDebugBuild) {
299       CHECK(cause != nullptr);
300       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
301       tls32_.no_thread_suspension++;
302       tlsPtr_.last_no_thread_suspension_cause = cause;
303       return previous_cause;
304     } else {
305       return nullptr;
306     }
307   }
308 
309   // End region where no thread suspension is expected.
310   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
311     if (kIsDebugBuild) {
312       CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
313       CHECK_GT(tls32_.no_thread_suspension, 0U);
314       tls32_.no_thread_suspension--;
315       tlsPtr_.last_no_thread_suspension_cause = old_cause;
316     }
317     Roles::uninterruptible_.Release();  // No-op.
318   }
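
  // Usage sketch (illustrative): callers bracket a region that must not suspend and
  // restore the previous cause on exit, e.g.
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Example cause");
  //   /* ... work that must not suspend ... */
  //   self->EndAssertNoThreadSuspension(old_cause);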
319 
320   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
321 
322   // Return true if thread suspension is allowable.
323   bool IsThreadSuspensionAllowable() const;
324 
325   bool IsDaemon() const {
326     return tls32_.daemon;
327   }
328 
329   size_t NumberOfHeldMutexes() const;
330 
331   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
332 
333   /*
334    * Changes the priority of this thread to match that of the java.lang.Thread object.
335    *
336    * We map a priority value from 1-10 to Linux "nice" values, where lower
337    * numbers indicate higher priority.
338    */
339   void SetNativePriority(int newPriority);
340 
341   /*
342    * Returns the thread priority for the current thread by querying the system.
343    * This is useful when attaching a thread through JNI.
344    *
345    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
346    */
347   static int GetNativePriority();
348 
349   // Guaranteed to be non-zero.
350   uint32_t GetThreadId() const {
351     return tls32_.thin_lock_thread_id;
352   }
353 
354   pid_t GetTid() const {
355     return tls32_.tid;
356   }
357 
358   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
359   mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
360 
361   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
362   // allocation, or locking.
363   void GetThreadName(std::string& name) const;
364 
365   // Sets the thread's name.
366   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
367 
368   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
369   uint64_t GetCpuMicroTime() const;
370 
371   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
372     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
373     CHECK(tlsPtr_.jpeer == nullptr);
374     return tlsPtr_.opeer;
375   }
376   // GetPeer is not safe if called on another thread in the middle of the CC thread flip, since
377   // the thread's stack may not have been flipped yet and the peer may be a from-space (stale) ref.
378   // This function will explicitly mark/forward it.
379   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
380 
381   bool HasPeer() const {
382     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
383   }
384 
385   RuntimeStats* GetStats() {
386     return &tls64_.stats;
387   }
388 
389   bool IsStillStarting() const;
390 
391   bool IsExceptionPending() const {
392     return tlsPtr_.exception != nullptr;
393   }
394 
395   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
396     return tlsPtr_.exception;
397   }
398 
399   void AssertPendingException() const;
400   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
401   void AssertNoPendingException() const;
402   void AssertNoPendingExceptionForNewException(const char* msg) const;
403 
404   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
405 
406   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
407     tlsPtr_.exception = nullptr;
408   }
409 
410   // Find the catch block and perform a long jump to the appropriate exception handler.
411   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
412 
413   Context* GetLongJumpContext();
414   void ReleaseLongJumpContext(Context* context) {
415     if (tlsPtr_.long_jump_context != nullptr) {
416       // Each QuickExceptionHandler gets a long jump context and uses
417       // it for doing the long jump, after finding catch blocks/doing deoptimization.
418       // Both finding catch blocks and deoptimization can trigger another
419       // exception such as a result of class loading. So there can be nested
420       // cases of exception handling and multiple contexts being used.
421       // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
422       // for reuse so there is no need to always allocate a new one each time when
423       // getting a context. Since we only keep one context for reuse, delete the
424       // existing one since the passed in context is yet to be used for longjump.
425       delete tlsPtr_.long_jump_context;
426     }
427     tlsPtr_.long_jump_context = context;
428   }
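
  // Usage sketch (illustrative): a typical caller pairs the two calls, handing the
  // context back for reuse once the long jump data has been consumed:
  //
  //   Context* context = self->GetLongJumpContext();
  //   /* ... fill the context while unwinding ... */
  //   self->ReleaseLongJumpContext(context);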
429 
430   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
431   // abort the runtime iff abort_on_error is true.
432   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
433                               bool check_suspended = true,
434                               bool abort_on_error = true) const
435       REQUIRES_SHARED(Locks::mutator_lock_);
436 
437   // Returns whether the given exception was thrown by the current Java method being executed
438   // (Note that this includes native Java methods).
439   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
440       REQUIRES_SHARED(Locks::mutator_lock_);
441 
442   void SetTopOfStack(ArtMethod** top_method) {
443     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
444   }
445 
446   void SetTopOfShadowStack(ShadowFrame* top) {
447     tlsPtr_.managed_stack.SetTopShadowFrame(top);
448   }
449 
450   bool HasManagedStack() const {
451     return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
452         (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
453   }
454 
455   // If 'msg' is null, no detail message is set.
456   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
457       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
458 
459   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
460   // used as the new exception's cause.
461   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
462       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
463 
464   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
465       __attribute__((format(printf, 3, 4)))
466       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
467 
468   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
469       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
470 
471   // OutOfMemoryError is special, because we need to pre-allocate an instance.
472   // Only the GC should call this.
473   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
474       REQUIRES(!Roles::uninterruptible_);
475 
476   static void Startup();
477   static void FinishStartup();
478   static void Shutdown();
479 
480   // JNI methods
481   JNIEnvExt* GetJniEnv() const {
482     return tlsPtr_.jni_env;
483   }
484 
485   // Convert a jobject into an Object*.
486   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
487   // Checks if the weak global ref has been cleared by the GC without decoding it.
488   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
489 
490   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
491     return tlsPtr_.monitor_enter_object;
492   }
493 
494   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
495     tlsPtr_.monitor_enter_object = obj;
496   }
497 
498   // Implements java.lang.Thread.interrupted.
499   bool Interrupted();
500   // Implements java.lang.Thread.isInterrupted.
501   bool IsInterrupted();
502   void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
503   void SetInterrupted(bool i) {
504     tls32_.interrupted.StoreSequentiallyConsistent(i);
505   }
506   void Notify() REQUIRES(!*wait_mutex_);
507 
508   ALWAYS_INLINE void PoisonObjectPointers() {
509     ++poison_object_cookie_;
510   }
511 
512   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
513 
514   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
515     return poison_object_cookie_;
516   }
517 
518  private:
519   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
520 
521  public:
522   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
523     return wait_mutex_;
524   }
525 
526   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
527     return wait_cond_;
528   }
529 
530   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
531     return wait_monitor_;
532   }
533 
534   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
535     wait_monitor_ = mon;
536   }
537 
538   // Waiter linked-list support.
539   Thread* GetWaitNext() const {
540     return tlsPtr_.wait_next;
541   }
542 
543   void SetWaitNext(Thread* next) {
544     tlsPtr_.wait_next = next;
545   }
546 
547   jobject GetClassLoaderOverride() {
548     return tlsPtr_.class_loader_override;
549   }
550 
551   void SetClassLoaderOverride(jobject class_loader_override);
552 
553   // Create the internal representation of a stack trace, which is more time-
554   // and space-efficient to compute than a StackTraceElement[].
555   template<bool kTransactionActive>
556   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
557       REQUIRES_SHARED(Locks::mutator_lock_);
558 
559   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
560   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
561   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
562   // with the number of valid frames in the returned array.
563   static jobjectArray InternalStackTraceToStackTraceElementArray(
564       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
565       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
566       REQUIRES_SHARED(Locks::mutator_lock_);
567 
568   bool HasDebuggerShadowFrames() const {
569     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
570   }
571 
572   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
573       REQUIRES_SHARED(Locks::mutator_lock_);
574 
575   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
576     if (kVerifyStack) {
577       VerifyStackImpl();
578     }
579   }
580 
581   //
582   // Offsets of various members of native Thread class, used by compiled code.
583   //
584 
585   template<PointerSize pointer_size>
586   static ThreadOffset<pointer_size> ThinLockIdOffset() {
587     return ThreadOffset<pointer_size>(
588         OFFSETOF_MEMBER(Thread, tls32_) +
589         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
590   }
591 
592   template<PointerSize pointer_size>
593   static ThreadOffset<pointer_size> InterruptedOffset() {
594     return ThreadOffset<pointer_size>(
595         OFFSETOF_MEMBER(Thread, tls32_) +
596         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
597   }
598 
599   template<PointerSize pointer_size>
600   static ThreadOffset<pointer_size> ThreadFlagsOffset() {
601     return ThreadOffset<pointer_size>(
602         OFFSETOF_MEMBER(Thread, tls32_) +
603         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
604   }
605 
606   template<PointerSize pointer_size>
607   static ThreadOffset<pointer_size> IsGcMarkingOffset() {
608     return ThreadOffset<pointer_size>(
609         OFFSETOF_MEMBER(Thread, tls32_) +
610         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
611   }
612 
613   static constexpr size_t IsGcMarkingSize() {
614     return sizeof(tls32_.is_gc_marking);
615   }
616 
617   // Deoptimize the Java stack.
618   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
619 
620  private:
621   template<PointerSize pointer_size>
622   static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
623     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
624     size_t scale;
625     size_t shrink;
626     if (pointer_size == kRuntimePointerSize) {
627       scale = 1;
628       shrink = 1;
629     } else if (pointer_size > kRuntimePointerSize) {
630       scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
631       shrink = 1;
632     } else {
633       DCHECK_GT(kRuntimePointerSize, pointer_size);
634       scale = 1;
635       shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
636     }
637     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
638   }
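
  // Worked example (illustrative) of the scaling above, assuming a 64-bit runtime
  // computing offsets for a 32-bit target: scale = 1 and shrink = 2, so a member that
  // is 16 bytes into tlsPtr_ on the runtime is reported at base + 16 / 2 = base + 8,
  // matching the narrower pointer-sized slots on the target.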
639 
640  public:
641   static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
642                                                 PointerSize pointer_size) {
643     if (pointer_size == PointerSize::k32) {
644       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
645           Uint32Value();
646     } else {
647       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
648           Uint32Value();
649     }
650   }
651 
652   template<PointerSize pointer_size>
653   static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
654     return ThreadOffsetFromTlsPtr<pointer_size>(
655         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
656   }
657 
658   template<PointerSize pointer_size>
659   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
660     return ThreadOffsetFromTlsPtr<pointer_size>(
661         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
662   }
663 
664   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
665   template <PointerSize pointer_size>
666   static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
667     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
668     DCHECK_LT(reg, 30u);
669     // The ReadBarrierMarkRegX entry points are ordered by increasing
670     // register number in Thread::tlsPtr_.quick_entrypoints.
671     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
672         + static_cast<size_t>(pointer_size) * reg;
673   }
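
  // Worked example (illustrative): with 64-bit pointers, the entry point for register 5
  // lives at QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierMarkReg00) + 8 * 5,
  // i.e. five pointer-sized slots past pReadBarrierMarkReg00.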
674 
675   template<PointerSize pointer_size>
676   static ThreadOffset<pointer_size> SelfOffset() {
677     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
678   }
679 
680   template<PointerSize pointer_size>
681   static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
682     return ThreadOffsetFromTlsPtr<pointer_size>(
683         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
684   }
685 
686   template<PointerSize pointer_size>
687   static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
688     return ThreadOffsetFromTlsPtr<pointer_size>(
689         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
690   }
691 
692   template<PointerSize pointer_size>
693   static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
694     return ThreadOffsetFromTlsPtr<pointer_size>(
695         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
696   }
697 
698   template<PointerSize pointer_size>
699   static ThreadOffset<pointer_size> ExceptionOffset() {
700     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
701   }
702 
703   template<PointerSize pointer_size>
704   static ThreadOffset<pointer_size> PeerOffset() {
705     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
706   }
707 
708 
709   template<PointerSize pointer_size>
710   static ThreadOffset<pointer_size> CardTableOffset() {
711     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
712   }
713 
714   template<PointerSize pointer_size>
715   static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
716     return ThreadOffsetFromTlsPtr<pointer_size>(
717         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
718   }
719 
720   template<PointerSize pointer_size>
721   static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
722     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
723                                                                 thread_local_pos));
724   }
725 
726   template<PointerSize pointer_size>
727   static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
728     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
729                                                                 thread_local_end));
730   }
731 
732   template<PointerSize pointer_size>
733   static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
734     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
735                                                                 thread_local_objects));
736   }
737 
738   template<PointerSize pointer_size>
739   static ThreadOffset<pointer_size> RosAllocRunsOffset() {
740     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
741                                                                 rosalloc_runs));
742   }
743 
744   template<PointerSize pointer_size>
745   static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
746     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
747                                                                 thread_local_alloc_stack_top));
748   }
749 
750   template<PointerSize pointer_size>
751   static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
752     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
753                                                                 thread_local_alloc_stack_end));
754   }
755 
756   // Size of stack less any space reserved for stack overflow
757   size_t GetStackSize() const {
758     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
759   }
760 
761   uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
762     if (implicit_overflow_check) {
763       // The interpreter needs the extra overflow bytes that stack_end does
764       // not include.
765       return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
766     } else {
767       return tlsPtr_.stack_end;
768     }
769   }
770 
771   uint8_t* GetStackEnd() const {
772     return tlsPtr_.stack_end;
773   }
774 
775   // Set the stack end to the value to be used during a stack overflow.
776   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
777 
778   // Set the stack end to the value to be used during regular execution.
779   void ResetDefaultStackEnd() {
780     // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
781     // to throw a StackOverflowError.
782     tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
783   }
784 
785   bool IsHandlingStackOverflow() const {
786     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
787   }
788 
789   template<PointerSize pointer_size>
790   static ThreadOffset<pointer_size> StackEndOffset() {
791     return ThreadOffsetFromTlsPtr<pointer_size>(
792         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
793   }
794 
795   template<PointerSize pointer_size>
796   static ThreadOffset<pointer_size> JniEnvOffset() {
797     return ThreadOffsetFromTlsPtr<pointer_size>(
798         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
799   }
800 
801   template<PointerSize pointer_size>
802   static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
803     return ThreadOffsetFromTlsPtr<pointer_size>(
804         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
805         ManagedStack::TopQuickFrameOffset());
806   }
807 
808   const ManagedStack* GetManagedStack() const {
809     return &tlsPtr_.managed_stack;
810   }
811 
812   // Linked list recording fragments of managed stack.
813   void PushManagedStackFragment(ManagedStack* fragment) {
814     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
815   }
816   void PopManagedStackFragment(const ManagedStack& fragment) {
817     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
818   }
819 
820   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
821   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
822 
823   template<PointerSize pointer_size>
824   static ThreadOffset<pointer_size> TopShadowFrameOffset() {
825     return ThreadOffsetFromTlsPtr<pointer_size>(
826         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
827         ManagedStack::TopShadowFrameOffset());
828   }
829 
830   // Is the given obj in this thread's stack indirect reference table?
831   bool HandleScopeContains(jobject obj) const;
832 
833   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
834       REQUIRES_SHARED(Locks::mutator_lock_);
835 
836   BaseHandleScope* GetTopHandleScope() {
837     return tlsPtr_.top_handle_scope;
838   }
839 
840   void PushHandleScope(BaseHandleScope* handle_scope) {
841     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
842     tlsPtr_.top_handle_scope = handle_scope;
843   }
844 
845   BaseHandleScope* PopHandleScope() {
846     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
847     DCHECK(handle_scope != nullptr);
848     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
849     return handle_scope;
850   }
851 
852   template<PointerSize pointer_size>
853   static ThreadOffset<pointer_size> TopHandleScopeOffset() {
854     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
855                                                                 top_handle_scope));
856   }
857 
858   DebugInvokeReq* GetInvokeReq() const {
859     return tlsPtr_.debug_invoke_req;
860   }
861 
862   SingleStepControl* GetSingleStepControl() const {
863     return tlsPtr_.single_step_control;
864   }
865 
866   // Indicates whether this thread is ready to invoke a method for debugging. This
867   // is only true if the thread has been suspended by a debug event.
868   bool IsReadyForDebugInvoke() const {
869     return tls32_.ready_for_debug_invoke;
870   }
871 
872   void SetReadyForDebugInvoke(bool ready) {
873     tls32_.ready_for_debug_invoke = ready;
874   }
875 
876   bool IsDebugMethodEntry() const {
877     return tls32_.debug_method_entry_;
878   }
879 
880   void SetDebugMethodEntry() {
881     tls32_.debug_method_entry_ = true;
882   }
883 
884   void ClearDebugMethodEntry() {
885     tls32_.debug_method_entry_ = false;
886   }
887 
888   bool GetIsGcMarking() const {
889     CHECK(kUseReadBarrier);
890     return tls32_.is_gc_marking;
891   }
892 
893   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
894 
895   bool GetWeakRefAccessEnabled() const {
896     CHECK(kUseReadBarrier);
897     return tls32_.weak_ref_access_enabled;
898   }
899 
900   void SetWeakRefAccessEnabled(bool enabled) {
901     CHECK(kUseReadBarrier);
902     tls32_.weak_ref_access_enabled = enabled;
903   }
904 
905   uint32_t GetDisableThreadFlipCount() const {
906     CHECK(kUseReadBarrier);
907     return tls32_.disable_thread_flip_count;
908   }
909 
910   void IncrementDisableThreadFlipCount() {
911     CHECK(kUseReadBarrier);
912     ++tls32_.disable_thread_flip_count;
913   }
914 
915   void DecrementDisableThreadFlipCount() {
916     CHECK(kUseReadBarrier);
917     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
918     --tls32_.disable_thread_flip_count;
919   }
920 
921   // Returns true if the thread is allowed to call into java.
922   bool CanCallIntoJava() const {
923     return can_call_into_java_;
924   }
925 
926   void SetCanCallIntoJava(bool can_call_into_java) {
927     can_call_into_java_ = can_call_into_java;
928   }
929 
930   // Activates single step control for debugging. The thread takes the
931   // ownership of the given SingleStepControl*. It is deleted by a call
932   // to DeactivateSingleStepControl or upon thread destruction.
933   void ActivateSingleStepControl(SingleStepControl* ssc);
934 
935   // Deactivates single step control for debugging.
936   void DeactivateSingleStepControl();
937 
938   // Sets debug invoke request for debugging. When the thread is resumed,
939   // it executes the method described by this request then sends the reply
940   // before suspending itself. The thread takes the ownership of the given
941   // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
942   void SetDebugInvokeReq(DebugInvokeReq* req);
943 
944   // Clears debug invoke request for debugging. When the thread completes
945   // method invocation, it deletes its debug invoke request and suspends
946   // itself.
947   void ClearDebugInvokeReq();
948 
949   // Returns the fake exception used to activate deoptimization.
950   static mirror::Throwable* GetDeoptimizationException() {
951     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
952     // represented by ObjPtr.
953     return reinterpret_cast<mirror::Throwable*>(0x100);
954   }
955 
956   // Currently deoptimization invokes the verifier, which can trigger class loading
957   // and execute Java code, so there might be nested deoptimizations happening.
958   // We need to save the ongoing deoptimization shadow frames and return
959   // values on stacks.
960   // 'from_code' denotes whether the deoptimization was explicitly made from
961   // compiled code.
962   void PushDeoptimizationContext(const JValue& return_value,
963                                  bool is_reference,
964                                  bool from_code,
965                                  ObjPtr<mirror::Throwable> exception)
966       REQUIRES_SHARED(Locks::mutator_lock_);
967   void PopDeoptimizationContext(JValue* result,
968                                 ObjPtr<mirror::Throwable>* exception,
969                                 bool* from_code)
970       REQUIRES_SHARED(Locks::mutator_lock_);
971   void AssertHasDeoptimizationContext()
972       REQUIRES_SHARED(Locks::mutator_lock_);
973   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
974   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
975 
976   // For debugger, find the shadow frame that corresponds to a frame id.
977   // Or return null if there is none.
978   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
979       REQUIRES_SHARED(Locks::mutator_lock_);
980   // For debugger, find the bool array that keeps track of the updated vreg set
981   // for a frame id.
982   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
983   // For debugger, find the shadow frame that corresponds to a frame id. If
984   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
985   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
986                                                uint32_t num_vregs,
987                                                ArtMethod* method,
988                                                uint32_t dex_pc)
989       REQUIRES_SHARED(Locks::mutator_lock_);
990 
991   // Delete the entry that maps from frame_id to shadow_frame.
992   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
993       REQUIRES_SHARED(Locks::mutator_lock_);
994 
995   std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
996     return tlsPtr_.instrumentation_stack;
997   }
998 
999   std::vector<ArtMethod*>* GetStackTraceSample() const {
1000     DCHECK(!IsAotCompiler());
1001     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1002   }
1003 
1004   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1005     DCHECK(!IsAotCompiler());
1006     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1007   }
1008 
1009   verifier::VerifierDeps* GetVerifierDeps() const {
1010     DCHECK(IsAotCompiler());
1011     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1012   }
1013 
1014   // It is the responsibility of the caller to make sure the verifier_deps
1015   // entry in the thread is cleared before destruction of the actual VerifierDeps
1016   // object, or the thread.
1017   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1018     DCHECK(IsAotCompiler());
1019     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1020     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1021   }
1022 
1023   uint64_t GetTraceClockBase() const {
1024     return tls64_.trace_clock_base;
1025   }
1026 
1027   void SetTraceClockBase(uint64_t clock_base) {
1028     tls64_.trace_clock_base = clock_base;
1029   }
1030 
1031   BaseMutex* GetHeldMutex(LockLevel level) const {
1032     return tlsPtr_.held_mutexes[level];
1033   }
1034 
1035   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1036     tlsPtr_.held_mutexes[level] = mutex;
1037   }
1038 
1039   void ClearSuspendBarrier(AtomicInteger* target)
1040       REQUIRES(Locks::thread_suspend_count_lock_);
1041 
1042   bool ReadFlag(ThreadFlag flag) const {
1043     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1044   }
1045 
1046   bool TestAllFlags() const {
1047     return (tls32_.state_and_flags.as_struct.flags != 0);
1048   }
1049 
1050   void AtomicSetFlag(ThreadFlag flag) {
1051     tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
1052   }
1053 
1054   void AtomicClearFlag(ThreadFlag flag) {
1055     tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
1056   }
1057 
1058   void ResetQuickAllocEntryPointsForThread(bool is_marking);
1059 
1060   // Returns the remaining space in the TLAB.
1061   size_t TlabSize() const {
1062     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1063   }
1064 
1065   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1066   size_t TlabRemainingCapacity() const {
1067     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1068   }
1069 
1070   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1071   void ExpandTlab(size_t bytes) {
1072     tlsPtr_.thread_local_end += bytes;
1073     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1074   }
1075 
1076   // Doesn't check that there is room.
1077   mirror::Object* AllocTlab(size_t bytes);
1078   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1079   bool HasTlab() const;
1080   uint8_t* GetTlabStart() {
1081     return tlsPtr_.thread_local_start;
1082   }
1083   uint8_t* GetTlabPos() {
1084     return tlsPtr_.thread_local_pos;
1085   }
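
  // Illustrative sketch of the bump-pointer fast path these fields support (the real
  // fast path lives in AllocTlab and in generated code; this is only a sketch):
  //
  //   if (TlabSize() >= bytes) {
  //     mirror::Object* obj = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  //     tlsPtr_.thread_local_pos += bytes;       // bump the position pointer
  //     ++tlsPtr_.thread_local_objects;
  //     return obj;
  //   }
  //   /* otherwise fall back to a slower allocation path */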
1086 
1087   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1088   // equal to a valid pointer.
1089   // TODO: does this need to be atomic? I don't think so.
1090   void RemoveSuspendTrigger() {
1091     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1092   }
1093 
1094   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1095   // The next time a suspend check is done, it will load from the value at this address
1096   // and trigger a SIGSEGV.
1097   void TriggerSuspend() {
1098     tlsPtr_.suspend_trigger = nullptr;
1099   }
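
  // Illustrative sketch: an implicit suspend check is just a load through suspend_trigger.
  // While the trigger points at itself (RemoveSuspendTrigger) the load is harmless; once
  // TriggerSuspend nulls it, the load faults and the runtime treats the resulting fault
  // as a suspend check:
  //
  //   uintptr_t unused = *tlsPtr_.suspend_trigger;  // faults iff a suspend was requested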
1100 
1101 
1102   // Push an object onto the allocation stack.
1103   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1104       REQUIRES_SHARED(Locks::mutator_lock_);
1105 
1106   // Set the thread local allocation pointers to the given pointers.
1107   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1108                                      StackReference<mirror::Object>* end);
1109 
1110   // Resets the thread local allocation pointers.
1111   void RevokeThreadLocalAllocationStack();
1112 
1113   size_t GetThreadLocalBytesAllocated() const {
1114     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1115   }
1116 
1117   size_t GetThreadLocalObjectsAllocated() const {
1118     return tlsPtr_.thread_local_objects;
1119   }
1120 
1121   void* GetRosAllocRun(size_t index) const {
1122     return tlsPtr_.rosalloc_runs[index];
1123   }
1124 
1125   void SetRosAllocRun(size_t index, void* run) {
1126     tlsPtr_.rosalloc_runs[index] = run;
1127   }
1128 
1129   bool ProtectStack(bool fatal_on_error = true);
1130   bool UnprotectStack();
1131 
1132   void SetMterpDefaultIBase(void* ibase) {
1133     tlsPtr_.mterp_default_ibase = ibase;
1134   }
1135 
1136   void SetMterpCurrentIBase(void* ibase) {
1137     tlsPtr_.mterp_current_ibase = ibase;
1138   }
1139 
1140   void SetMterpAltIBase(void* ibase) {
1141     tlsPtr_.mterp_alt_ibase = ibase;
1142   }
1143 
1144   const void* GetMterpDefaultIBase() const {
1145     return tlsPtr_.mterp_default_ibase;
1146   }
1147 
1148   const void* GetMterpCurrentIBase() const {
1149     return tlsPtr_.mterp_current_ibase;
1150   }
1151 
1152   const void* GetMterpAltIBase() const {
1153     return tlsPtr_.mterp_alt_ibase;
1154   }
1155 
1156   bool HandlingSignal() const {
1157     return tls32_.handling_signal_;
1158   }
1159 
1160   void SetHandlingSignal(bool handling_signal) {
1161     tls32_.handling_signal_ = handling_signal;
1162   }
1163 
1164   bool IsTransitioningToRunnable() const {
1165     return tls32_.is_transitioning_to_runnable;
1166   }
1167 
1168   void SetIsTransitioningToRunnable(bool value) {
1169     tls32_.is_transitioning_to_runnable = value;
1170   }
1171 
1172   void PushVerifier(verifier::MethodVerifier* verifier);
1173   void PopVerifier(verifier::MethodVerifier* verifier);
1174 
1175   void InitStringEntryPoints();
1176 
1177   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1178     debug_disallow_read_barrier_ += delta;
1179   }
1180 
1181   uint8_t GetDebugDisallowReadBarrierCount() const {
1182     return debug_disallow_read_barrier_;
1183   }
1184 
1185   void* GetCustomTLS() const REQUIRES(Locks::thread_list_lock_) {
1186     return custom_tls_;
1187   }
1188 
1189   void SetCustomTLS(void* data) REQUIRES(Locks::thread_list_lock_) {
1190     custom_tls_ = data;
1191   }
1192 
1193   // Returns true if the current thread is the jit sensitive thread.
1194   bool IsJitSensitiveThread() const {
1195     return this == jit_sensitive_thread_;
1196   }
1197 
1198   // Returns true if StrictMode events are traced for the current thread.
1199   static bool IsSensitiveThread() {
1200     if (is_sensitive_thread_hook_ != nullptr) {
1201       return (*is_sensitive_thread_hook_)();
1202     }
1203     return false;
1204   }
1205 
1206   // Set to the read barrier marking entrypoints to be non-null.
1207   void SetReadBarrierEntrypoints();
1208 
1209   static jobject CreateCompileTimePeer(JNIEnv* env,
1210                                        const char* name,
1211                                        bool as_daemon,
1212                                        jobject thread_group)
1213       REQUIRES_SHARED(Locks::mutator_lock_);
1214 
1215  private:
1216   explicit Thread(bool daemon);
1217   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1218   void Destroy();
1219 
1220   // Attaches the calling native thread to the runtime, returning the new native peer.
1221   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1222   template <typename PeerAction>
1223   static Thread* Attach(const char* thread_name,
1224                         bool as_daemon,
1225                         PeerAction p);
1226 
1227   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1228 
1229   template<bool kTransactionActive>
1230   static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1231                        ObjPtr<mirror::Object> peer,
1232                        jboolean thread_is_daemon,
1233                        jobject thread_group,
1234                        jobject thread_name,
1235                        jint thread_priority)
1236       REQUIRES_SHARED(Locks::mutator_lock_);
1237 
1238   // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
1239   // Dbg::ManageDeoptimization.
1240   ThreadState SetStateUnsafe(ThreadState new_state) {
1241     ThreadState old_state = GetState();
1242     if (old_state == kRunnable && new_state != kRunnable) {
1243       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1244       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1245       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1246       TransitionToSuspendedAndRunCheckpoints(new_state);
1247       // Since we transitioned to a suspended state, check the pass barrier requests.
1248       PassActiveSuspendBarriers();
1249     } else {
1250       tls32_.state_and_flags.as_struct.state = new_state;
1251     }
1252     return old_state;
1253   }
1254 
1255   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1256 
1257   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1258   void DumpStack(std::ostream& os,
1259                  bool dump_native_stack = true,
1260                  BacktraceMap* backtrace_map = nullptr,
1261                  bool force_dump_stack = false) const
1262       REQUIRES(!Locks::thread_suspend_count_lock_)
1263       REQUIRES_SHARED(Locks::mutator_lock_);
1264 
1265   // Out-of-line conveniences for debugging in gdb.
1266   static Thread* CurrentFromGdb();  // Like Thread::Current.
1267   // Like Thread::Dump(std::cerr).
1268   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1269 
1270   static void* CreateCallback(void* arg);
1271 
1272   void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
1273       REQUIRES_SHARED(Locks::mutator_lock_);
1274   void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1275       REQUIRES_SHARED(Locks::mutator_lock_);
1276 
1277   // Initialize a thread.
1278   //
1279   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1280   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1281   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1282   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1283   // of false).
1284   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1285       REQUIRES(Locks::runtime_shutdown_lock_);
1286   void InitCardTable();
1287   void InitCpu();
1288   void CleanupCpu();
1289   void InitTlsEntryPoints();
1290   void InitTid();
1291   void InitPthreadKeySelf();
1292   bool InitStackHwm();
1293 
1294   void SetUpAlternateSignalStack();
1295   void TearDownAlternateSignalStack();
1296 
1297   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1298       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1299 
1300   ALWAYS_INLINE void PassActiveSuspendBarriers()
1301       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1302 
1303   // Registers the current thread as the jit sensitive thread. Should be called just once.
1304   static void SetJitSensitiveThread() {
1305     if (jit_sensitive_thread_ == nullptr) {
1306       jit_sensitive_thread_ = Thread::Current();
1307     } else {
1308       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1309           << Thread::Current()->GetTid();
1310     }
1311   }
1312 
1313   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1314     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1315   }
1316 
1317   bool ModifySuspendCountInternal(Thread* self,
1318                                   int delta,
1319                                   AtomicInteger* suspend_barrier,
1320                                   SuspendReason reason)
1321       WARN_UNUSED
1322       REQUIRES(Locks::thread_suspend_count_lock_);
1323 
1324   void RunCheckpointFunction();
1325   void RunEmptyCheckpoint();
1326 
1327   bool PassActiveSuspendBarriers(Thread* self)
1328       REQUIRES(!Locks::thread_suspend_count_lock_);
1329 
1330   // Install the protected region for implicit stack checks.
1331   void InstallImplicitProtection();
1332 
1333   template <bool kPrecise>
1334   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1335 
1336   static bool IsAotCompiler();
1337 
1338   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1339   // change from being Suspended to Runnable without a suspend request occurring.
1340   union PACKED(4) StateAndFlags {
1341     StateAndFlags() {}
1342     struct PACKED(4) {
1343       // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1344       // ThreadFlags for bit field meanings.
1345       volatile uint16_t flags;
1346       // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
1347       // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1348       // operation. If a thread is suspended and a suspend_request is present, the thread may not
1349       // change to Runnable, as a GC or other operation is in progress.
1350       volatile uint16_t state;
1351     } as_struct;
1352     AtomicInteger as_atomic_int;
1353     volatile int32_t as_int;
1354 
1355    private:
1356     // gcc does not handle struct with volatile member assignments correctly.
1357     // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1358     DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1359   };
1360   static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
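
  // Illustrative sketch of the Suspended -> Runnable transition this 32-bit union enables. This is
  // not part of the header; the CAS method name is approximate, and the snippet only shows the idea
  // that flags and state are read and updated in one atomic step:
  //
  //   union StateAndFlags old_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;            // snapshot flags and state together
  //   if ((old_sf.as_struct.flags & kSuspendRequest) != 0) {
  //     return false;                                           // must stay suspended for now
  //   }
  //   union StateAndFlags new_sf;
  //   new_sf.as_int = old_sf.as_int;
  //   new_sf.as_struct.state = kRunnable;
  //   // A single CAS covers both halves, so a suspend request cannot slip in between the flag
  //   // check and the state change.
  //   return tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(old_sf.as_int,
  //                                                                          new_sf.as_int);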
1361 
1362   static void ThreadExitCallback(void* arg);
1363 
1364   // Maximum number of suspend barriers.
1365   static constexpr uint32_t kMaxSuspendBarriers = 3;
1366 
1367   // Has Thread::Startup been called?
1368   static bool is_started_;
1369 
1370   // TLS key used to retrieve the Thread*.
1371   static pthread_key_t pthread_key_self_;
1372 
1373   // Used to notify threads that they should attempt to resume; they will suspend again if
1374   // their suspend count is > 0.
1375   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1376 
1377   // Hook passed by the framework that returns true
1378   // when StrictMode events are traced for the current thread.
1379   static bool (*is_sensitive_thread_hook_)();
1380   // Stores the jit sensitive thread (which for now is the UI thread).
1381   static Thread* jit_sensitive_thread_;
1382 
1383   /***********************************************************************************************/
1384   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1385   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1386   // first if possible.
1387   /***********************************************************************************************/
1388 
1389   struct PACKED(4) tls_32bit_sized_values {
1390     // We have no control over the size of 'bool', but want our boolean fields
1391     // to be 4-byte quantities.
1392     typedef uint32_t bool32_t;
1393 
1394     explicit tls_32bit_sized_values(bool is_daemon) :
1395       suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1396       daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
1397       thread_exit_check_count(0), handling_signal_(false),
1398       is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
1399       debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
1400       disable_thread_flip_count(0), user_code_suspend_count(0) {
1401     }
1402 
1403     union StateAndFlags state_and_flags;
1404     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1405                   "Size of state_and_flags and int32 are different");
1406 
1407     // A non-zero value is used to tell the current thread to enter a safe point
1408     // at the next poll.
1409     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1410 
1411     // How much of 'suspend_count_' is by request of the debugger, used to set things right
1412     // when the debugger detaches. Must be <= suspend_count_.
1413     int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1414 
1415     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1416     // This is not to be confused with the native thread's tid, nor is it the value returned
1417     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1418     // important difference between this id and the ids visible to managed code is that these
1419     // ones get reused (to ensure that they fit in the number of bits available).
1420     uint32_t thin_lock_thread_id;
1421 
1422     // System thread id.
1423     uint32_t tid;
1424 
1425     // Is the thread a daemon?
1426     const bool32_t daemon;
1427 
1428     // A boolean telling us whether we're recursively throwing OOME.
1429     bool32_t throwing_OutOfMemoryError;
1430 
1431     // A positive value implies we're in a region where thread suspension isn't expected.
1432     uint32_t no_thread_suspension;
1433 
1434     // How many times has our pthread key's destructor been called?
1435     uint32_t thread_exit_check_count;
1436 
1437     // True if signal is being handled by this thread.
1438     bool32_t handling_signal_;
1439 
1440     // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1441     // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1442     // the rest of them.
1443     bool32_t is_transitioning_to_runnable;
1444 
1445     // True if the thread has been suspended by a debugger event. This is
1446     // used to invoke a method from the debugger, which is only allowed when
1447     // the thread is suspended by an event.
1448     bool32_t ready_for_debug_invoke;
1449 
1450     // True if the thread enters a method. This is used to detect method entry
1451     // event for the debugger.
1452     bool32_t debug_method_entry_;
1453 
1454     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1455     // thread local so that we can simplify the logic to check for the fast path of read barriers of
1456     // GC roots.
1457     bool32_t is_gc_marking;
1458 
1459     // Thread "interrupted" status; stays raised until queried or thrown.
1460     Atomic<bool32_t> interrupted;
1461 
1462     // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1463     // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1464     // processing of the CC collector only. This is thread local so that we can enable/disable weak
1465     // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1466     // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1467     // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1468     // ReferenceProcessor::EnableSlowPath().
1469     bool32_t weak_ref_access_enabled;
1470 
1471     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1472     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1473     // critical section enter.
1474     uint32_t disable_thread_flip_count;
1475 
1476     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1477     // suspended by the runtime from those suspended by user code.
1478     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1479     // told that AssertHeld should be good enough.
1480     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1481   } tls32_;
1482 
1483   struct PACKED(8) tls_64bit_sized_values {
1484     tls_64bit_sized_values() : trace_clock_base(0) {
1485     }
1486 
1487     // The clock base used for tracing.
1488     uint64_t trace_clock_base;
1489 
1490     RuntimeStats stats;
1491   } tls64_;
1492 
1493   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1494       tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1495       managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1496       self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1497       deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
1498       top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1499       instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
1500       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1501       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1502       last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1503       thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1504       thread_local_limit(nullptr),
1505       thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
1506       mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1507       thread_local_alloc_stack_end(nullptr),
1508       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr) {
1509       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1510     }
1511 
1512     // The biased card table; see CardTable for details.
1513     uint8_t* card_table;
1514 
1515     // The pending exception or null.
1516     mirror::Throwable* exception;
1517 
1518     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1519     // We leave extra space so there's room for the code that throws StackOverflowError.
1520     uint8_t* stack_end;
1521 
1522     // The top of the managed stack, often manipulated directly by compiler-generated code.
1523     ManagedStack managed_stack;
1524 
1525     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1526     // normally set to the address of itself.
1527     uintptr_t* suspend_trigger;
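
    // Rough illustration of the implicit suspend check that uses this field. This is an assumption
    // about the shape of the generated code, written as C++ for readability (the real check is
    // emitted as architecture-specific assembly):
    //
    //   // At a method entry / loop back edge, when implicit suspend checks are enabled:
    //   uintptr_t* trigger = tlsPtr_.suspend_trigger;
    //   uintptr_t unused = *reinterpret_cast<volatile uintptr_t*>(trigger);
    //   // Normally this reads the field's own address back. Once the runtime sets suspend_trigger
    //   // to 0, the load faults, and the SIGSEGV handler performs the actual suspend check.
    //   (void) unused;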
1528 
1529     // Every thread may have an associated JNI environment
1530     JNIEnvExt* jni_env;
1531 
1532     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1533     // created thread.
1534     JNIEnvExt* tmp_jni_env;
1535 
1536     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1537     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1538     // Thread::Current to give the address.
1539     Thread* self;
1540 
1541     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1542     // start up, until the thread is registered and the local opeer_ is used.
1543     mirror::Object* opeer;
1544     jobject jpeer;
1545 
1546     // The "lowest addressable byte" of the stack.
1547     uint8_t* stack_begin;
1548 
1549     // Size of the stack.
1550     size_t stack_size;
1551 
1552     // Sampling profiler and AOT verification cannot happen on the same run, so we share
1553     // the same entry for the stack trace and the verifier deps.
1554     union DepsOrStackTraceSample {
1555       DepsOrStackTraceSample() {
1556         verifier_deps = nullptr;
1557         stack_trace_sample = nullptr;
1558       }
1559       // Pointer to previous stack trace captured by sampling profiler.
1560       std::vector<ArtMethod*>* stack_trace_sample;
1561       // When doing AOT verification, per-thread VerifierDeps.
1562       verifier::VerifierDeps* verifier_deps;
1563     } deps_or_stack_trace_sample;
1564 
1565     // The next thread in the wait set this thread is part of or null if not waiting.
1566     Thread* wait_next;
1567 
1568     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1569     mirror::Object* monitor_enter_object;
1570 
1571     // Top of linked list of handle scopes or null for none.
1572     BaseHandleScope* top_handle_scope;
1573 
1574     // Needed to get the right ClassLoader in JNI_OnLoad, but also
1575     // useful for testing.
1576     jobject class_loader_override;
1577 
1578     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1579     Context* long_jump_context;
1580 
1581     // Additional stack used by method instrumentation to store method and return pc values.
1582     // Stored as a pointer since std::deque is not PACKED.
1583     std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1584 
1585     // JDWP invoke-during-breakpoint support.
1586     DebugInvokeReq* debug_invoke_req;
1587 
1588     // JDWP single-stepping support.
1589     SingleStepControl* single_step_control;
1590 
1591     // For gc purpose, a shadow frame record stack that keeps track of:
1592     // 1) shadow frames under construction.
1593     // 2) deoptimization shadow frames.
1594     StackedShadowFrameRecord* stacked_shadow_frame_record;
1595 
1596     // Deoptimization return value record stack.
1597     DeoptimizationContextRecord* deoptimization_context_stack;
1598 
1599     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1600     // Shadow frames may be created before deoptimization happens so that the debugger can
1601     // set local values there first.
1602     FrameIdToShadowFrame* frame_id_to_shadow_frame;
1603 
1604     // A cached copy of the java.lang.Thread's name.
1605     std::string* name;
1606 
1607     // A cached pthread_t for the pthread underlying this Thread*.
1608     pthread_t pthread_self;
1609 
1610     // If no_thread_suspension_ is > 0, what is causing that assertion.
1611     const char* last_no_thread_suspension_cause;
1612 
1613     // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1614     // requests another checkpoint, it goes to the checkpoint overflow list.
1615     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
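
    // Hedged sketch of the request side described above (the real logic lives in thread.cc; this
    // only illustrates the overflow rule, with Locks::thread_suspend_count_lock_ held):
    //
    //   if (tlsPtr_.checkpoint_function == nullptr) {
    //     tlsPtr_.checkpoint_function = closure;      // becomes the single pending checkpoint
    //   } else {
    //     checkpoint_overflow_.push_back(closure);    // queued until the pending one has run
    //   }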
1616 
1617     // Pending barriers that require passing, or NULL if non-pending. Installation is guarded by
1618     // Locks::thread_suspend_count_lock_.
1619     // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1620     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1621     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
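
    // Approximate sketch of how a suspended thread "passes" these barriers (the requester-side
    // futex wait is elided; the atomic method name is indicative rather than exact):
    //
    //   for (AtomicInteger* barrier : /* snapshot of active_suspend_barriers */) {
    //     if (barrier != nullptr && barrier->FetchAndSubSequentiallyConsistent(1) == 1) {
    //       // This thread was the last one the requester was waiting for: wake the requester,
    //       // which is futex-waiting on the counter reaching zero.
    //     }
    //   }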
1622 
1623     // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1624     uint8_t* thread_local_start;
1625 
1626     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1627     // potentially better performance.
1628     uint8_t* thread_local_pos;
1629     uint8_t* thread_local_end;
1630 
1631     // Thread local limit is how far we can expand the thread local buffer; it is greater than or
1632     // equal to thread_local_end.
1633     uint8_t* thread_local_limit;
1634 
1635     size_t thread_local_objects;
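
    // Minimal sketch of the bump-pointer TLAB allocation these fields support. AllocTlabSketch is
    // a hypothetical helper written for illustration only:
    //
    //   mirror::Object* AllocTlabSketch(size_t bytes) {
    //     uint8_t* new_pos = tlsPtr_.thread_local_pos + bytes;
    //     if (UNLIKELY(new_pos > tlsPtr_.thread_local_end)) {
    //       // TLAB exhausted: the caller may grow it up to thread_local_limit or take a slow path.
    //       return nullptr;
    //     }
    //     mirror::Object* obj = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
    //     tlsPtr_.thread_local_pos = new_pos;
    //     ++tlsPtr_.thread_local_objects;
    //     return obj;
    //   }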
1636 
1637     // Entrypoint function pointers.
1638     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1639     JniEntryPoints jni_entrypoints;
1640     QuickEntryPoints quick_entrypoints;
1641 
1642     // Mterp jump table bases.
1643     void* mterp_current_ibase;
1644     void* mterp_default_ibase;
1645     void* mterp_alt_ibase;
1646 
1647     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1648     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1649 
1650     // Thread-local allocation stack data/routines.
1651     StackReference<mirror::Object>* thread_local_alloc_stack_top;
1652     StackReference<mirror::Object>* thread_local_alloc_stack_end;
1653 
1654     // Support for Mutex lock hierarchy bug detection.
1655     BaseMutex* held_mutexes[kLockLevelCount];
1656 
1657     // The function used for thread flip.
1658     Closure* flip_function;
1659 
1660     // Current method verifier, used for root marking.
1661     verifier::MethodVerifier* method_verifier;
1662 
1663     // Thread-local mark stack for the concurrent copying collector.
1664     gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1665   } tlsPtr_;
1666 
1667   // Guards the 'wait_monitor_' members.
1668   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1669 
1670   // Condition variable waited upon during a wait.
1671   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1672   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1673   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1674 
1675   // Debug disallow read barrier count; only checked in debug builds and only in the runtime.
1676   uint8_t debug_disallow_read_barrier_ = 0;
1677 
1678   // Note that this is not in the packed struct, so it may not be accessed for cross compilation.
1679   uintptr_t poison_object_cookie_ = 0;
1680 
1681   // Pending extra checkpoints if checkpoint_function_ is already used.
1682   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1683 
1684   // Custom TLS field that can be used by plugins.
1685   // TODO: Generalize once we have more plugins.
1686   void* custom_tls_;
1687 
1688   // True if the thread is allowed to call back into Java (e.g. during class resolution).
1689   // By default this is true.
1690   bool can_call_into_java_;
1691 
1692   friend class Dbg;  // For SetStateUnsafe.
1693   friend class gc::collector::SemiSpace;  // For getting stack traces.
1694   friend class Runtime;  // For CreatePeer.
1695   friend class QuickExceptionHandler;  // For dumping the stack.
1696   friend class ScopedThreadStateChange;
1697   friend class StubTest;  // For accessing entrypoints.
1698   friend class ThreadList;  // For ~Thread and Destroy.
1699 
1700   friend class EntrypointsOrderTest;  // To test the order of tls entries.
1701 
1702   DISALLOW_COPY_AND_ASSIGN(Thread);
1703 };
1704 
1705 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1706  public:
1707   ALWAYS_INLINE explicit ScopedAssertNoThreadSuspension(const char* cause)
1708       ACQUIRE(Roles::uninterruptible_) {
1709     if (kIsDebugBuild) {
1710       self_ = Thread::Current();
1711       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1712     } else {
1713       Roles::uninterruptible_.Acquire();  // No-op.
1714     }
1715   }
1716   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1717     if (kIsDebugBuild) {
1718       self_->EndAssertNoThreadSuspension(old_cause_);
1719     } else {
1720       Roles::uninterruptible_.Release();  // No-op.
1721     }
1722   }
1723 
1724  private:
1725   Thread* self_;
1726   const char* old_cause_;
1727 };
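
// A minimal usage sketch; ComputeWithoutSuspending is invented for illustration:
//
//   void ComputeWithoutSuspending(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
//     ScopedAssertNoThreadSuspension ants("ComputeWithoutSuspending");
//     // In debug builds, reaching a suspension point in this scope trips the assertion;
//     // in release builds the guard only satisfies the Roles::uninterruptible_ capability.
//   }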
1728 
1729 class ScopedStackedShadowFramePusher {
1730  public:
1731   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1732     : self_(self), type_(type) {
1733     self_->PushStackedShadowFrame(sf, type);
1734   }
1735   ~ScopedStackedShadowFramePusher() {
1736     self_->PopStackedShadowFrame(type_);
1737   }
1738 
1739  private:
1740   Thread* const self_;
1741   const StackedShadowFrameType type_;
1742 
1743   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1744 };
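
// Hedged usage sketch; `frame` is assumed to be a ShadowFrame the caller already manages:
//
//   {
//     ScopedStackedShadowFramePusher pusher(
//         self, frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//     // ... populate the frame; the stacked record keeps it visible to GC root visiting ...
//   }  // popped automatically here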
1745 
1746 // Only works for debug builds.
1747 class ScopedDebugDisallowReadBarriers {
1748  public:
1749   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1750     self_->ModifyDebugDisallowReadBarrier(1);
1751   }
1752   ~ScopedDebugDisallowReadBarriers() {
1753     self_->ModifyDebugDisallowReadBarrier(-1);
1754   }
1755 
1756  private:
1757   Thread* const self_;
1758 };
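
// Illustrative usage (effective in debug builds only, as noted above):
//
//   {
//     ScopedDebugDisallowReadBarriers no_rb(Thread::Current());
//     // Code here is expected not to trigger read barriers; debug checks can flag it if it does.
//   }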
1759 
1760 class ScopedTransitioningToRunnable : public ValueObject {
1761  public:
1762   explicit ScopedTransitioningToRunnable(Thread* self)
1763       : self_(self) {
1764     DCHECK_EQ(self, Thread::Current());
1765     if (kUseReadBarrier) {
1766       self_->SetIsTransitioningToRunnable(true);
1767     }
1768   }
1769 
1770   ~ScopedTransitioningToRunnable() {
1771     if (kUseReadBarrier) {
1772       self_->SetIsTransitioningToRunnable(false);
1773     }
1774   }
1775 
1776  private:
1777   Thread* const self_;
1778 };
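
// Illustrative usage sketch (only meaningful when kUseReadBarrier is true); it marks the thread as
// being in the middle of a suspended-to-runnable transition for the duration of the scope:
//
//   {
//     ScopedTransitioningToRunnable str(self);
//     // ... the part of the transition that should be treated as "about to become runnable" ...
//   }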
1779 
1780 class ThreadLifecycleCallback {
1781  public:
1782   virtual ~ThreadLifecycleCallback() {}
1783 
1784   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1785   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1786 };
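
// Hedged sketch of an implementation; LoggingThreadCallback is invented for illustration, and
// registration with the runtime happens outside this header:
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "Thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "Thread exiting: " << *self;
//     }
//   };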
1787 
1788 std::ostream& operator<<(std::ostream& os, const Thread& thread);
1789 std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
1790 
1791 }  // namespace art
1792 
1793 #endif  // ART_RUNTIME_THREAD_H_
1794