• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/enums.h"
30 #include "base/locks.h"
31 #include "base/macros.h"
32 #include "base/safe_map.h"
33 #include "base/value_object.h"
34 #include "entrypoints/jni/jni_entrypoints.h"
35 #include "entrypoints/quick/quick_entrypoints.h"
36 #include "handle.h"
37 #include "handle_scope.h"
38 #include "interpreter/interpreter_cache.h"
39 #include "javaheapprof/javaheapsampler.h"
40 #include "jvalue.h"
41 #include "managed_stack.h"
42 #include "offsets.h"
43 #include "read_barrier_config.h"
44 #include "reflective_handle_scope.h"
45 #include "runtime_globals.h"
46 #include "runtime_stats.h"
47 #include "thread_state.h"
48 
49 class BacktraceMap;
50 
51 namespace art {
52 
53 namespace gc {
54 namespace accounting {
55 template<class T> class AtomicStack;
56 }  // namespace accounting
57 namespace collector {
58 class SemiSpace;
59 }  // namespace collector
60 }  // namespace gc
61 
62 namespace instrumentation {
63 struct InstrumentationStackFrame;
64 }  // namespace instrumentation
65 
66 namespace mirror {
67 class Array;
68 class Class;
69 class ClassLoader;
70 class Object;
71 template<class T> class ObjectArray;
72 template<class T> class PrimitiveArray;
73 typedef PrimitiveArray<int32_t> IntArray;
74 class StackTraceElement;
75 class String;
76 class Throwable;
77 }  // namespace mirror
78 
79 namespace verifier {
80 class MethodVerifier;
81 class VerifierDeps;
82 }  // namespace verifier
83 
84 class ArtMethod;
85 class BaseMutex;
86 class ClassLinker;
87 class Closure;
88 class Context;
89 class DeoptimizationContextRecord;
90 class DexFile;
91 class FrameIdToShadowFrame;
92 class IsMarkedVisitor;
93 class JavaVMExt;
94 class JNIEnvExt;
95 class Monitor;
96 class RootVisitor;
97 class ScopedObjectAccessAlreadyRunnable;
98 class ShadowFrame;
99 class StackedShadowFrameRecord;
100 enum class SuspendReason : char;
101 class Thread;
102 class ThreadList;
103 enum VisitRootFlags : uint8_t;
104 
105 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
106 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
107 // on.
class TLSData {
 public:
  // Virtual destructor so that subclass state is destroyed correctly when the
  // runtime deletes an entry through a TLSData* during thread shutdown.
  // `= default` is preferred over an empty body (modernize-use-equals-default).
  virtual ~TLSData() = default;
};
112 
113 // Thread priorities. These must match the Thread.MIN_PRIORITY,
114 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,   // Matches java.lang.Thread.MIN_PRIORITY.
  kNormThreadPriority = 5,  // Matches java.lang.Thread.NORM_PRIORITY.
  kMaxThreadPriority = 10,  // Matches java.lang.Thread.MAX_PRIORITY.
};
120 
enum ThreadFlag {
  // Each value is a distinct bit so the flags can be combined in
  // tls32_.state_and_flags (see IsSuspended, which masks with kSuspendRequest).
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
  kEmptyCheckpointRequest = 4,  // Request that the thread do empty checkpoint and then continue.
  kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
};
128 
// Reason a shadow frame was pushed onto the thread's stacked-shadow-frame list.
enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,  // Frame is still being set up.
  kDeoptimizationShadowFrame,     // Frame was pushed as part of deoptimization.
};
133 
134 // The type of method that triggers deoptimization. It contains info on whether
135 // the deoptimized method should advance dex_pc.
enum class DeoptimizationMethodType {
  kKeepDexPc,  // Dex pc is required to be kept upon deoptimization (do not advance it).
  kDefault     // Dex pc may or may not advance depending on other conditions.
};
140 
141 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
142 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
143 
144 // Thread's stack layout for implicit stack overflow checks:
145 //
146 //   +---------------------+  <- highest address of stack memory
147 //   |                     |
148 //   .                     .  <- SP
149 //   |                     |
150 //   |                     |
151 //   +---------------------+  <- stack_end
152 //   |                     |
153 //   |  Gap                |
154 //   |                     |
155 //   +---------------------+  <- stack_begin
156 //   |                     |
157 //   | Protected region    |
158 //   |                     |
159 //   +---------------------+  <- lowest address of stack memory
160 //
161 // The stack always grows down in memory.  At the lowest address is a region of memory
162 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
163 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
164 // between the stack_end and the highest address in stack memory.  An implicit stack
165 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
166 // If the thread's SP is below the stack_end address this will be a read into the protected
167 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
168 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
169 // if the thread makes a call out to a native function (through JNI), that native function
170 // might only have 4K of memory (if the SP is adjacent to stack_end).
171 
172 class Thread {
173  public:
174   static const size_t kStackOverflowImplicitCheckSize;
175   static constexpr bool kVerifyStack = kIsDebugBuild;
176 
177   // Creates a new native thread corresponding to the given managed peer.
178   // Used to implement Thread.start.
179   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
180 
181   // Attaches the calling native thread to the runtime, returning the new native peer.
182   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
183   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
184                         bool create_peer);
185   // Attaches the calling native thread to the runtime, returning the new native peer.
186   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
187 
188   // Reset internal state of child thread after fork.
189   void InitAfterFork();
190 
191   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
192   // high cost and so we favor passing self around when possible.
193   // TODO: mark as PURE so the compiler may coalesce and remove?
194   static Thread* Current();
195 
196   // On a runnable thread, check for pending thread suspension request and handle if pending.
197   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
198 
199   // Process pending thread suspension request and handle if pending.
200   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
201 
202   // Process a pending empty checkpoint if pending.
203   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
204   void CheckEmptyCheckpointFromMutex();
205 
206   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
207                                    ObjPtr<mirror::Object> thread_peer)
208       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
209       REQUIRES_SHARED(Locks::mutator_lock_);
210   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
211       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
212       REQUIRES_SHARED(Locks::mutator_lock_);
213 
214   // Translates 172 to pAllocArrayFromCode and so on.
215   template<PointerSize size_of_pointers>
216   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
217 
218   // Dumps a one-line summary of thread state (used for operator<<).
219   void ShortDump(std::ostream& os) const;
220 
221   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
222   void Dump(std::ostream& os,
223             bool dump_native_stack = true,
224             BacktraceMap* backtrace_map = nullptr,
225             bool force_dump_stack = false) const
226       REQUIRES_SHARED(Locks::mutator_lock_);
227 
228   void DumpJavaStack(std::ostream& os,
229                      bool check_suspended = true,
230                      bool dump_locks = true) const
231       REQUIRES_SHARED(Locks::mutator_lock_);
232 
233   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
234   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
235   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
236       REQUIRES_SHARED(Locks::mutator_lock_);
237 
  // Returns the thread's current state. Debug builds check that the stored raw
  // value lies within the [kTerminated, kSuspended] range of ThreadState.
  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }
243 
244   ThreadState SetState(ThreadState new_state);
245 
  // Returns the thread's suspend count; caller must hold thread_suspend_count_lock_.
  int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }
249 
  // Returns the portion of the suspend count attributable to user-code suspension
  // (tls32_.user_code_suspend_count); caller must hold both annotated locks.
  int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
                                               Locks::user_code_suspension_lock_) {
    return tls32_.user_code_suspend_count;
  }
254 
IsSuspended()255   bool IsSuspended() const {
256     union StateAndFlags state_and_flags;
257     state_and_flags.as_int = tls32_.state_and_flags.as_int;
258     return state_and_flags.as_struct.state != kRunnable &&
259         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
260   }
261 
  // Decrements the per-thread class-definition counter (tls32_.define_class_counter).
  void DecrDefineClassCount() {
    tls32_.define_class_counter--;
  }

  // Increments the per-thread class-definition counter.
  void IncrDefineClassCount() {
    tls32_.define_class_counter++;
  }

  // Returns the current value of the class-definition counter.
  uint32_t GetDefineClassCount() const {
    return tls32_.define_class_counter;
  }
272 
273   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
274   // release thread_suspend_count_lock_ internally.
275   ALWAYS_INLINE
276   bool ModifySuspendCount(Thread* self,
277                           int delta,
278                           AtomicInteger* suspend_barrier,
279                           SuspendReason reason)
280       WARN_UNUSED
281       REQUIRES(Locks::thread_suspend_count_lock_);
282 
283   // Requests a checkpoint closure to run on another thread. The closure will be run when the
284   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
285   // originating from a compiler generated suspend point check. This returns true if the closure
286   // was added and will (eventually) be executed. It returns false otherwise.
287   //
288   // Since multiple closures can be queued and some closures can delay other threads from running,
289   // no closure should attempt to suspend another thread while running.
290   // TODO We should add some debug option that verifies this.
291   //
292   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
293   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
294   // acquires it.
295   bool RequestCheckpoint(Closure* function)
296       REQUIRES(Locks::thread_suspend_count_lock_);
297 
298   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
299   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
300   // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
301   // will go into while it is awaiting the checkpoint to be run.
302   // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
303   // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
304   // for the closure or the rest of the system.
305   // NB Since multiple closures can be queued and some closures can delay other threads from running
306   // no closure should attempt to suspend another thread while running.
307   bool RequestSynchronousCheckpoint(Closure* function,
308                                     ThreadState suspend_state = ThreadState::kWaiting)
309       REQUIRES_SHARED(Locks::mutator_lock_)
310       RELEASE(Locks::thread_list_lock_)
311       REQUIRES(!Locks::thread_suspend_count_lock_);
312 
313   bool RequestEmptyCheckpoint()
314       REQUIRES(Locks::thread_suspend_count_lock_);
315 
316   void SetFlipFunction(Closure* function);
317   Closure* GetFlipFunction();
318 
  // Accessors for the thread-local GC mark stack. The CHECKs enforce that these
  // are only used when the read barrier is enabled (kUseReadBarrier).
  gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
    CHECK(kUseReadBarrier);
    return tlsPtr_.thread_local_mark_stack;
  }
  void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
    CHECK(kUseReadBarrier);
    tlsPtr_.thread_local_mark_stack = stack;
  }
327 
328   // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of
329   // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
330   void FullSuspendCheck()
331       REQUIRES(!Locks::thread_suspend_count_lock_)
332       REQUIRES_SHARED(Locks::mutator_lock_);
333 
334   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
335   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
336       REQUIRES(!Locks::thread_suspend_count_lock_)
337       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
338 
339   // Transition from runnable into a state where mutator privileges are denied. Releases share of
340   // mutator lock.
341   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
342       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
343       UNLOCK_FUNCTION(Locks::mutator_lock_);
344 
345   // Once called thread suspension will cause an assertion failure.
StartAssertNoThreadSuspension(const char * cause)346   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
347     Roles::uninterruptible_.Acquire();  // No-op.
348     if (kIsDebugBuild) {
349       CHECK(cause != nullptr);
350       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
351       tls32_.no_thread_suspension++;
352       tlsPtr_.last_no_thread_suspension_cause = cause;
353       return previous_cause;
354     } else {
355       return nullptr;
356     }
357   }
358 
359   // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      // old_cause may only be null when closing the outermost (sole) open region.
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      // Restore the cause saved by the matching StartAssertNoThreadSuspension.
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
    Roles::uninterruptible_.Release();  // No-op.
  }
369 
370   // End region where no thread suspension is expected. Returns the current open region in case we
371   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
372   // is larger than one.
  const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
    const char* ret = nullptr;
    if (kIsDebugBuild) {
      // This overload is only supported when exactly one region is open
      // (see the comment above the declaration).
      CHECK_EQ(tls32_.no_thread_suspension, 1u);
      tls32_.no_thread_suspension--;
      // Hand the saved cause back to the caller so the region can be reopened.
      ret = tlsPtr_.last_no_thread_suspension_cause;
      tlsPtr_.last_no_thread_suspension_cause = nullptr;
    }
    Roles::uninterruptible_.Release();  // No-op.
    return ret;
  }
384 
385   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
386 
387   // Return true if thread suspension is allowable.
388   bool IsThreadSuspensionAllowable() const;
389 
  // Whether this thread's daemon flag is set.
  bool IsDaemon() const {
    return tls32_.daemon;
  }
393 
394   size_t NumberOfHeldMutexes() const;
395 
396   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
397 
398   /*
399    * Changes the priority of this thread to match that of the java.lang.Thread object.
400    *
401    * We map a priority value from 1-10 to Linux "nice" values, where lower
402    * numbers indicate higher priority.
403    */
404   void SetNativePriority(int newPriority);
405 
406   /*
407    * Returns the priority of this thread by querying the system.
408    * This is useful when attaching a thread through JNI.
409    *
410    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
411    */
412   int GetNativePriority() const;
413 
414   // Guaranteed to be non-zero.
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  // Native (kernel) thread id — distinct from the runtime thin-lock id above.
  pid_t GetTid() const {
    return tls32_.tid;
  }
422 
423   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
424   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
425 
426   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
427   // allocation, or locking.
428   void GetThreadName(std::string& name) const;
429 
430   // Sets the thread's name.
431   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
432 
433   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
434   uint64_t GetCpuMicroTime() const;
435 
  mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
    // Only valid on the current thread; other threads must use
    // GetPeerFromOtherThread (see comment below about the CC thread flip).
    DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
    // jpeer (JNI ref) and opeer (direct ref) are mutually exclusive; by the time
    // this can be called, opeer is expected to be the live reference.
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }
441   // GetPeer is not safe if called on another thread in the middle of the CC thread flip and
442   // the thread's stack may have not been flipped yet and peer may be a from-space (stale) ref.
443   // This function will explicitly mark/forward it.
444   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
445 
  // True if this thread has a Java peer, held as either a JNI reference (jpeer)
  // or a direct object reference (opeer).
  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }
449 
  // Per-thread runtime statistics.
  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }
453 
454   bool IsStillStarting() const;
455 
  // True if an exception has been set on this thread.
  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  // True if an asynchronously thrown exception (see SetAsyncException) is pending.
  bool IsAsyncExceptionPending() const {
    return tlsPtr_.async_exception != nullptr;
  }

  // Returns the pending exception, or null if none.
  mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }
467 
468   void AssertPendingException() const;
469   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
470   void AssertNoPendingException() const;
471   void AssertNoPendingExceptionForNewException(const char* msg) const;
472 
473   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
474 
475   // Set an exception that is asynchronously thrown from a different thread. This will be checked
476   // periodically and might overwrite the current 'Exception'. This can only be called from a
477   // checkpoint.
478   //
479   // The caller should also make sure that the thread has been deoptimized so that the exception
480   // could be detected on back-edges.
481   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
482       REQUIRES_SHARED(Locks::mutator_lock_);
483 
  // Clears the pending exception. Note: does not touch any pending async exception.
  void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }
487 
488   // Move the current async-exception to the main exception. This should be called when the current
489   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
490   // that needs to be dealt with, false otherwise.
491   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
492 
493   // Find catch block and perform long jump to appropriate exception handle
494   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
495 
496   Context* GetLongJumpContext();
  // Returns a long-jump Context to the thread's single-slot cache. If a context
  // is already cached, it is released (via ReleaseLongJumpContextInternal)
  // before the new one is stored.
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      ReleaseLongJumpContextInternal();
    }
    tlsPtr_.long_jump_context = context;
  }
503 
504   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
505   // abort the runtime iff abort_on_error is true.
506   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
507                               bool check_suspended = true,
508                               bool abort_on_error = true) const
509       REQUIRES_SHARED(Locks::mutator_lock_);
510 
511   // Returns whether the given exception was thrown by the current Java method being executed
512   // (Note that this includes native Java methods).
513   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
514       REQUIRES_SHARED(Locks::mutator_lock_);
515 
  // Sets the top quick (compiled-code) frame of the managed stack.
  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  // As above, but records the frame as tagged (see ManagedStack for tag semantics).
  void SetTopOfStackTagged(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
  }

  // Sets the top shadow frame of the managed stack.
  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  // True if the thread has any managed frames at all, quick or shadow.
  bool HasManagedStack() const {
    return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
  }
531 
532   // If 'msg' is null, no detail message is set.
533   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
534       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
535 
536   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
537   // used as the new exception's cause.
538   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
539       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
540 
541   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
542       __attribute__((format(printf, 3, 4)))
543       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
544 
545   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
546       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
547 
548   // OutOfMemoryError is special, because we need to pre-allocate an instance.
549   // Only the GC should call this.
550   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
551       REQUIRES(!Roles::uninterruptible_);
552 
553   static void Startup();
554   static void FinishStartup();
555   static void Shutdown();
556 
557   // Notify this thread's thread-group that this thread has started.
558   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
559   //       is null, the thread's thread-group is loaded from the peer.
560   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
561       REQUIRES_SHARED(Locks::mutator_lock_);
562 
563   // JNI methods
  // This thread's JNI environment.
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }
567 
568   // Convert a jobject into a Object*
569   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
570   // Checks if the weak global ref has been cleared by the GC without decoding it.
571   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
572 
  // Object recorded in tlsPtr_.monitor_enter_object — presumably the object
  // whose monitor this thread is currently entering/blocked on (set by monitor
  // code; verify against callers).
  mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }
580 
581   // Implements java.lang.Thread.interrupted.
582   bool Interrupted();
583   // Implements java.lang.Thread.isInterrupted.
584   bool IsInterrupted();
585   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
  // Sets the interrupted flag with sequentially consistent ordering.
  void SetInterrupted(bool i) {
    tls32_.interrupted.store(i, std::memory_order_seq_cst);
  }
589   void Notify() REQUIRES(!wait_mutex_);
590 
  // Increments the poison-object cookie (see poison_object_cookie_ and
  // PoisonObjectPointersIfDebug).
  ALWAYS_INLINE void PoisonObjectPointers() {
    ++poison_object_cookie_;
  }

  ALWAYS_INLINE static void PoisonObjectPointersIfDebug();

  // Current value of the poison-object cookie.
  ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
    return poison_object_cookie_;
  }
600 
601   // Parking for 0ns of relative time means an untimed park, negative (though
602   // should be handled in java code) returns immediately
603   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
604   void Unpark();
605 
606  private:
607   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
608 
609  public:
  // Mutex guarding the wait_* members below.
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  // The following accessors require wait_mutex_ to be held (per the annotations).
  ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
    wait_monitor_ = mon;
  }
625 
626   // Waiter link-list support.
  // Next thread in the waiter linked list, or null.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  // Links `next` as the following thread in the waiter list.
  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }
634 
  // Class-loader override installed via SetClassLoaderOverride, or null if unset.
  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }
638 
639   void SetClassLoaderOverride(jobject class_loader_override);
640 
641   // Create the internal representation of a stack trace, that is more time
642   // and space efficient to compute than the StackTraceElement[].
643   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
644       REQUIRES_SHARED(Locks::mutator_lock_);
645 
646   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
647   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
648   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
649   // with the number of valid frames in the returned array.
650   static jobjectArray InternalStackTraceToStackTraceElementArray(
651       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
652       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
653       REQUIRES_SHARED(Locks::mutator_lock_);
654 
655   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
656       REQUIRES_SHARED(Locks::mutator_lock_);
657 
  // True if any frame-id-to-shadow-frame mappings exist on this thread
  // (see FrameIdToShadowFrame; populated for debugger-provided frames).
  bool HasDebuggerShadowFrames() const {
    return tlsPtr_.frame_id_to_shadow_frame != nullptr;
  }
661 
662   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
663       REQUIRES_SHARED(Locks::mutator_lock_);
664 
665   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
666       REQUIRES(Locks::mutator_lock_);
667 
  // Verifies the managed stack when kVerifyStack is set (debug builds, per
  // kVerifyStack = kIsDebugBuild above); no-op otherwise.
  void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kVerifyStack) {
      VerifyStackImpl();
    }
  }
673 
674   //
675   // Offsets of various members of native Thread class, used by compiled code.
676   //
677 
  // Byte offset of tls32_.thin_lock_thread_id, for use by compiled code.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  // Byte offset of tls32_.interrupted.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
  }

  // Byte offset of tls32_.weak_ref_access_enabled.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
  }

  // Byte offset of tls32_.state_and_flags.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

  // Byte offset of tls32_.use_mterp.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
  }

  // Byte offset of tls32_.is_gc_marking.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
  }

  // Size in bytes of the is_gc_marking field.
  static constexpr size_t IsGcMarkingSize() {
    return sizeof(tls32_.is_gc_marking);
  }
723 
724   // Deoptimize the Java stack.
725   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
726 
727  private:
  // Converts an offset within tlsPtr_ (expressed for the runtime's own pointer
  // size, kRuntimePointerSize) into a Thread offset valid for code compiled for
  // `pointer_size`, rescaling pointer-sized slots as needed.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    // Scale up when the target pointer size exceeds the runtime's...
    size_t scale = (pointer_size > kRuntimePointerSize) ?
      static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
    // ...and shrink when it is smaller. At most one of scale/shrink exceeds 1.
    size_t shrink = (kRuntimePointerSize > pointer_size) ?
      static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
737 
738  public:
  // Offset (relative to the Thread object) of the quick entrypoint at
  // `quick_entrypoint_offset` within tlsPtr_.quick_entrypoints.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
      size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }
745 
QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,PointerSize pointer_size)746   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
747                                                           PointerSize pointer_size) {
748     if (pointer_size == PointerSize::k32) {
749       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
750           Uint32Value();
751     } else {
752       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
753           Uint32Value();
754     }
755   }
756 
757   template<PointerSize pointer_size>
JniEntryPointOffset(size_t jni_entrypoint_offset)758   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
759     return ThreadOffsetFromTlsPtr<pointer_size>(
760         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
761   }
762 
  // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
  template <PointerSize pointer_size>
  static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
    // The entry point list defines 30 ReadBarrierMarkRegX entry points.
    DCHECK_LT(reg, 30u);
    // The ReadBarrierMarkRegX entry points are ordered by increasing
    // register number in Thread::tlsPtr_.quick_entrypoints, so the offset for
    // register `reg` is the Reg00 offset plus `reg` pointer-sized slots.
    return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
        + static_cast<size_t>(pointer_size) * reg;
  }
773 
  // Offset of tlsPtr_.self.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  // Offset of tlsPtr_.mterp_current_ibase.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
  }

  // Offset of tlsPtr_.exception.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  // Offset of tlsPtr_.opeer (the managed peer object).
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }


  // Offset of tlsPtr_.card_table.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  // Offset of tlsPtr_.suspend_trigger.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  // Offset of tlsPtr_.thread_local_pos (TLAB allocation cursor).
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_pos));
  }

  // Offset of tlsPtr_.thread_local_end (TLAB end).
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_end));
  }

  // Offset of tlsPtr_.thread_local_objects.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_objects));
  }

  // Offset of tlsPtr_.rosalloc_runs.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                rosalloc_runs));
  }

  // Offset of tlsPtr_.thread_local_alloc_stack_top.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_top));
  }

  // Offset of tlsPtr_.thread_local_alloc_stack_end.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_end));
  }
842 
  // Size of stack less any space reserved for stack overflow handling
  // (i.e. the usable stack size for regular execution).
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;

  // Returns the current effective stack end (lowest usable address).
  uint8_t* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to that to be used during a stack overflow
  void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);

  // Set the stack end to that to be used during regular execution
  ALWAYS_INLINE void ResetDefaultStackEnd();

  // True while the stack-overflow reserved region has been opened up, i.e.
  // stack_end has been lowered all the way to stack_begin.
  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }
863 
  // Offset of tlsPtr_.stack_end.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  // Offset of tlsPtr_.jni_env.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  // Offset of the tagged top quick frame inside tlsPtr_.managed_stack.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TaggedTopQuickFrameOffset());
  }
882 
  // Returns this thread's managed stack descriptor.
  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
  ALWAYS_INLINE ShadowFrame* PopShadowFrame();

  // Offset of the top shadow frame inside tlsPtr_.managed_stack.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Is the given obj in one of this thread's JNI transition frames?
  bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the top of the handle scope chain (may be null).
  BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.top_handle_scope;
  }

  // Pushes a handle scope; it must already link to the current top.
  void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }
919 
PopHandleScope()920   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
921     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
922     DCHECK(handle_scope != nullptr);
923     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
924     return handle_scope;
925   }
926 
  // Offset of tlsPtr_.top_handle_scope.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  // Returns the top of the reflective handle scope chain (may be null).
  BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
    return tlsPtr_.top_reflective_handle_scope;
  }

  // Pushes a reflective handle scope; it must already link to the current top
  // and belong to this thread.
  void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
    DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
    DCHECK_EQ(scope->GetThread(), this);
    tlsPtr_.top_reflective_handle_scope = scope;
  }
942 
PopReflectiveHandleScope()943   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
944     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
945     DCHECK(handle_scope != nullptr);
946     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
947     return handle_scope;
948   }
949 
  // Indicates whether this thread is ready to invoke a method for debugging. This
  // is only true if the thread has been suspended by a debug event.
  bool IsReadyForDebugInvoke() const {
    return tls32_.ready_for_debug_invoke;
  }

  void SetReadyForDebugInvoke(bool ready) {
    tls32_.ready_for_debug_invoke = ready;
  }

  // True if a debug method-entry event is pending for this thread.
  bool IsDebugMethodEntry() const {
    return tls32_.debug_method_entry_;
  }

  void SetDebugMethodEntry() {
    tls32_.debug_method_entry_ = true;
  }

  void ClearDebugMethodEntry() {
    tls32_.debug_method_entry_ = false;
  }

  // Returns the cached GC-marking flag. Only meaningful with read barriers enabled.
  bool GetIsGcMarking() const {
    CHECK(kUseReadBarrier);
    return tls32_.is_gc_marking;
  }

  void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);

  // Whether weak-reference access is currently allowed for this thread
  // (read-barrier collectors only).
  bool GetWeakRefAccessEnabled() const {
    CHECK(kUseReadBarrier);
    return tls32_.weak_ref_access_enabled;
  }

  void SetWeakRefAccessEnabled(bool enabled) {
    CHECK(kUseReadBarrier);
    tls32_.weak_ref_access_enabled = enabled;
  }

  // Nesting count of thread-flip disabling (read-barrier collectors only).
  uint32_t GetDisableThreadFlipCount() const {
    CHECK(kUseReadBarrier);
    return tls32_.disable_thread_flip_count;
  }

  void IncrementDisableThreadFlipCount() {
    CHECK(kUseReadBarrier);
    ++tls32_.disable_thread_flip_count;
  }

  void DecrementDisableThreadFlipCount() {
    CHECK(kUseReadBarrier);
    DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
    --tls32_.disable_thread_flip_count;
  }

  // Returns true if the thread is a runtime thread (eg from a ThreadPool).
  bool IsRuntimeThread() const {
    return is_runtime_thread_;
  }

  void SetIsRuntimeThread(bool is_runtime_thread) {
    is_runtime_thread_ = is_runtime_thread;
  }
1013 
CorePlatformApiCookie()1014   uint32_t CorePlatformApiCookie() {
1015     return core_platform_api_cookie_;
1016   }
1017 
  // Sets the core platform API cookie for this thread.
  void SetCorePlatformApiCookie(uint32_t cookie) {
    core_platform_api_cookie_ = cookie;
  }

  // Returns true if the thread is allowed to load java classes.
  bool CanLoadClasses() const;

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
    // represented by ObjPtr.
    return reinterpret_cast<mirror::Throwable*>(0x100);
  }
1031 
  // Currently deoptimization invokes verifier which can trigger class loading
  // and execute Java code, so there might be nested deoptimizations happening.
  // We need to save the ongoing deoptimization shadow frames and return
  // values on stacks.
  // 'from_code' denotes whether the deoptimization was explicitly made from
  // compiled code.
  // 'method_type' contains info on whether deoptimization should advance
  // dex_pc.
  void PushDeoptimizationContext(const JValue& return_value,
                                 bool is_reference,
                                 ObjPtr<mirror::Throwable> exception,
                                 bool from_code,
                                 DeoptimizationMethodType method_type)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void PopDeoptimizationContext(JValue* result,
                                ObjPtr<mirror::Throwable>* exception,
                                bool* from_code,
                                DeoptimizationMethodType* method_type)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertHasDeoptimizationContext()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
  ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);

  // For debugger, find the shadow frame that corresponds to a frame id.
  // Or return null if there is none.
  ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // For debugger, find the bool array that keeps track of the updated vreg set
  // for a frame id.
  bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
  // For debugger, find the shadow frame that corresponds to a frame id. If
  // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
  ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                               uint32_t num_vregs,
                                               ArtMethod* method,
                                               uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Delete the entry that maps from frame_id to shadow_frame.
  void RemoveDebuggerShadowFrameMapping(size_t frame_id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // While getting this map requires the mutator lock held shared, manipulating it
  // should actually follow these rules:
  // (1) The owner of this map (the thread) can change it with its mutator lock.
  // (2) Other threads can read this map when the owner is suspended and they
  //     hold the mutator lock.
  // (3) Other threads can change this map when owning the mutator lock exclusively.
  //
  // The reason why (3) needs the mutator lock exclusively (and not just having
  // the owner suspended) is that we don't want other threads to concurrently read the map.
  //
  // TODO: Add a class abstraction to express these rules.
  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.instrumentation_stack;
  }
1090 
  // Sampling profiler support: the sampled stack trace for this thread.
  // Shares storage with verifier_deps (union); only valid outside the AOT compiler.
  std::vector<ArtMethod*>* GetStackTraceSample() const {
    DCHECK(!IsAotCompiler());
    return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
    DCHECK(!IsAotCompiler());
    tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
  }

  // AOT-compiler counterpart of the union: the verifier dependencies.
  verifier::VerifierDeps* GetVerifierDeps() const {
    DCHECK(IsAotCompiler());
    return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
  }

  // It is the responsibility of the caller to make sure the verifier_deps
  // entry in the thread is cleared before destruction of the actual VerifierDeps
  // object, or the thread.
  void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
    DCHECK(IsAotCompiler());
    DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
    tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
  }
1114 
  // Base timestamp for method tracing on this thread.
  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  // Lock-level bookkeeping: the mutex this thread holds at the given level, if any.
  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void ClearSuspendBarrier(AtomicInteger* target)
      REQUIRES(Locks::thread_suspend_count_lock_);

  // Non-atomic read of a single thread flag bit.
  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  // Non-atomic check whether any thread flag is set.
  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
  }

  void AtomicClearFlag(ThreadFlag flag) {
    // `-1 ^ flag` is the bitwise complement of `flag` (same as ~flag): clears only that bit.
    tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
  }
1149 
  // Whether the mterp interpreter is enabled for this thread (atomic flag).
  bool UseMterp() const {
    return tls32_.use_mterp.load();
  }

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const {
    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
  }

  // Returns pos offset from start.
  size_t GetTlabPosOffset() const {
    return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
  }

  // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
  size_t TlabRemainingCapacity() const {
    return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
  }

  // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
  void ExpandTlab(size_t bytes) {
    tlsPtr_.thread_local_end += bytes;
    DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
  }

  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
  bool HasTlab() const;
  void ResetTlab();
  // Raw TLAB pointers: [start, pos) is allocated, [pos, end) is free.
  uint8_t* GetTlabStart() {
    return tlsPtr_.thread_local_start;
  }
  uint8_t* GetTlabPos() {
    return tlsPtr_.thread_local_pos;
  }
  uint8_t* GetTlabEnd() {
    return tlsPtr_.thread_local_end;
  }
  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic?  I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  // Only needed if Runtime::implicit_suspend_checks_ is true and fully implemented.  It currently
  // is always false. Client code currently just looks at the thread flags directly to determine
  // whether we should suspend, so this call is currently unnecessary.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }


  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                     StackReference<mirror::Object>* end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();
1219 
  // Total bytes covered by the current TLAB (allocated region size).
  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
  }

  // Number of objects allocated from the current TLAB.
  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  // Per-thread rosalloc run cache accessors. No bounds check on `index`.
  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

  bool ProtectStack(bool fatal_on_error = true);
  bool UnprotectStack();

  // Current mterp instruction-handler base for this thread.
  void SetMterpCurrentIBase(void* ibase) {
    tlsPtr_.mterp_current_ibase = ibase;
  }

  const void* GetMterpCurrentIBase() const {
    return tlsPtr_.mterp_current_ibase;
  }

  // True while this thread is inside a signal handler.
  bool HandlingSignal() const {
    return tls32_.handling_signal_;
  }

  void SetHandlingSignal(bool handling_signal) {
    tls32_.handling_signal_ = handling_signal;
  }
1254 
  // True while the thread is in the middle of a suspended->runnable transition.
  bool IsTransitioningToRunnable() const {
    return tls32_.is_transitioning_to_runnable;
  }

  void SetIsTransitioningToRunnable(bool value) {
    tls32_.is_transitioning_to_runnable = value;
  }

  // Reference count forcing this thread to run in the interpreter;
  // guarded by the thread list lock.
  uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
    return --tls32_.force_interpreter_count;
  }

  uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
    return ++tls32_.force_interpreter_count;
  }

  void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
    tls32_.force_interpreter_count = value;
  }

  uint32_t ForceInterpreterCount() const {
    return tls32_.force_interpreter_count;
  }

  bool IsForceInterpreter() const {
    return tls32_.force_interpreter_count != 0;
  }
1282 
IncrementMakeVisiblyInitializedCounter()1283   bool IncrementMakeVisiblyInitializedCounter() {
1284     tls32_.make_visibly_initialized_counter += 1u;
1285     return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
1286   }
1287 
  // Resets the make-visibly-initialized counter to zero.
  void ClearMakeVisiblyInitializedCounter() {
    tls32_.make_visibly_initialized_counter = 0u;
  }

  void PushVerifier(verifier::MethodVerifier* verifier);
  void PopVerifier(verifier::MethodVerifier* verifier);

  void InitStringEntryPoints();

  // Adjusts the debug-only read-barrier-disallow counter by `delta`.
  void ModifyDebugDisallowReadBarrier(int8_t delta) {
    debug_disallow_read_barrier_ += delta;
  }

  uint8_t GetDebugDisallowReadBarrierCount() const {
    return debug_disallow_read_barrier_;
  }

  // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
  // do not gain ownership of TLSData and must synchronize with SetCustomTls themselves to prevent
  // it from being deleted.
  TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);

  // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
  // will be run when the thread exits or when SetCustomTLS is called again with the same key.
  void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);

  // Returns true if the current thread is the jit sensitive thread.
  bool IsJitSensitiveThread() const {
    return this == jit_sensitive_thread_;
  }

  bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1320 
1321   // Returns true if StrictMode events are traced for the current thread.
IsSensitiveThread()1322   static bool IsSensitiveThread() {
1323     if (is_sensitive_thread_hook_ != nullptr) {
1324       return (*is_sensitive_thread_hook_)();
1325     }
1326     return false;
1327   }
1328 
  // Set the read barrier marking entrypoints to be non-null.
  void SetReadBarrierEntrypoints();

  static jobject CreateCompileTimePeer(JNIEnv* env,
                                       const char* name,
                                       bool as_daemon,
                                       jobject thread_group)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns this thread's interpreter cache (always non-null).
  ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
    return &interpreter_cache_;
  }

  // Clear all thread-local interpreter caches.
  //
  // Since the caches are keyed by memory pointer to dex instructions, this must be
  // called when any dex code is unloaded (before different code gets loaded at the
  // same memory location).
  //
  // If presence of cache entry implies some pre-conditions, this must also be
  // called if the pre-conditions might no longer hold true.
  static void ClearAllInterpreterCaches();

  // Offset of interpreter_cache_ within the Thread object.
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
    return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
  }

  // log2 of the interpreter cache size, for fast index masking in stubs.
  static constexpr int InterpreterCacheSizeLog2() {
    return WhichPowerOf2(InterpreterCache::kSize);
  }
1360 
 private:
  explicit Thread(bool daemon);
  ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
  void Destroy();

  // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
  // observed to be set at the same time by instrumentation.
  void DeleteJPeer(JNIEnv* env);

  // TODO: method name is misspelled ("InTheadList"); renaming requires updating
  // the out-of-line definition and all callers.
  void NotifyInTheadList()
      REQUIRES_SHARED(Locks::thread_list_lock_);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  template <typename PeerAction>
  static Thread* Attach(const char* thread_name,
                        bool as_daemon,
                        PeerAction p);

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
                       ObjPtr<mirror::Object> peer,
                       jboolean thread_is_daemon,
                       jobject thread_group,
                       jobject thread_name,
                       jint thread_priority)
      REQUIRES_SHARED(Locks::mutator_lock_);
1390 
  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
  // Returns the previous thread state.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    if (old_state == kRunnable && new_state != kRunnable) {
      // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
      // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
      // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
      TransitionToSuspendedAndRunCheckpoints(new_state);
      // Since we transitioned to a suspended state, check the pass barrier requests.
      PassActiveSuspendBarriers();
    } else {
      // Not leaving runnable: a plain state store suffices.
      tls32_.state_and_flags.as_struct.state = new_state;
    }
    return old_state;
  }
1406 
  void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);

  // Dump helpers backing the public Dump API.
  void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os,
                 bool dump_native_stack = true,
                 BacktraceMap* backtrace_map = nullptr,
                 bool force_dump_stack = false) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);

  // pthread entry point for newly created threads.
  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initialize a thread.
  //
  // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
  // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
  // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
  // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
  // of false).
  bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
      REQUIRES(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  bool InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);

  ALWAYS_INLINE void PassActiveSuspendBarriers()
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1453 
1454   // Registers the current thread as the jit sensitive thread. Should be called just once.
SetJitSensitiveThread()1455   static void SetJitSensitiveThread() {
1456     if (jit_sensitive_thread_ == nullptr) {
1457       jit_sensitive_thread_ = Thread::Current();
1458     } else {
1459       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1460           << Thread::Current()->GetTid();
1461     }
1462   }
1463 
  // Installs the hook used to decide whether the current thread is "sensitive"
  // for StrictMode tracing; stored in is_sensitive_thread_hook_.
  static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
    is_sensitive_thread_hook_ = is_sensitive_thread_hook;
  }

  // Core implementation of suspend-count changes, adding 'delta' and optionally installing
  // 'suspend_barrier'. NOTE(review): the bool return presumably signals whether the update was
  // applied (WARN_UNUSED forces callers to check) — confirm in thread.cc.
  bool ModifySuspendCountInternal(Thread* self,
                                  int delta,
                                  AtomicInteger* suspend_barrier,
                                  SuspendReason reason)
      WARN_UNUSED
      REQUIRES(Locks::thread_suspend_count_lock_);

  // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
  // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
  // the kCheckpointRequest flag is cleared.
  void RunCheckpointFunction() REQUIRES(!Locks::thread_suspend_count_lock_);
  void RunEmptyCheckpoint();

  // Passes (decrements) this thread's active suspend barriers; 'self' is the calling thread.
  bool PassActiveSuspendBarriers(Thread* self)
      REQUIRES(!Locks::thread_suspend_count_lock_);

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  // Visits this thread's GC roots with the given visitor; kPrecise selects the precise variant.
  template <bool kPrecise>
  void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweeps the interpreter cache using the GC's is-marked visitor.
  void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsAotCompiler();

  // Out-of-line part of ReleaseLongJumpContext.
  void ReleaseLongJumpContextInternal();
1495 
  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    // View of the 32 bits as two 16-bit halves: flags and state.
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlag for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    // The same 32 bits viewed as one atomic integer, for CAS-based updates.
    AtomicInteger as_atomic_int;
    // Raw volatile view of the same 32 bits.
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
1519 
  // Per-thread exit callback; 'arg' is the exiting Thread*.
  // NOTE(review): presumably registered as the destructor for pthread_key_self_ — confirm.
  static void ThreadExitCallback(void* arg);

  // Maximum number of suspend barriers.
  static constexpr uint32_t kMaxSuspendBarriers = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume, they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Hook passed by framework which returns true
  // when StrictMode events are traced for the current thread.
  static bool (*is_sensitive_thread_hook_)();
  // Stores the jit sensitive thread (which for now is the UI thread).
  static Thread* jit_sensitive_thread_;

  // Trigger threshold for make_visibly_initialized_counter (see tls32_ below).
  static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/
1548 
  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    // Zero-initializes all counters; only the daemon flag is configurable at construction.
    explicit tls_32bit_sized_values(bool is_daemon)
        : suspend_count(0),
          thin_lock_thread_id(0),
          tid(0),
          daemon(is_daemon),
          throwing_OutOfMemoryError(false),
          no_thread_suspension(0),
          thread_exit_check_count(0),
          handling_signal_(false),
          is_transitioning_to_runnable(false),
          ready_for_debug_invoke(false),
          debug_method_entry_(false),
          is_gc_marking(false),
          weak_ref_access_enabled(true),
          disable_thread_flip_count(0),
          user_code_suspend_count(0),
          force_interpreter_count(0),
          use_mterp(0),
          make_visibly_initialized_counter(0),
          define_class_counter(0) {}

    // The thread's run state and pending-operation flags; see union StateAndFlags above.
    union StateAndFlags state_and_flags;
    static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
                  "Size of state_and_flags and int32 are different");

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // True if signal is being handled by this thread.
    bool32_t handling_signal_;

    // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
    // non-runnable threads (eg. kNative, kWaiting) that are about to transition to runnable from
    // the rest of them.
    bool32_t is_transitioning_to_runnable;

    // True if the thread has been suspended by a debugger event. This is
    // used to invoke method from the debugger which is only allowed when
    // the thread is suspended by an event.
    bool32_t ready_for_debug_invoke;

    // True if the thread enters a method. This is used to detect method entry
    // event for the debugger.
    bool32_t debug_method_entry_;

    // True if the GC is in the marking phase. This is used for the CC collector only. This is
    // thread local so that we can simplify the logic to check for the fast path of read barriers of
    // GC roots.
    bool32_t is_gc_marking;

    // Thread "interrupted" status; stays raised until queried or thrown.
    Atomic<bool32_t> interrupted;

    // Park/unpark state word for java.lang.Thread parking support.
    // NOTE(review): exact encoding is defined at the use sites — confirm in thread.cc.
    AtomicInteger park_state_;

    // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
    // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
    // processing of the CC collector only. This is thread local so that we can enable/disable weak
    // ref access by using a checkpoint and avoid a race around the time weak ref access gets
    // disabled and concurrent reference processing begins (if weak ref access is disabled during a
    // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
    // ReferenceProcessor::EnableSlowPath().
    bool32_t weak_ref_access_enabled;

    // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
    // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
    // critical section enter.
    uint32_t disable_thread_flip_count;

    // How much of 'suspend_count_' is by request of user code, used to distinguish threads
    // suspended by the runtime from those suspended by user code.
    // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
    // told that AssertHeld should be good enough.
    int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Count of how many times this thread has been forced to interpreter. If this is not 0 the
    // thread must remain in interpreted code as much as possible.
    uint32_t force_interpreter_count;

    // True if everything is in the ideal state for fast interpretation.
    // False if we need to switch to the C++ interpreter to handle special cases.
    std::atomic<bool32_t> use_mterp;

    // Counter for calls to initialize a class that's initialized but not visibly initialized.
    // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
    // make initialized classes visibly initialized. This is needed because we usually make
    // classes visibly initialized in batches but we do not want to be stuck with a class
    // initialized but not visibly initialized for a long time even if no more classes are
    // being initialized anymore.
    uint32_t make_visibly_initialized_counter;

    // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
    // for threads to be done with class-definition work.
    uint32_t define_class_counter;
  } tls32_;
1672 
  // 64-bit-sized thread-local values, grouped for predictable layout.
  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0) {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Per-thread runtime statistics (allocation counts, etc.; see RuntimeStats).
    RuntimeStats stats;
  } tls64_;
1682 
  // Pointer-sized thread-local values. Field order matters: several offsets are used from
  // compiled code / assembly (see asm_support.h), so do not rearrange.
  struct PACKED(sizeof(void*)) tls_ptr_sized_values {
      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
      self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
      deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr),
      stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
      frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
      last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
      thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
      thread_local_limit(nullptr),
      thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
      thread_local_alloc_stack_end(nullptr),
      flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
      async_exception(nullptr), top_reflective_handle_scope(nullptr) {
      std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
    }

    // The biased card table, see CardTable for details.
    uint8_t* card_table;

    // The pending exception or null.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    uint8_t* stack_end;

    // The top of the managed stack often manipulated directly by compiler generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment
    JNIEnvExt* jni_env;

    // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
    // created thread.
    JNIEnvExt* tmp_jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    // start up, until the thread is registered and the local opeer_ is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    uint8_t* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // Sampling profiler and AOT verification cannot happen on the same run, so we share
    // the same entry for the stack trace and the verifier deps.
    union DepsOrStackTraceSample {
      DepsOrStackTraceSample() {
        verifier_deps = nullptr;
        stack_trace_sample = nullptr;
      }
      // Pointer to previous stack trace captured by sampling profiler.
      std::vector<ArtMethod*>* stack_trace_sample;
      // When doing AOT verification, per-thread VerifierDeps.
      verifier::VerifierDeps* verifier_deps;
    } deps_or_stack_trace_sample;

    // The next thread in the wait set this thread is part of or null if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or null for none.
    BaseHandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    jobject class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::map is not PACKED.
    // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
    // ordered iteration on the keys (which are stack addresses).
    // Also see Thread::GetInstrumentationStack for the requirements on
    // manipulating and reading this map.
    std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // For gc purpose, a shadow frame record stack that keeps track of:
    // 1) shadow frames under construction.
    // 2) deoptimization shadow frames.
    StackedShadowFrameRecord* stacked_shadow_frame_record;

    // Deoptimization return value record stack.
    DeoptimizationContextRecord* deoptimization_context_stack;

    // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
    // Shadow frames may be created before deoptimization happens so that the debugger can
    // set local values there first.
    FrameIdToShadowFrame* frame_id_to_shadow_frame;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
    // requests another checkpoint, it goes to the checkpoint overflow list.
    Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Pending barriers that require passing or NULL if non-pending. Installation guarding by
    // Locks::thread_suspend_count_lock_.
    // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
    // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
    AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];

    // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
    uint8_t* thread_local_start;

    // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
    // potentially better performance.
    uint8_t* thread_local_pos;
    uint8_t* thread_local_end;

    // Thread local limit is how much we can expand the thread local buffer to, it is greater or
    // equal to thread_local_end.
    uint8_t* thread_local_limit;

    // Number of objects in the current thread-local allocation buffer.
    size_t thread_local_objects;

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Mterp jump table base.
    void* mterp_current_ibase;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // The function used for thread flip.
    Closure* flip_function;

    // Current method verifier, used for root marking.
    verifier::MethodVerifier* method_verifier;

    // Thread-local mark stack for the concurrent copying collector.
    gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;

    // The pending async-exception or null.
    mirror::Throwable* async_exception;

    // Top of the linked-list for reflective-handle scopes or null if none.
    BaseReflectiveHandleScope* top_reflective_handle_scope;
  } tlsPtr_;
1858 
  // Small thread-local cache to be used from the interpreter.
  // It is keyed by dex instruction pointer.
  // The value is opcode-dependent (e.g. field offset).
  InterpreterCache interpreter_cache_;

  // All fields below this line should not be accessed by native code. This means these fields can
  // be modified, rearranged, added or removed without having to modify asm_support.h

  // Guards the 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Debug disable read barrier count, only is checked for debug builds and only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  // Note that it is not in the packed struct, may not be accessed for cross compilation.
  uintptr_t poison_object_cookie_ = 0;

  // Pending extra checkpoints if checkpoint_function_ is already used.
  std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
  // compiled code or entrypoints.
  SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
      GUARDED_BY(Locks::custom_tls_lock_);

#ifndef __BIONIC__
  // Fast C++11 thread_local slot for the current Thread*; the initial-exec TLS model avoids the
  // slower general-dynamic access sequence.
  __attribute__((tls_model("initial-exec")))
  static thread_local Thread* self_tls_;
#endif

  // True if the thread is some form of runtime thread (ex, GC or JIT).
  bool is_runtime_thread_;

  // Set during execution of JNI methods that get field and method id's as part of determining if
  // the caller is allowed to access all fields and methods in the Core Platform API.
  uint32_t core_platform_api_cookie_ = 0;

  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.
  friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};
1913 
1914 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1915  public:
1916   ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
1917                                                bool enabled = true)
ACQUIRE(Roles::uninterruptible_)1918       ACQUIRE(Roles::uninterruptible_)
1919       : enabled_(enabled) {
1920     if (!enabled_) {
1921       return;
1922     }
1923     if (kIsDebugBuild) {
1924       self_ = Thread::Current();
1925       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1926     } else {
1927       Roles::uninterruptible_.Acquire();  // No-op.
1928     }
1929   }
~ScopedAssertNoThreadSuspension()1930   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1931     if (!enabled_) {
1932       return;
1933     }
1934     if (kIsDebugBuild) {
1935       self_->EndAssertNoThreadSuspension(old_cause_);
1936     } else {
1937       Roles::uninterruptible_.Release();  // No-op.
1938     }
1939   }
1940 
1941  private:
1942   Thread* self_;
1943   const bool enabled_;
1944   const char* old_cause_;
1945 };
1946 
1947 class ScopedAllowThreadSuspension {
1948  public:
ScopedAllowThreadSuspension()1949   ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
1950     if (kIsDebugBuild) {
1951       self_ = Thread::Current();
1952       old_cause_ = self_->EndAssertNoThreadSuspension();
1953     } else {
1954       Roles::uninterruptible_.Release();  // No-op.
1955     }
1956   }
~ScopedAllowThreadSuspension()1957   ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
1958     if (kIsDebugBuild) {
1959       CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
1960     } else {
1961       Roles::uninterruptible_.Acquire();  // No-op.
1962     }
1963   }
1964 
1965  private:
1966   Thread* self_;
1967   const char* old_cause_;
1968 };
1969 
1970 
1971 class ScopedStackedShadowFramePusher {
1972  public:
ScopedStackedShadowFramePusher(Thread * self,ShadowFrame * sf,StackedShadowFrameType type)1973   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1974     : self_(self), type_(type) {
1975     self_->PushStackedShadowFrame(sf, type);
1976   }
~ScopedStackedShadowFramePusher()1977   ~ScopedStackedShadowFramePusher() {
1978     self_->PopStackedShadowFrame(type_);
1979   }
1980 
1981  private:
1982   Thread* const self_;
1983   const StackedShadowFrameType type_;
1984 
1985   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1986 };
1987 
1988 // Only works for debug builds.
1989 class ScopedDebugDisallowReadBarriers {
1990  public:
ScopedDebugDisallowReadBarriers(Thread * self)1991   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1992     self_->ModifyDebugDisallowReadBarrier(1);
1993   }
~ScopedDebugDisallowReadBarriers()1994   ~ScopedDebugDisallowReadBarriers() {
1995     self_->ModifyDebugDisallowReadBarrier(-1);
1996   }
1997 
1998  private:
1999   Thread* const self_;
2000 };
2001 
2002 class ScopedTransitioningToRunnable : public ValueObject {
2003  public:
ScopedTransitioningToRunnable(Thread * self)2004   explicit ScopedTransitioningToRunnable(Thread* self)
2005       : self_(self) {
2006     DCHECK_EQ(self, Thread::Current());
2007     if (kUseReadBarrier) {
2008       self_->SetIsTransitioningToRunnable(true);
2009     }
2010   }
2011 
~ScopedTransitioningToRunnable()2012   ~ScopedTransitioningToRunnable() {
2013     if (kUseReadBarrier) {
2014       self_->SetIsTransitioningToRunnable(false);
2015     }
2016   }
2017 
2018  private:
2019   Thread* const self_;
2020 };
2021 
2022 class ThreadLifecycleCallback {
2023  public:
~ThreadLifecycleCallback()2024   virtual ~ThreadLifecycleCallback() {}
2025 
2026   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2027   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2028 };
2029 
// Store an exception from the thread and suppress it for the duration of this object.
class ScopedExceptionStorage {
 public:
  // Captures and clears 'self''s pending exception (defined in thread.cc).
  explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  // Discards the previously stored exception in favor of any new pending one.
  // NOTE(review): exact semantics live in thread.cc — confirm before relying on them.
  void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
  // Restores the stored exception as the thread's pending exception.
  ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  StackHandleScope<1> hs_;
  // Handle keeping the stored exception alive (and visible to GC) while suppressed.
  MutableHandle<mirror::Throwable> excp_;
};
2042 
2043 std::ostream& operator<<(std::ostream& os, const Thread& thread);
2044 std::ostream& operator<<(std::ostream& os, StackedShadowFrameType thread);
2045 
2046 }  // namespace art
2047 
2048 #endif  // ART_RUNTIME_THREAD_H_
2049