1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <bitset>
21 #include <deque>
22 #include <iosfwd>
23 #include <list>
24 #include <memory>
25 #include <setjmp.h>
26 #include <string>
27 
28 #include "arch/context.h"
29 #include "arch/instruction_set.h"
30 #include "atomic.h"
31 #include "base/macros.h"
32 #include "base/mutex.h"
33 #include "entrypoints/jni/jni_entrypoints.h"
34 #include "entrypoints/quick/quick_entrypoints.h"
35 #include "globals.h"
36 #include "handle_scope.h"
37 #include "instrumentation.h"
38 #include "jvalue.h"
39 #include "object_callbacks.h"
40 #include "offsets.h"
41 #include "runtime_stats.h"
42 #include "stack.h"
43 #include "thread_state.h"
44 
45 class BacktraceMap;
46 
47 namespace art {
48 
49 namespace gc {
50 namespace accounting {
51   template<class T> class AtomicStack;
52 }  // namespace accounting
53 namespace collector {
54   class SemiSpace;
55 }  // namespace collector
56 }  // namespace gc
57 
58 namespace mirror {
59   class Array;
60   class Class;
61   class ClassLoader;
62   class Object;
63   template<class T> class ObjectArray;
64   template<class T> class PrimitiveArray;
65   typedef PrimitiveArray<int32_t> IntArray;
66   class StackTraceElement;
67   class String;
68   class Throwable;
69 }  // namespace mirror
70 
71 namespace verifier {
72 class MethodVerifier;
73 }  // namespace verifier
74 
75 class ArtMethod;
76 class BaseMutex;
77 class ClassLinker;
78 class Closure;
79 class Context;
80 struct DebugInvokeReq;
81 class DeoptimizationContextRecord;
82 class DexFile;
83 class FrameIdToShadowFrame;
84 class JavaVMExt;
85 struct JNIEnvExt;
86 class Monitor;
87 class Runtime;
88 class ScopedObjectAccessAlreadyRunnable;
89 class ShadowFrame;
90 class SingleStepControl;
91 class StackedShadowFrameRecord;
92 class Thread;
93 class ThreadList;
94 
95 // Thread priorities. These must match the Thread.MIN_PRIORITY,
96 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
97 enum ThreadPriority {
98   kMinThreadPriority = 1,
99   kNormThreadPriority = 5,
100   kMaxThreadPriority = 10,
101 };
102 
103 enum ThreadFlag {
104   kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
105                           // safepoint handler.
106   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
107   kActiveSuspendBarrier = 4  // Register that at least 1 suspend barrier needs to be passed.
108 };
109 
110 enum class StackedShadowFrameType {
111   kShadowFrameUnderConstruction,
112   kDeoptimizationShadowFrame,
113   kSingleFrameDeoptimizationShadowFrame
114 };
115 
116 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
117 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
118 
119 // Thread's stack layout for implicit stack overflow checks:
120 //
121 //   +---------------------+  <- highest address of stack memory
122 //   |                     |
123 //   .                     .  <- SP
124 //   |                     |
125 //   |                     |
126 //   +---------------------+  <- stack_end
127 //   |                     |
128 //   |  Gap                |
129 //   |                     |
130 //   +---------------------+  <- stack_begin
131 //   |                     |
132 //   | Protected region    |
133 //   |                     |
134 //   +---------------------+  <- lowest address of stack memory
135 //
136 // The stack always grows down in memory.  At the lowest address is a region of memory
137 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
138 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
139 // between the stack_end and the highest address in stack memory.  An implicit stack
140 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
141 // If the thread's SP is below the stack_end address this will be a read into the protected
142 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
143 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
144 // if the thread makes a call out to a native function (through JNI), that native function
145 // might only have 4K of memory (if the SP is adjacent to stack_end).
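//
// Illustrative sketch (editor's addition, not part of the original source): with a typical 4K
// probe offset, the implicit check emitted in a method prologue amounts to
//
//   uint8_t* sp = <current stack pointer>;
//   volatile uint8_t probe = *(sp - 4 * 1024);  // faults if SP has dropped below stack_end,
//                                               // since the read lands in the protected region
//
// and the resulting SIGSEGV is then turned into a StackOverflowError by the runtime's fault
// handler.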
146 
147 class Thread {
148  public:
149   static const size_t kStackOverflowImplicitCheckSize;
150 
151   // Creates a new native thread corresponding to the given managed peer.
152   // Used to implement Thread.start.
153   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
154 
155   // Attaches the calling native thread to the runtime, returning the new native peer.
156   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
157   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
158                         bool create_peer);
159 
160   // Reset internal state of child thread after fork.
161   void InitAfterFork();
162 
163   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
164   // high cost and so we favor passing self around when possible.
165   // TODO: mark as PURE so the compiler may coalesce and remove?
166   static Thread* Current();
167 
168   // On a runnable thread, check for pending thread suspension request and handle if pending.
169   void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_);
170 
171   // Process pending thread suspension request and handle if pending.
172   void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_);
173 
174   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
175                                    mirror::Object* thread_peer)
176       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
177       SHARED_REQUIRES(Locks::mutator_lock_);
178   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
179       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
180       SHARED_REQUIRES(Locks::mutator_lock_);
181 
182   // Translates a thread offset such as 172 into a name such as pAllocArrayFromCode.
183   template<size_t size_of_pointers>
184   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
185 
186   // Dumps a one-line summary of thread state (used for operator<<).
187   void ShortDump(std::ostream& os) const;
188 
189   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
190   void Dump(std::ostream& os,
191             bool dump_native_stack = true,
192             BacktraceMap* backtrace_map = nullptr) const
193       REQUIRES(!Locks::thread_suspend_count_lock_)
194       SHARED_REQUIRES(Locks::mutator_lock_);
195 
196   void DumpJavaStack(std::ostream& os) const
197       REQUIRES(!Locks::thread_suspend_count_lock_)
198       SHARED_REQUIRES(Locks::mutator_lock_);
199 
200   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
201   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
202   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
203       REQUIRES(!Locks::thread_suspend_count_lock_)
204       SHARED_REQUIRES(Locks::mutator_lock_);
205 
206   ThreadState GetState() const {
207     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
208     DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
209     return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
210   }
211 
212   ThreadState SetState(ThreadState new_state);
213 
214   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
215     return tls32_.suspend_count;
216   }
217 
218   int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
219     return tls32_.debug_suspend_count;
220   }
221 
222   bool IsSuspended() const {
223     union StateAndFlags state_and_flags;
224     state_and_flags.as_int = tls32_.state_and_flags.as_int;
225     return state_and_flags.as_struct.state != kRunnable &&
226         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
227   }
228 
229   bool ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, bool for_debugger)
230       REQUIRES(Locks::thread_suspend_count_lock_);
231 
232   bool RequestCheckpoint(Closure* function)
233       REQUIRES(Locks::thread_suspend_count_lock_);
234 
235   void SetFlipFunction(Closure* function);
236   Closure* GetFlipFunction();
237 
238   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
239     CHECK(kUseReadBarrier);
240     return tlsPtr_.thread_local_mark_stack;
241   }
242   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
243     CHECK(kUseReadBarrier);
244     tlsPtr_.thread_local_mark_stack = stack;
245   }
246 
247   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share
248   // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
249   void FullSuspendCheck()
250       REQUIRES(!Locks::thread_suspend_count_lock_)
251       SHARED_REQUIRES(Locks::mutator_lock_);
252 
253   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
254   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
255       REQUIRES(!Locks::thread_suspend_count_lock_)
256       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
257 
258   // Transition from runnable into a state where mutator privileges are denied. Releases share of
259   // mutator lock.
260   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
261       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
262       UNLOCK_FUNCTION(Locks::mutator_lock_);
263 
264   // Once called thread suspension will cause an assertion failure.
265   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
266     Roles::uninterruptible_.Acquire();  // No-op.
267     if (kIsDebugBuild) {
268       CHECK(cause != nullptr);
269       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
270       tls32_.no_thread_suspension++;
271       tlsPtr_.last_no_thread_suspension_cause = cause;
272       return previous_cause;
273     } else {
274       return nullptr;
275     }
276   }
277 
278   // End region where no thread suspension is expected.
279   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
280     if (kIsDebugBuild) {
281       CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
282       CHECK_GT(tls32_.no_thread_suspension, 0U);
283       tls32_.no_thread_suspension--;
284       tlsPtr_.last_no_thread_suspension_cause = old_cause;
285     }
286     Roles::uninterruptible_.Release();  // No-op.
287   }
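
  // Editor's sketch of typical paired use of the two assertions above (the cause string is
  // arbitrary; real code usually prefers an RAII helper such as ScopedAssertNoThreadSuspension,
  // where available, so the End call cannot be forgotten):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Reading raw object fields");
  //   ...  // code that must not suspend: no allocation, no waiting on the GC, etc.
  //   self->EndAssertNoThreadSuspension(old_cause);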
288 
289   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
290 
291   bool IsDaemon() const {
292     return tls32_.daemon;
293   }
294 
295   size_t NumberOfHeldMutexes() const;
296 
297   bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_);
298 
299   /*
300    * Changes the priority of this thread to match that of the java.lang.Thread object.
301    *
302    * We map a priority value from 1-10 to Linux "nice" values, where lower
303    * numbers indicate higher priority.
304    */
305   void SetNativePriority(int newPriority);
306 
307   /*
308    * Returns the thread priority for the current thread by querying the system.
309    * This is useful when attaching a thread through JNI.
310    *
311    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
312    */
313   static int GetNativePriority();
314 
315   // Guaranteed to be non-zero.
316   uint32_t GetThreadId() const {
317     return tls32_.thin_lock_thread_id;
318   }
319 
320   pid_t GetTid() const {
321     return tls32_.tid;
322   }
323 
324   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
325   mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
326       SHARED_REQUIRES(Locks::mutator_lock_);
327 
328   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
329   // allocation, or locking.
330   void GetThreadName(std::string& name) const;
331 
332   // Sets the thread's name.
333   void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_);
334 
335   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
336   uint64_t GetCpuMicroTime() const;
337 
338   mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) {
339     CHECK(tlsPtr_.jpeer == nullptr);
340     return tlsPtr_.opeer;
341   }
342 
343   bool HasPeer() const {
344     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
345   }
346 
347   RuntimeStats* GetStats() {
348     return &tls64_.stats;
349   }
350 
351   bool IsStillStarting() const;
352 
353   bool IsExceptionPending() const {
354     return tlsPtr_.exception != nullptr;
355   }
356 
357   mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) {
358     return tlsPtr_.exception;
359   }
360 
361   void AssertPendingException() const;
362   void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_);
363   void AssertNoPendingException() const;
364   void AssertNoPendingExceptionForNewException(const char* msg) const;
365 
366   void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);
367 
368   void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
369     tlsPtr_.exception = nullptr;
370   }
371 
372   // Find the catch block and perform a long jump to the appropriate exception handler.
373   NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_);
374 
375   Context* GetLongJumpContext();
376   void ReleaseLongJumpContext(Context* context) {
377     if (tlsPtr_.long_jump_context != nullptr) {
378       // Each QuickExceptionHandler gets a long jump context and uses
379       // it for doing the long jump, after finding catch blocks/doing deoptimization.
380       // Both finding catch blocks and deoptimization can trigger another
381       // exception such as a result of class loading. So there can be nested
382       // cases of exception handling and multiple contexts being used.
383       // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
384       // for reuse so there is no need to always allocate a new one each time when
385       // getting a context. Since we only keep one context for reuse, delete the
386       // existing one since the passed in context is yet to be used for longjump.
387       delete tlsPtr_.long_jump_context;
388     }
389     tlsPtr_.long_jump_context = context;
390   }
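
  // Editor's sketch of the intended pairing of GetLongJumpContext()/ReleaseLongJumpContext()
  // (illustrative only; QuickExceptionHandler is the real client, as noted above):
  //
  //   Context* context = self->GetLongJumpContext();  // reuses the cached context if one exists
  //   ...                                             // walk the stack / fill in the context
  //   self->ReleaseLongJumpContext(context);          // cache it again for the next exception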
391 
392   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
393   // abort the runtime iff abort_on_error is true.
394   ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
395       SHARED_REQUIRES(Locks::mutator_lock_);
396 
397   // Returns whether the given exception was thrown by the current Java method being executed
398   // (Note that this includes native Java methods).
399   bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
400       SHARED_REQUIRES(Locks::mutator_lock_);
401 
402   void SetTopOfStack(ArtMethod** top_method) {
403     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
404   }
405 
406   void SetTopOfShadowStack(ShadowFrame* top) {
407     tlsPtr_.managed_stack.SetTopShadowFrame(top);
408   }
409 
410   bool HasManagedStack() const {
411     return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
412         (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
413   }
414 
415   // If 'msg' is null, no detail message is set.
416   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
417       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
418 
419   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
420   // used as the new exception's cause.
421   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
422       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
423 
424   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
425       __attribute__((format(printf, 3, 4)))
426       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
427 
428   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
429       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
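
  // Editor's sketch of how the Throw* helpers above are typically invoked (the descriptor and
  // message below are made-up examples; descriptors use the JNI "Lpackage/Class;" form):
  //
  //   self->ThrowNewException("Ljava/lang/IllegalStateException;", "thread not attached");
  //   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);
  //
  // With ThrowNewWrappedException the currently pending exception becomes the new one's cause.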
430 
431   // OutOfMemoryError is special, because we need to pre-allocate an instance.
432   // Only the GC should call this.
433   void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_)
434       REQUIRES(!Roles::uninterruptible_);
435 
436   static void Startup();
437   static void FinishStartup();
438   static void Shutdown();
439 
440   // JNI methods
441   JNIEnvExt* GetJniEnv() const {
442     return tlsPtr_.jni_env;
443   }
444 
445   // Convert a jobject into an Object*.
446   mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
447   // Checks if the weak global ref has been cleared by the GC without decoding it.
448   bool IsJWeakCleared(jweak obj) const SHARED_REQUIRES(Locks::mutator_lock_);
449 
450   mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
451     return tlsPtr_.monitor_enter_object;
452   }
453 
454   void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
455     tlsPtr_.monitor_enter_object = obj;
456   }
457 
458   // Implements java.lang.Thread.interrupted.
459   bool Interrupted() REQUIRES(!*wait_mutex_);
460   // Implements java.lang.Thread.isInterrupted.
461   bool IsInterrupted() REQUIRES(!*wait_mutex_);
462   bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
463     return interrupted_;
464   }
465   void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
466   void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
467     interrupted_ = i;
468   }
469   void Notify() REQUIRES(!*wait_mutex_);
470 
471  private:
472   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
473 
474  public:
475   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
476     return wait_mutex_;
477   }
478 
479   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
480     return wait_cond_;
481   }
482 
483   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
484     return wait_monitor_;
485   }
486 
487   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
488     wait_monitor_ = mon;
489   }
490 
491   // Waiter linked-list support.
492   Thread* GetWaitNext() const {
493     return tlsPtr_.wait_next;
494   }
495 
496   void SetWaitNext(Thread* next) {
497     tlsPtr_.wait_next = next;
498   }
499 
500   jobject GetClassLoaderOverride() {
501     return tlsPtr_.class_loader_override;
502   }
503 
504   void SetClassLoaderOverride(jobject class_loader_override);
505 
506   // Create the internal representation of a stack trace, which is more time-
507   // and space-efficient to compute than the StackTraceElement[].
508   template<bool kTransactionActive>
509   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
510       SHARED_REQUIRES(Locks::mutator_lock_);
511 
512   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
513   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
514   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
515   // with the number of valid frames in the returned array.
516   static jobjectArray InternalStackTraceToStackTraceElementArray(
517       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
518       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
519       SHARED_REQUIRES(Locks::mutator_lock_);
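
  // Editor's sketch of the two-step use of the helpers above, assuming 'soa' is a
  // ScopedObjectAccessAlreadyRunnable and no transaction is active (kTransactionActive = false):
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);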
520 
521   bool HasDebuggerShadowFrames() const {
522     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
523   }
524 
525   void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
526 
527   ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_);
528 
529   //
530   // Offsets of various members of native Thread class, used by compiled code.
531   //
532 
533   template<size_t pointer_size>
534   static ThreadOffset<pointer_size> ThinLockIdOffset() {
535     return ThreadOffset<pointer_size>(
536         OFFSETOF_MEMBER(Thread, tls32_) +
537         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
538   }
539 
540   template<size_t pointer_size>
541   static ThreadOffset<pointer_size> ThreadFlagsOffset() {
542     return ThreadOffset<pointer_size>(
543         OFFSETOF_MEMBER(Thread, tls32_) +
544         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
545   }
546 
547   template<size_t pointer_size>
548   static ThreadOffset<pointer_size> IsGcMarkingOffset() {
549     return ThreadOffset<pointer_size>(
550         OFFSETOF_MEMBER(Thread, tls32_) +
551         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
552   }
553 
554   // Deoptimize the Java stack.
555   void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);
556 
557  private:
558   template<size_t pointer_size>
559   static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
560     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
561     size_t scale;
562     size_t shrink;
563     if (pointer_size == sizeof(void*)) {
564       scale = 1;
565       shrink = 1;
566     } else if (pointer_size > sizeof(void*)) {
567       scale = pointer_size / sizeof(void*);
568       shrink = 1;
569     } else {
570       DCHECK_GT(sizeof(void*), pointer_size);
571       scale = 1;
572       shrink = sizeof(void*) / pointer_size;
573     }
574     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
575   }
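
  // Editor's note, worked example of the scaling above: when generating code for a 32-bit target
  // from a 64-bit host (pointer_size == 4, sizeof(void*) == 8), shrink == 2, so a member at host
  // byte offset 24 (the fourth pointer-sized slot of tlsPtr_) maps to target offset 24 / 2 == 12,
  // which is again the fourth 4-byte slot. The symmetric case (64-bit target, 32-bit host) uses
  // scale == 2 instead.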
576 
577  public:
578   static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
579                                                 size_t pointer_size) {
580     DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
581     if (pointer_size == 4) {
582       return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
583     } else {
584       return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
585     }
586   }
587 
588   template<size_t pointer_size>
589   static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
590     return ThreadOffsetFromTlsPtr<pointer_size>(
591         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
592   }
593 
594   template<size_t pointer_size>
595   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
596     return ThreadOffsetFromTlsPtr<pointer_size>(
597         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
598   }
599 
600   template<size_t pointer_size>
601   static ThreadOffset<pointer_size> SelfOffset() {
602     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
603   }
604 
605   template<size_t pointer_size>
606   static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
607     return ThreadOffsetFromTlsPtr<pointer_size>(
608         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
609   }
610 
611   template<size_t pointer_size>
612   static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
613     return ThreadOffsetFromTlsPtr<pointer_size>(
614         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
615   }
616 
617   template<size_t pointer_size>
618   static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
619     return ThreadOffsetFromTlsPtr<pointer_size>(
620         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
621   }
622 
623   template<size_t pointer_size>
624   static ThreadOffset<pointer_size> ExceptionOffset() {
625     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
626   }
627 
628   template<size_t pointer_size>
629   static ThreadOffset<pointer_size> PeerOffset() {
630     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
631   }
632 
633 
634   template<size_t pointer_size>
635   static ThreadOffset<pointer_size> CardTableOffset() {
636     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
637   }
638 
639   template<size_t pointer_size>
640   static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
641     return ThreadOffsetFromTlsPtr<pointer_size>(
642         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
643   }
644 
645   template<size_t pointer_size>
646   static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
647     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
648   }
649 
650   template<size_t pointer_size>
651   static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
652     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
653   }
654 
655   template<size_t pointer_size>
656   static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
657     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
658   }
659 
660   template<size_t pointer_size>
661   static ThreadOffset<pointer_size> RosAllocRunsOffset() {
662     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
663                                                                 rosalloc_runs));
664   }
665 
666   template<size_t pointer_size>
667   static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
668     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
669                                                                 thread_local_alloc_stack_top));
670   }
671 
672   template<size_t pointer_size>
673   static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
674     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
675                                                                 thread_local_alloc_stack_end));
676   }
677 
678   // Size of stack less any space reserved for stack overflow
679   size_t GetStackSize() const {
680     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
681   }
682 
683   uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
684     if (implicit_overflow_check) {
685       // The interpreter needs the extra overflow bytes that stack_end does
686       // not include.
687       return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
688     } else {
689       return tlsPtr_.stack_end;
690     }
691   }
692 
693   uint8_t* GetStackEnd() const {
694     return tlsPtr_.stack_end;
695   }
696 
697   // Set the stack end to the value to be used while handling a stack overflow.
698   void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_);
699 
700   // Reset the stack end to the value used during regular execution.
701   void ResetDefaultStackEnd() {
702     // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
703     // to throw a StackOverflowError.
704     tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
705   }
706 
707   // Install the protected region for implicit stack checks.
708   void InstallImplicitProtection();
709 
710   bool IsHandlingStackOverflow() const {
711     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
712   }
713 
714   template<size_t pointer_size>
715   static ThreadOffset<pointer_size> StackEndOffset() {
716     return ThreadOffsetFromTlsPtr<pointer_size>(
717         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
718   }
719 
720   template<size_t pointer_size>
721   static ThreadOffset<pointer_size> JniEnvOffset() {
722     return ThreadOffsetFromTlsPtr<pointer_size>(
723         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
724   }
725 
726   template<size_t pointer_size>
727   static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
728     return ThreadOffsetFromTlsPtr<pointer_size>(
729         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
730         ManagedStack::TopQuickFrameOffset());
731   }
732 
733   const ManagedStack* GetManagedStack() const {
734     return &tlsPtr_.managed_stack;
735   }
736 
737   // Linked list recording fragments of managed stack.
738   void PushManagedStackFragment(ManagedStack* fragment) {
739     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
740   }
741   void PopManagedStackFragment(const ManagedStack& fragment) {
742     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
743   }
744 
745   ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
746     return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
747   }
748 
749   ShadowFrame* PopShadowFrame() {
750     return tlsPtr_.managed_stack.PopShadowFrame();
751   }
752 
753   template<size_t pointer_size>
754   static ThreadOffset<pointer_size> TopShadowFrameOffset() {
755     return ThreadOffsetFromTlsPtr<pointer_size>(
756         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
757         ManagedStack::TopShadowFrameOffset());
758   }
759 
760   // Number of references allocated in JNI ShadowFrames on this thread.
761   size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) {
762     return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
763   }
764 
765   // Number of references in handle scope on this thread.
766   size_t NumHandleReferences();
767 
768   // Number of references allocated in handle scopes & JNI shadow frames on this thread.
769   size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) {
770     return NumHandleReferences() + NumJniShadowFrameReferences();
771   }
772 
773   // Is the given obj in this thread's stack indirect reference table?
774   bool HandleScopeContains(jobject obj) const;
775 
776   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
777       SHARED_REQUIRES(Locks::mutator_lock_);
778 
779   HandleScope* GetTopHandleScope() {
780     return tlsPtr_.top_handle_scope;
781   }
782 
783   void PushHandleScope(HandleScope* handle_scope) {
784     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
785     tlsPtr_.top_handle_scope = handle_scope;
786   }
787 
788   HandleScope* PopHandleScope() {
789     HandleScope* handle_scope = tlsPtr_.top_handle_scope;
790     DCHECK(handle_scope != nullptr);
791     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
792     return handle_scope;
793   }
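
  // Editor's sketch of the expected push/pop discipline for the handle scope stack above
  // (illustrative; real code normally uses an RAII type such as StackHandleScope rather than
  // calling these directly):
  //
  //   HandleScope* scope = ...;              // scope whose link is the current top_handle_scope
  //   self->PushHandleScope(scope);          // DCHECKs that scope->GetLink() == current top
  //   ...                                    // references held in 'scope' are now thread roots
  //   HandleScope* popped = self->PopHandleScope();
  //   DCHECK(popped == scope);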
794 
795   template<size_t pointer_size>
796   static ThreadOffset<pointer_size> TopHandleScopeOffset() {
797     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
798                                                                 top_handle_scope));
799   }
800 
801   DebugInvokeReq* GetInvokeReq() const {
802     return tlsPtr_.debug_invoke_req;
803   }
804 
805   SingleStepControl* GetSingleStepControl() const {
806     return tlsPtr_.single_step_control;
807   }
808 
809   // Indicates whether this thread is ready to invoke a method for debugging. This
810   // is only true if the thread has been suspended by a debug event.
811   bool IsReadyForDebugInvoke() const {
812     return tls32_.ready_for_debug_invoke;
813   }
814 
815   void SetReadyForDebugInvoke(bool ready) {
816     tls32_.ready_for_debug_invoke = ready;
817   }
818 
819   bool IsDebugMethodEntry() const {
820     return tls32_.debug_method_entry_;
821   }
822 
823   void SetDebugMethodEntry() {
824     tls32_.debug_method_entry_ = true;
825   }
826 
827   void ClearDebugMethodEntry() {
828     tls32_.debug_method_entry_ = false;
829   }
830 
831   bool GetIsGcMarking() const {
832     CHECK(kUseReadBarrier);
833     return tls32_.is_gc_marking;
834   }
835 
836   void SetIsGcMarking(bool is_marking) {
837     CHECK(kUseReadBarrier);
838     tls32_.is_gc_marking = is_marking;
839   }
840 
841   bool GetWeakRefAccessEnabled() const {
842     CHECK(kUseReadBarrier);
843     return tls32_.weak_ref_access_enabled;
844   }
845 
846   void SetWeakRefAccessEnabled(bool enabled) {
847     CHECK(kUseReadBarrier);
848     tls32_.weak_ref_access_enabled = enabled;
849   }
850 
851   uint32_t GetDisableThreadFlipCount() const {
852     CHECK(kUseReadBarrier);
853     return tls32_.disable_thread_flip_count;
854   }
855 
856   void IncrementDisableThreadFlipCount() {
857     CHECK(kUseReadBarrier);
858     ++tls32_.disable_thread_flip_count;
859   }
860 
861   void DecrementDisableThreadFlipCount() {
862     CHECK(kUseReadBarrier);
863     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
864     --tls32_.disable_thread_flip_count;
865   }
866 
867   // Returns true if the thread is allowed to call into java.
868   bool CanCallIntoJava() const {
869     return can_call_into_java_;
870   }
871 
872   void SetCanCallIntoJava(bool can_call_into_java) {
873     can_call_into_java_ = can_call_into_java;
874   }
875 
876   // Activates single step control for debugging. The thread takes the
877   // ownership of the given SingleStepControl*. It is deleted by a call
878   // to DeactivateSingleStepControl or upon thread destruction.
879   void ActivateSingleStepControl(SingleStepControl* ssc);
880 
881   // Deactivates single step control for debugging.
882   void DeactivateSingleStepControl();
883 
884   // Sets debug invoke request for debugging. When the thread is resumed,
885   // it executes the method described by this request then sends the reply
886   // before suspending itself. The thread takes the ownership of the given
887   // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
888   void SetDebugInvokeReq(DebugInvokeReq* req);
889 
890   // Clears debug invoke request for debugging. When the thread completes
891   // method invocation, it deletes its debug invoke request and suspends
892   // itself.
893   void ClearDebugInvokeReq();
894 
895   // Returns the fake exception used to activate deoptimization.
896   static mirror::Throwable* GetDeoptimizationException() {
897     return reinterpret_cast<mirror::Throwable*>(-1);
898   }
899 
900   // Currently deoptimization invokes verifier which can trigger class loading
901   // and execute Java code, so there might be nested deoptimizations happening.
902   // We need to save the ongoing deoptimization shadow frames and return
903   // values on stacks.
904   // 'from_code' denotes whether the deoptimization was explicitly made from
905   // compiled code.
906   void PushDeoptimizationContext(const JValue& return_value,
907                                  bool is_reference,
908                                  bool from_code,
909                                  mirror::Throwable* exception)
910       SHARED_REQUIRES(Locks::mutator_lock_);
911   void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code)
912       SHARED_REQUIRES(Locks::mutator_lock_);
913   void AssertHasDeoptimizationContext()
914       SHARED_REQUIRES(Locks::mutator_lock_);
915   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
916   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
917 
918   // For debugger, find the shadow frame that corresponds to a frame id.
919   // Or return null if there is none.
920   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
921       SHARED_REQUIRES(Locks::mutator_lock_);
922   // For debugger, find the bool array that keeps track of the updated vreg set
923   // for a frame id.
924   bool* GetUpdatedVRegFlags(size_t frame_id) SHARED_REQUIRES(Locks::mutator_lock_);
925   // For debugger, find the shadow frame that corresponds to a frame id. If
926   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
927   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
928                                                uint32_t num_vregs,
929                                                ArtMethod* method,
930                                                uint32_t dex_pc)
931       SHARED_REQUIRES(Locks::mutator_lock_);
932 
933   // Delete the entry that maps from frame_id to shadow_frame.
934   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
935       SHARED_REQUIRES(Locks::mutator_lock_);
936 
937   std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
938     return tlsPtr_.instrumentation_stack;
939   }
940 
941   std::vector<ArtMethod*>* GetStackTraceSample() const {
942     return tlsPtr_.stack_trace_sample;
943   }
944 
945   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
946     tlsPtr_.stack_trace_sample = sample;
947   }
948 
949   uint64_t GetTraceClockBase() const {
950     return tls64_.trace_clock_base;
951   }
952 
953   void SetTraceClockBase(uint64_t clock_base) {
954     tls64_.trace_clock_base = clock_base;
955   }
956 
957   BaseMutex* GetHeldMutex(LockLevel level) const {
958     return tlsPtr_.held_mutexes[level];
959   }
960 
961   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
962     tlsPtr_.held_mutexes[level] = mutex;
963   }
964 
965   void RunCheckpointFunction();
966 
967   bool PassActiveSuspendBarriers(Thread* self)
968       REQUIRES(!Locks::thread_suspend_count_lock_);
969 
970   void ClearSuspendBarrier(AtomicInteger* target)
971       REQUIRES(Locks::thread_suspend_count_lock_);
972 
973   bool ReadFlag(ThreadFlag flag) const {
974     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
975   }
976 
977   bool TestAllFlags() const {
978     return (tls32_.state_and_flags.as_struct.flags != 0);
979   }
980 
981   void AtomicSetFlag(ThreadFlag flag) {
982     tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
983   }
984 
985   void AtomicClearFlag(ThreadFlag flag) {
986     tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
987   }
988 
989   void ResetQuickAllocEntryPointsForThread();
990 
991   // Returns the remaining space in the TLAB.
992   size_t TlabSize() const;
993   // Doesn't check that there is room.
994   mirror::Object* AllocTlab(size_t bytes);
995   void SetTlab(uint8_t* start, uint8_t* end);
996   bool HasTlab() const;
997   uint8_t* GetTlabStart() {
998     return tlsPtr_.thread_local_start;
999   }
1000   uint8_t* GetTlabPos() {
1001     return tlsPtr_.thread_local_pos;
1002   }
1003 
1004   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1005   // equal to a valid pointer.
1006   // TODO: does this need to be atomic?  I don't think so.
1007   void RemoveSuspendTrigger() {
1008     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1009   }
1010 
1011   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1012   // The next time a suspend check is done, it will load from the value at this address
1013   // and trigger a SIGSEGV.
1014   void TriggerSuspend() {
1015     tlsPtr_.suspend_trigger = nullptr;
1016   }
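
  // Editor's sketch of the mechanism described above (not a literal excerpt from generated
  // code): an implicit suspend check is effectively
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger;  // self-pointing address, or null
  //   (void)*trigger;                                // SIGSEGV here iff TriggerSuspend() ran
  //
  // The runtime's fault handler recognizes the faulting access and routes the thread into the
  // normal suspend check before it resumes.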
1017 
1018 
1019   // Push an object onto the allocation stack.
1020   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1021       SHARED_REQUIRES(Locks::mutator_lock_);
1022 
1023   // Set the thread local allocation pointers to the given pointers.
1024   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1025                                      StackReference<mirror::Object>* end);
1026 
1027   // Resets the thread local allocation pointers.
1028   void RevokeThreadLocalAllocationStack();
1029 
1030   size_t GetThreadLocalBytesAllocated() const {
1031     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1032   }
1033 
1034   size_t GetThreadLocalObjectsAllocated() const {
1035     return tlsPtr_.thread_local_objects;
1036   }
1037 
1038   void* GetRosAllocRun(size_t index) const {
1039     return tlsPtr_.rosalloc_runs[index];
1040   }
1041 
1042   void SetRosAllocRun(size_t index, void* run) {
1043     tlsPtr_.rosalloc_runs[index] = run;
1044   }
1045 
1046   bool ProtectStack(bool fatal_on_error = true);
1047   bool UnprotectStack();
1048 
1049   void SetMterpDefaultIBase(void* ibase) {
1050     tlsPtr_.mterp_default_ibase = ibase;
1051   }
1052 
1053   void SetMterpCurrentIBase(void* ibase) {
1054     tlsPtr_.mterp_current_ibase = ibase;
1055   }
1056 
1057   void SetMterpAltIBase(void* ibase) {
1058     tlsPtr_.mterp_alt_ibase = ibase;
1059   }
1060 
1061   const void* GetMterpDefaultIBase() const {
1062     return tlsPtr_.mterp_default_ibase;
1063   }
1064 
1065   const void* GetMterpCurrentIBase() const {
1066     return tlsPtr_.mterp_current_ibase;
1067   }
1068 
1069   const void* GetMterpAltIBase() const {
1070     return tlsPtr_.mterp_alt_ibase;
1071   }
1072 
1073   void NoteSignalBeingHandled() {
1074     if (tls32_.handling_signal_) {
1075       LOG(FATAL) << "Detected signal while processing a signal";
1076     }
1077     tls32_.handling_signal_ = true;
1078   }
1079 
1080   void NoteSignalHandlerDone() {
1081     tls32_.handling_signal_ = false;
1082   }
1083 
1084   jmp_buf* GetNestedSignalState() {
1085     return tlsPtr_.nested_signal_state;
1086   }
1087 
1088   bool IsSuspendedAtSuspendCheck() const {
1089     return tls32_.suspended_at_suspend_check;
1090   }
1091 
1092   void PushVerifier(verifier::MethodVerifier* verifier);
1093   void PopVerifier(verifier::MethodVerifier* verifier);
1094 
1095   void InitStringEntryPoints();
1096 
1097   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1098     debug_disallow_read_barrier_ += delta;
1099   }
1100 
1101   uint8_t GetDebugDisallowReadBarrierCount() const {
1102     return debug_disallow_read_barrier_;
1103   }
1104 
1105   // Returns true if the current thread is the jit sensitive thread.
1106   bool IsJitSensitiveThread() const {
1107     return this == jit_sensitive_thread_;
1108   }
1109 
1110   // Returns true if StrictMode events are traced for the current thread.
1111   static bool IsSensitiveThread() {
1112     if (is_sensitive_thread_hook_ != nullptr) {
1113       return (*is_sensitive_thread_hook_)();
1114     }
1115     return false;
1116   }
1117 
1118  private:
1119   explicit Thread(bool daemon);
1120   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1121   void Destroy();
1122 
1123   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1124 
1125   template<bool kTransactionActive>
1126   void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
1127                 jobject thread_name, jint thread_priority)
1128       SHARED_REQUIRES(Locks::mutator_lock_);
1129 
1130   // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
1131   // Dbg::Disconnected.
1132   ThreadState SetStateUnsafe(ThreadState new_state) {
1133     ThreadState old_state = GetState();
1134     if (old_state == kRunnable && new_state != kRunnable) {
1135       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1136       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1137       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1138       TransitionToSuspendedAndRunCheckpoints(new_state);
1139       // Since we transitioned to a suspended state, check the pass barrier requests.
1140       PassActiveSuspendBarriers();
1141     } else {
1142       tls32_.state_and_flags.as_struct.state = new_state;
1143     }
1144     return old_state;
1145   }
1146 
1147   void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
1148 
1149   void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
1150   void DumpStack(std::ostream& os,
1151                  bool dump_native_stack = true,
1152                  BacktraceMap* backtrace_map = nullptr) const
1153       REQUIRES(!Locks::thread_suspend_count_lock_)
1154       SHARED_REQUIRES(Locks::mutator_lock_);
1155 
1156   // Out-of-line conveniences for debugging in gdb.
1157   static Thread* CurrentFromGdb();  // Like Thread::Current.
1158   // Like Thread::Dump(std::cerr).
1159   void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_);
1160 
1161   static void* CreateCallback(void* arg);
1162 
1163   void HandleUncaughtExceptions(ScopedObjectAccess& soa)
1164       SHARED_REQUIRES(Locks::mutator_lock_);
1165   void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_);
1166 
1167   // Initialize a thread.
1168   //
1169   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1170   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1171   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1172   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1173   // of false).
1174   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1175       REQUIRES(Locks::runtime_shutdown_lock_);
1176   void InitCardTable();
1177   void InitCpu();
1178   void CleanupCpu();
1179   void InitTlsEntryPoints();
1180   void InitTid();
1181   void InitPthreadKeySelf();
1182   bool InitStackHwm();
1183 
1184   void SetUpAlternateSignalStack();
1185   void TearDownAlternateSignalStack();
1186 
1187   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1188       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1189 
1190   ALWAYS_INLINE void PassActiveSuspendBarriers()
1191       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1192 
1193   // Registers the current thread as the jit sensitive thread. Should be called just once.
1194   static void SetJitSensitiveThread() {
1195     if (jit_sensitive_thread_ == nullptr) {
1196       jit_sensitive_thread_ = Thread::Current();
1197     } else {
1198       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1199           << Thread::Current()->GetTid();
1200     }
1201   }
1202 
1203   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1204     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1205   }
1206 
1207   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1208   // change from being Suspended to Runnable without a suspend request occurring.
1209   union PACKED(4) StateAndFlags {
1210     StateAndFlags() {}
1211     struct PACKED(4) {
1212       // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1213       // ThreadFlags for bit field meanings.
1214       volatile uint16_t flags;
1215       // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
1216       // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1217       // operation. If a thread is suspended and a suspend_request is present, a thread may not
1218       // change to Runnable as a GC or other operation is in progress.
1219       volatile uint16_t state;
1220     } as_struct;
1221     AtomicInteger as_atomic_int;
1222     volatile int32_t as_int;
1223 
1224    private:
1225     // gcc does not handle struct with volatile member assignments correctly.
1226     // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1227     DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1228   };
1229   static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
1230 
1231   static void ThreadExitCallback(void* arg);
1232 
1233   // Maximum number of checkpoint functions.
1234   static constexpr uint32_t kMaxCheckpoints = 3;
1235 
1236   // Maximum number of suspend barriers.
1237   static constexpr uint32_t kMaxSuspendBarriers = 3;
1238 
1239   // Has Thread::Startup been called?
1240   static bool is_started_;
1241 
1242   // TLS key used to retrieve the Thread*.
1243   static pthread_key_t pthread_key_self_;
1244 
1245   // Used to notify threads that they should attempt to resume; they will suspend again if
1246   // their suspend count is > 0.
1247   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1248 
1249   // Hook passed by framework which returns true
1250   // when StrictMode events are traced for the current thread.
1251   static bool (*is_sensitive_thread_hook_)();
1252   // Stores the jit sensitive thread (which for now is the UI thread).
1253   static Thread* jit_sensitive_thread_;
1254 
1255   /***********************************************************************************************/
1256   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1257   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1258   // first if possible.
1259   /***********************************************************************************************/
1260 
1261   struct PACKED(4) tls_32bit_sized_values {
1262     // We have no control over the size of 'bool', but want our boolean fields
1263     // to be 4-byte quantities.
1264     typedef uint32_t bool32_t;
1265 
1266     explicit tls_32bit_sized_values(bool is_daemon) :
1267       suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1268       daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
1269       thread_exit_check_count(0), handling_signal_(false),
1270       suspended_at_suspend_check(false), ready_for_debug_invoke(false),
1271       debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
1272       disable_thread_flip_count(0) {
1273     }
1274 
1275     union StateAndFlags state_and_flags;
1276     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1277                   "Size of state_and_flags and int32 are different");
1278 
1279     // A non-zero value is used to tell the current thread to enter a safe point
1280     // at the next poll.
1281     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1282 
1283     // How much of 'suspend_count_' is by request of the debugger, used to set things right
1284     // when the debugger detaches. Must be <= suspend_count_.
1285     int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1286 
1287     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1288     // This is not to be confused with the native thread's tid, nor is it the value returned
1289     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1290     // important difference between this id and the ids visible to managed code is that these
1291     // ones get reused (to ensure that they fit in the number of bits available).
1292     uint32_t thin_lock_thread_id;
1293 
1294     // System thread id.
1295     uint32_t tid;
1296 
1297     // Is the thread a daemon?
1298     const bool32_t daemon;
1299 
1300     // A boolean telling us whether we're recursively throwing OOME.
1301     bool32_t throwing_OutOfMemoryError;
1302 
1303     // A positive value implies we're in a region where thread suspension isn't expected.
1304     uint32_t no_thread_suspension;
1305 
1306     // How many times has our pthread key's destructor been called?
1307     uint32_t thread_exit_check_count;
1308 
1309     // True if signal is being handled by this thread.
1310     bool32_t handling_signal_;
1311 
1312     // True if the thread is suspended in FullSuspendCheck(). This is
1313     // used to distinguish runnable threads that are suspended due to
1314     // a normal suspend check from other threads.
1315     bool32_t suspended_at_suspend_check;
1316 
1317     // True if the thread has been suspended by a debugger event. This is
1318     // used to invoke a method from the debugger, which is only allowed when
1319     // the thread is suspended by an event.
1320     bool32_t ready_for_debug_invoke;
1321 
1322     // True if the thread has entered a method. This is used to detect a method entry
1323     // event for the debugger.
1324     bool32_t debug_method_entry_;
1325 
1326     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1327     // thread local so that we can simplify the fast-path check of read barriers on
1328     // GC roots.
1329     bool32_t is_gc_marking;
1330 
1331     // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1332     // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1333     // processing of the CC collector only. This is thread local so that we can enable/disable weak
1334     // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1335     // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1336     // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1337     // ReferenceProcessor::EnableSlowPath().
1338     bool32_t weak_ref_access_enabled;
1339 
1340     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1341     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1342     // critical section entry.
1343     uint32_t disable_thread_flip_count;
1344   } tls32_;
1345 
1346   struct PACKED(8) tls_64bit_sized_values {
1347     tls_64bit_sized_values() : trace_clock_base(0) {
1348     }
1349 
1350     // The clock base used for tracing.
1351     uint64_t trace_clock_base;
1352 
1353     RuntimeStats stats;
1354   } tls64_;
1355 
1356   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1357       tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1358       managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1359       self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1360       stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
1361       top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1362       instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
1363       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1364       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1365       last_no_thread_suspension_cause(nullptr), thread_local_objects(0),
1366       thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1367       mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
1368       thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
1369       nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
1370       thread_local_mark_stack(nullptr) {
1371       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1372     }
1373 
1374     // The biased card table, see CardTable for details.
1375     uint8_t* card_table;
1376 
1377     // The pending exception or null.
1378     mirror::Throwable* exception;
1379 
1380     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1381     // We leave extra space so there's room for the code that throws StackOverflowError.
1382     uint8_t* stack_end;
1383 
1384     // The top of the managed stack often manipulated directly by compiler generated code.
1385     ManagedStack managed_stack;
1386 
1387     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1388     // normally set to the address of itself.
1389     uintptr_t* suspend_trigger;
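    // Sketch of the implicit suspend check this field enables (an assumed codegen detail, not
    // part of this header): compiled code periodically dereferences the pointer stored here.
    //
    //   uintptr_t* trigger = tlsPtr_.suspend_trigger;  // Normally &tlsPtr_.suspend_trigger.
    //   (void)*trigger;  // A harmless self-read, unless the runtime wrote nullptr to request a
    //                    // suspend check, in which case the load faults into the SIGSEGV handler.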
1390 
1391     // Every thread may have an associated JNI environment.
1392     JNIEnvExt* jni_env;
1393 
1394     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1395     // created thread.
1396     JNIEnvExt* tmp_jni_env;
1397 
1398     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1399     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1400     // Thread::Current to give the address.
1401     Thread* self;
1402 
1403     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1404     // start up, until the thread is registered and the local opeer is used.
1405     mirror::Object* opeer;
1406     jobject jpeer;
1407 
1408     // The "lowest addressable byte" of the stack.
1409     uint8_t* stack_begin;
1410 
1411     // Size of the stack.
1412     size_t stack_size;
1413 
1414     // Pointer to previous stack trace captured by sampling profiler.
1415     std::vector<ArtMethod*>* stack_trace_sample;
1416 
1417     // The next thread in the wait set this thread is part of or null if not waiting.
1418     Thread* wait_next;
1419 
1420     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1421     mirror::Object* monitor_enter_object;
1422 
1423     // Top of linked list of handle scopes or null for none.
1424     HandleScope* top_handle_scope;
1425 
1426     // Needed to get the right ClassLoader in JNI_OnLoad, but also
1427     // useful for testing.
1428     jobject class_loader_override;
1429 
1430     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1431     Context* long_jump_context;
1432 
1433     // Additional stack used by method instrumentation to store method and return pc values.
1434     // Stored as a pointer since std::deque is not PACKED.
1435     std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1436 
1437     // JDWP invoke-during-breakpoint support.
1438     DebugInvokeReq* debug_invoke_req;
1439 
1440     // JDWP single-stepping support.
1441     SingleStepControl* single_step_control;
1442 
1443     // For GC purposes, a shadow frame record stack that keeps track of:
1444     // 1) shadow frames under construction.
1445     // 2) deoptimization shadow frames.
1446     StackedShadowFrameRecord* stacked_shadow_frame_record;
1447 
1448     // Deoptimization return value record stack.
1449     DeoptimizationContextRecord* deoptimization_context_stack;
1450 
1451     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1452     // Shadow frames may be created before deoptimization happens so that the debugger can
1453     // set local values there first.
1454     FrameIdToShadowFrame* frame_id_to_shadow_frame;
1455 
1456     // A cached copy of the java.lang.Thread's name.
1457     std::string* name;
1458 
1459     // A cached pthread_t for the pthread underlying this Thread*.
1460     pthread_t pthread_self;
1461 
1462     // If no_thread_suspension is > 0, what is causing that assertion.
1463     const char* last_no_thread_suspension_cause;
1464 
1465     // Pending checkpoint function or null if non-pending. Installation is guarded by
1466     // Locks::thread_suspend_count_lock_.
1467     Closure* checkpoint_functions[kMaxCheckpoints];
1468 
1469     // Pending barriers that require passing or null if non-pending. Installation is guarded by
1470     // Locks::thread_suspend_count_lock_.
1471     // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1472     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1473     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
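    // How a suspend barrier is used, as a sketch (AtomicInteger/futex calls are indicative only):
    // the requester initializes a counter to the number of threads it is waiting for, installs
    // its address in each target's active_suspend_barriers, and then waits for it to reach zero.
    //
    //   AtomicInteger barrier(num_threads_to_wait_for);
    //   // ...install &barrier in each target thread and set kActiveSuspendBarrier...
    //   while (barrier.LoadRelaxed() != 0) {
    //     futex_wait_on(&barrier);  // Each target decrements the barrier as it passes it; the
    //   }                           // thread that brings it to zero wakes the requester.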
1474 
1475     // Entrypoint function pointers.
1476     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1477     JniEntryPoints jni_entrypoints;
1478     QuickEntryPoints quick_entrypoints;
1479 
1480     // Thread-local allocation pointer.
1481     size_t thread_local_objects;
1482     uint8_t* thread_local_start;
1483     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned for
1484     // potentially better performance.
1485     uint8_t* thread_local_pos;
1486     uint8_t* thread_local_end;
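    // The bump-pointer fast path these fields make possible, as a sketch (the real allocation
    // entrypoints live elsewhere in the runtime):
    //
    //   uint8_t* pos = tlsPtr_.thread_local_pos;
    //   if (pos + alloc_size <= tlsPtr_.thread_local_end) {
    //     tlsPtr_.thread_local_pos = pos + alloc_size;  // No atomics needed: the buffer is
    //     ++tlsPtr_.thread_local_objects;               // private to this thread.
    //     return reinterpret_cast<mirror::Object*>(pos);
    //   }
    //   // Otherwise take a slow path that refills the TLAB or allocates in the shared heap.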
1487 
1488     // Mterp jump table bases.
1489     void* mterp_current_ibase;
1490     void* mterp_default_ibase;
1491     void* mterp_alt_ibase;
1492 
1493     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1494     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1495 
1496     // Thread-local allocation stack data/routines.
1497     StackReference<mirror::Object>* thread_local_alloc_stack_top;
1498     StackReference<mirror::Object>* thread_local_alloc_stack_end;
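    // Push sketch for the thread-local portion of the allocation stack (StackReference::Assign
    // is assumed here; the shared stack and overflow handling are omitted):
    //
    //   if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    //     tlsPtr_.thread_local_alloc_stack_top->Assign(new_object);  // Record the allocation.
    //     ++tlsPtr_.thread_local_alloc_stack_top;
    //   }  // Otherwise fall back to the shared allocation stack.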
1499 
1500     // Support for Mutex lock hierarchy bug detection.
1501     BaseMutex* held_mutexes[kLockLevelCount];
1502 
1503     // Recorded thread state for nested signals.
1504     jmp_buf* nested_signal_state;
1505 
1506     // The function used for thread flip.
1507     Closure* flip_function;
1508 
1509     // Current method verifier, used for root marking.
1510     verifier::MethodVerifier* method_verifier;
1511 
1512     // Thread-local mark stack for the concurrent copying collector.
1513     gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1514   } tlsPtr_;
1515 
1516   // Guards the 'interrupted_' and 'wait_monitor_' members.
1517   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1518 
1519   // Condition variable waited upon during a wait.
1520   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1521   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1522   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1523 
1524   // Thread "interrupted" status; stays raised until queried or thrown.
1525   bool interrupted_ GUARDED_BY(wait_mutex_);
1526 
1527   // Debug disable read barrier count, only checked in debug builds and only in the runtime.
1528   uint8_t debug_disallow_read_barrier_ = 0;
1529 
1530   // True if the thread is allowed to call back into Java (e.g. during class resolution).
1531   // By default this is true.
1532   bool can_call_into_java_;
1533 
1534   friend class Dbg;  // For SetStateUnsafe.
1535   friend class gc::collector::SemiSpace;  // For getting stack traces.
1536   friend class Runtime;  // For CreatePeer.
1537   friend class QuickExceptionHandler;  // For dumping the stack.
1538   friend class ScopedThreadStateChange;
1539   friend class StubTest;  // For accessing entrypoints.
1540   friend class ThreadList;  // For ~Thread and Destroy.
1541 
1542   friend class EntrypointsOrderTest;  // To test the order of tls entries.
1543 
1544   DISALLOW_COPY_AND_ASSIGN(Thread);
1545 };
1546 
1547 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1548  public:
1549   ScopedAssertNoThreadSuspension(Thread* self, const char* cause) ACQUIRE(Roles::uninterruptible_)
1550       : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {
1551   }
1552   ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1553     self_->EndAssertNoThreadSuspension(old_cause_);
1554   }
1555   Thread* Self() {
1556     return self_;
1557   }
1558 
1559  private:
1560   Thread* const self_;
1561   const char* const old_cause_;
1562 };
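// Usage sketch: guard a region that must not suspend, e.g. while holding raw pointers into the
// managed heap ("Visiting roots" is an illustrative cause string, not one defined here).
//
//   {
//     ScopedAssertNoThreadSuspension ants(Thread::Current(), "Visiting roots");
//     // Any suspension attempted in here trips the assertion and reports the cause string.
//   }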
1563 
1564 class ScopedStackedShadowFramePusher {
1565  public:
1566   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1567     : self_(self), type_(type) {
1568     self_->PushStackedShadowFrame(sf, type);
1569   }
1570   ~ScopedStackedShadowFramePusher() {
1571     self_->PopStackedShadowFrame(type_);
1572   }
1573 
1574  private:
1575   Thread* const self_;
1576   const StackedShadowFrameType type_;
1577 
1578   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1579 };
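// Usage sketch: the pop is tied to scope exit, so the record cannot leak on an early return.
//
//   ScopedStackedShadowFramePusher pusher(
//       self, shadow_frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
//   // shadow_frame stays registered with |self| until |pusher| goes out of scope.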
1580 
1581 // Only works for debug builds.
1582 class ScopedDebugDisallowReadBarriers {
1583  public:
1584   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1585     self_->ModifyDebugDisallowReadBarrier(1);
1586   }
1587   ~ScopedDebugDisallowReadBarriers() {
1588     self_->ModifyDebugDisallowReadBarrier(-1);
1589   }
1590 
1591  private:
1592   Thread* const self_;
1593 };
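// Usage sketch (effective in debug builds only): bracket code that is expected to run without
// read barriers; the runtime checks the debug_disallow_read_barrier_ count while it is non-zero.
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
//     // Debug builds verify that no read barriers are issued in this scope.
//   }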
1594 
1595 std::ostream& operator<<(std::ostream& os, const Thread& thread);
1596 std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
1597 
1598 }  // namespace art
1599 
1600 #endif  // ART_RUNTIME_THREAD_H_
1601