/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <string>

#include "base/macros.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "jvalue.h"
#include "locks.h"
#include "offsets.h"
#include "root_visitor.h"
#include "runtime_stats.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread_state.h"
#include "throw_location.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class ArtMethod;
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class StaticStorageBase;
  class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
struct JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

class PACKED(4) Thread {
 public:
  // Space to throw a StackOverflowError in.
  static const size_t kStackOverflowReservedBytes = 16 * KB;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current() {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    if (!is_started_) {
      return NULL;
    } else {
      void* thread = pthread_getspecific(Thread::pthread_key_self_);
      return reinterpret_cast<Thread*>(thread);
    }
  }

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    return static_cast<ThreadState>(state_and_flags_.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return suspend_count_;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return debug_suspend_count_;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags = state_and_flags_;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

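  // Asks this thread to run 'function' at its next checkpoint (see kCheckpointRequest above);
  // returns whether the request was accepted.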
  bool RequestCheckpoint(Closure* function);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

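  // Illustrative pairing (a sketch, not a prescribed API): code that leaves managed execution
  // typically brackets the blocking region with the two transitions above, e.g.
  //   self->TransitionFromRunnableToSuspended(kNative);
  //   ... work that may block or suspend ...
  //   self->TransitionFromSuspendedToRunnable();
  // Helpers such as ScopedThreadStateChange wrap this pattern.
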
  // Wait for a debugger suspension on the thread associated with the given peer. Returns the
  // thread on success, else NULL. If the thread should be suspended then request_suspension should
  // be true on entry. If the suspension times out then *timed_out is set to true.
  static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out)
      LOCKS_EXCLUDED(Locks::mutator_lock_,
                     Locks::thread_list_lock_,
                     Locks::thread_suspend_count_lock_);

  // Once called thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // End region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif
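
  // Illustrative usage of the pair above: callers bracket a region that must not suspend, e.g.
  //   const char* old_cause = self->StartAssertNoThreadSuspension("visiting thread roots");
  //   ... code that must not transition or suspend ...
  //   self->EndAssertNoThreadSuspension(old_cause);
  // (the cause string is arbitrary).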


  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return daemon_;
  }

  bool HoldsLock(mirror::Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(jpeer_ == NULL);
    return opeer_;
  }

  bool HasPeer() const {
    return jpeer_ != NULL || opeer_ != NULL;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != NULL) {
      *throw_location = throw_location_;
    }
    return exception_;
  }

  void AssertNoPendingException() const;

  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    exception_ = new_exception;
    throw_location_ = throw_location;
  }

  void ClearException() {
    exception_ = NULL;
    throw_location_.Clear();
  }

  // Finds the catch block and performs the long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

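  // Returns the thread-local, lazily allocated long jump context used to deliver exceptions
  // (see long_jump_context_ below); hand it back with ReleaseLongJumpContext so it can be reused.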
  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }

  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(void* stack, uintptr_t pc) {
    mirror::ArtMethod** top_method = reinterpret_cast<mirror::ArtMethod**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    managed_stack_.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const ThrowLocation& throw_location,
                         const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 4, 5)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

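  // Illustrative call of the printf-style variant above, with an example descriptor and format:
  //   ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                      "length=%d; index=%d", length, index);
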
  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Converts a jobject into a mirror::Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt();
  void Notify();

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Converts an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created; otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyRoots(VerifyRootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset PeerOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, opeer_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset ThreadFlagsOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
  }

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() const {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  byte* GetStackEnd() const {
    return stack_end_;
  }

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets the stack end back to the value used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }

  bool IsHandlingStackOverflow() const {
    return stack_end_ == stack_begin_;
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread
  size_t NumJniShadowFrameReferences() const {
    return managed_stack_.NumJniShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & JNI shadow frames on this thread
  size_t NumStackReferences() {
    return NumSirtReferences() + NumJniShadowFrameReferences();
  };

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj) const;

  void SirtVisitRoots(RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt) {
    sirt->SetLink(top_sirt_);
    top_sirt_ = sirt;
  }

  StackIndirectReferenceTable* PopSirt() {
    StackIndirectReferenceTable* sirt = top_sirt_;
    DCHECK(sirt != NULL);
    top_sirt_ = top_sirt_->GetLink();
    return sirt;
  }

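  // Illustrative pairing: a stack-allocated SIRT is pushed so the GC can see its references and
  // popped in LIFO order when the scope ends (helpers such as SirtRef typically manage this):
  //   StackIndirectReferenceTable sirt(...);  // hypothetical local table
  //   self->PushSirt(&sirt);
  //   ... references stored in 'sirt' are now visible to SirtContains/SirtVisitRoots ...
  //   self->PopSirt();
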
  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

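  // Deoptimization support: stashes the interpreter shadow frame and return value while a method
  // is deoptimized; GetAndClearDeoptimizationShadowFrame hands them back and clears the stash.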
  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return instrumentation_stack_;
  }

  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
    return stack_trace_sample_;
  }

  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
    stack_trace_sample_ = sample;
  }

  uint64_t GetTraceClockBase() const {
    return trace_clock_base_;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    trace_clock_base_ = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return held_mutexes_[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    held_mutexes_[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (state_and_flags_.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (state_and_flags_.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    state_and_flags_.as_struct.state = new_state;
    return old_state;
  }
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class Dbg;  // For SetStateUnsafe.

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

  static void ThreadExitCallback(void* arg);

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // --- Frequently accessed fields first for short offsets ---

  // 32 bits of atomically changed state and flags. Keeping them as 32 bits allows an atomic CAS
  // to change from being Suspended to Runnable without a suspend request occurring.
  union StateAndFlags {
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;
  };
  union StateAndFlags state_and_flags_;
  COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                 sizeof_state_and_flags_and_int32_are_different);

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // The biased card table; see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  mirror::Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack often manipulated directly by compiler generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
  // start up, until the thread is registered and the local opeer_ is used.
  mirror::Object* opeer_;
  jobject jpeer_;

  // The "lowest addressable byte" of the stack
  byte* stack_begin_;

  // Size of the stack
  size_t stack_size_;

  // Pointer to previous stack trace captured by sampling profiler.
  std::vector<mirror::ArtMethod*>* stack_trace_sample_;

  // The clock base used for tracing.
  uint64_t trace_clock_base_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  ThrowLocation throw_location_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on (or NULL).
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
  // Thread "interrupted" status; stays raised until queried or thrown.
  bool32_t interrupted_ GUARDED_BY(wait_mutex_);
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  mirror::Object* monitor_enter_object_;

  friend class Monitor;
  friend class MonitorInfo;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  mirror::ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Shadow frame that is used temporarily during the deoptimization of a method.
  ShadowFrame* deoptimization_shadow_frame_;
  JValue deoptimization_return_value_;

  // Additional stack used by method instrumentation to store method and return pc values.
  // Stored as a pointer since std::deque is not PACKED.
  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Support for Mutex lock hierarchy bug detection.
  BaseMutex* held_mutexes_[kLockLevelCount];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // Cause of the innermost active no-thread-suspension region (see StartAssertNoThreadSuspension).
  const char* last_no_thread_suspension_cause_;

  // Pending checkpoint function or NULL.
  Closure* checkpoint_function_;

 public:
  // Entrypoint function pointers
  // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
  InterpreterEntryPoints interpreter_entrypoints_;
  JniEntryPoints jni_entrypoints_;
  PortableEntryPoints portable_entrypoints_;
  QuickEntryPoints quick_entrypoints_;

 private:
  // How many times has our pthread key's destructor been called?
  uint32_t thread_exit_check_count_;

  friend class ScopedThreadStateChange;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_