/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#include "arch/instruction_set.h"
#include "base/aborting.h"
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "indirect_reference_table.h"
#include "jni/jni_env_ext.h"
#include "managed_stack-inl.h"
#include "obj_ptr-inl.h"
#include "suspend_reason.h"
#include "thread-current-inl.h"
#include "thread_pool.h"

namespace art {

// Quickly access the current thread from a JNIEnv.
inline Thread* Thread::ForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->GetSelf();
}

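// Decode a `jobject` into an `ObjPtr<>`. Local and JNI transition references are decoded
// inline; other (global) references are handled out of line by `DecodeGlobalJObject()`.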
inline ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
  if (obj == nullptr) {
    return nullptr;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  if (LIKELY(IndirectReferenceTable::IsJniTransitionOrLocalReference(ref))) {
    // For JNI transitions, the `jclass` for a static method points to the
    // `CompressedReference<>` in the `ArtMethod::declaring_class_` and other `jobject`
    // arguments point to spilled stack references but a `StackReference<>` is just
    // a subclass of `CompressedReference<>`. Local references also point to
    // a `CompressedReference<>` encapsulated in a `GcRoot<>`.
    if (kIsDebugBuild && IndirectReferenceTable::GetIndirectRefKind(ref) == kJniTransition) {
      CHECK(IsJniTransitionReference(obj));
    }
    auto* cref = IndirectReferenceTable::ClearIndirectRefKind<
        mirror::CompressedReference<mirror::Object>*>(ref);
    ObjPtr<mirror::Object> result = cref->AsMirrorPtr();
    if (kIsDebugBuild && IndirectReferenceTable::GetIndirectRefKind(ref) != kJniTransition) {
      CHECK_EQ(result, tlsPtr_.jni_env->locals_.Get(ref));
    }
    return result;
  } else {
    return DecodeGlobalJObject(obj);
  }
}

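// Explicit suspend point: process any pending suspend or checkpoint request.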
inline void Thread::AllowThreadSuspension() {
  CheckSuspend();
  // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
  // to missing handles.
  PoisonObjectPointers();
}

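// Handle pending suspend, checkpoint and empty checkpoint requests in a loop until none remain.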
inline void Thread::CheckSuspend(bool implicit) {
  DCHECK_EQ(Thread::Current(), this);
  while (true) {
    StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    if (LIKELY(!state_and_flags.IsAnyOfFlagsSet(SuspendOrCheckpointRequestFlags()))) {
      break;
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
      FullSuspendCheck(implicit);
      implicit = false;  // We do not need to `MadviseAwayAlternateSignalStack()` anymore.
    } else {
      DCHECK(state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest));
      RunEmptyCheckpoint();
    }
  }
  if (implicit) {
    // For implicit suspend check we want to `madvise()` away
    // the alternate signal stack to avoid wasting memory.
    MadviseAwayAlternateSignalStack();
  }
}

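// Handle a pending empty checkpoint request before accessing a weak reference while holding
// `cond_var_mutex`; in debug builds, verify that no unexpected mutexes are held at this point.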
inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
  Thread* self = Thread::Current();
  DCHECK_EQ(self, this);
  for (;;) {
    if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
      // Check that we hold only expected mutexes when accessing the weak ref.
      if (kIsDebugBuild) {
        for (int i = kLockLevelCount - 1; i >= 0; --i) {
          BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr &&
              held_mutex != GetMutatorLock() &&
              held_mutex != cond_var_mutex) {
            CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
                << "Holding unexpected mutex " << held_mutex->GetName()
                << " when accessing weak ref";
          }
        }
      }
    } else {
      break;
    }
  }
}

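// Handle pending empty checkpoint requests (variant called from mutex code).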
inline void Thread::CheckEmptyCheckpointFromMutex() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Should only be used to change between suspended states.
  // Cannot use this code to change into or from Runnable as changing to Runnable should
  // fail if the `ThreadFlag::kSuspendRequest` is set and changing from Runnable might
  // miss passing an active suspend barrier.
  DCHECK_NE(new_state, ThreadState::kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }

  while (true) {
    StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    CHECK_NE(old_state_and_flags.GetState(), ThreadState::kRunnable)
        << new_state << " " << *this << " " << *Thread::Current();
    StateAndFlags new_state_and_flags = old_state_and_flags.WithState(new_state);
    bool done =
        tls32_.state_and_flags.CompareAndSetWeakRelaxed(old_state_and_flags.GetValue(),
                                                        new_state_and_flags.GetValue());
    if (done) {
      return static_cast<ThreadState>(old_state_and_flags.GetState());
    }
  }
}

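// Returns true if this thread may be suspended at this point: no "no thread suspension"
// scope is active and no mutexes are held other than the mutator lock and, as long as we
// are not being suspended for user code, the user-code suspension lock.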
inline bool Thread::IsThreadSuspensionAllowable() const {
  if (tls32_.no_thread_suspension != 0) {
    return false;
  }
  for (int i = kLockLevelCount - 1; i >= 0; --i) {
    if (i != kMutatorLock &&
        i != kUserCodeSuspensionLock &&
        GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
      return false;
    }
  }
  // Thread safety analysis does not understand that GetHeldMutex(...) or AssertHeld implies
  // that we hold the mutex, so we need this hack.
  auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
    return tls32_.user_code_suspend_count != 0;
  };
  if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
    return false;
  }
  return true;
}

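// Debug-build check that thread suspension is allowable at this point; logs any mutexes
// held that would make suspension unsafe and then aborts (unless we are already aborting).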
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator lock. User code suspension lock is OK as long as
        // we aren't going to be held suspended due to SuspendReason::kForUserCode.
        if (i != kMutatorLock && i != kUserCodeSuspensionLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      // Make sure that if we hold the user_code_suspension_lock_ we aren't suspending due to
      // user_code_suspend_count which would prevent the thread from ever waking up. Thread
      // safety analysis does not understand that GetHeldMutex(...) or AssertHeld implies that
      // we hold the mutex, so we need this hack.
      auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
        return tls32_.user_code_suspend_count != 0;
      };
      if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
        LOG(ERROR) << "suspending due to user-code while holding \""
                   << Locks::user_code_suspension_lock_->GetName() << "\"! Thread would never "
                   << "wake up.";
        bad_mutexes_held = true;
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}

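// Run any pending checkpoints and then atomically change the state from Runnable to
// `new_state`, using a release CAS so that prior writes are visible to threads that
// observe this thread as suspended.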
inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
  DCHECK_NE(new_state, ThreadState::kRunnable);
  while (true) {
    StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
    if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest))) {
      RunCheckpointFunction();
      continue;
    }
    if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest))) {
      RunEmptyCheckpoint();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest));
    DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest));
    StateAndFlags new_state_and_flags = old_state_and_flags.WithState(new_state);

    // CAS the value, ensuring that prior memory operations are visible to any thread
    // that observes that we are suspended.
    bool done =
        tls32_.state_and_flags.CompareAndSetWeakRelease(old_state_and_flags.GetValue(),
                                                        new_state_and_flags.GetValue());
    if (LIKELY(done)) {
      break;
    }
  }
}

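// Pass (decrement) any active suspend barriers after this thread has become suspended.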
inline void Thread::PassActiveSuspendBarriers() {
  while (true) {
    StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    if (LIKELY(!state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest) &&
               !state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest) &&
               !state_and_flags.IsFlagSet(ThreadFlag::kActiveSuspendBarrier))) {
      break;
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kActiveSuspendBarrier)) {
      PassActiveSuspendBarriers(this);
    } else {
      // Impossible
      LOG(FATAL) << "Fatal, thread transitioned into suspended without running the checkpoint";
    }
  }
}

inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  // Note: JNI stubs inline a fast path of this method that transitions to suspended if
  // there are no flags set and then clears the `held_mutexes[kMutatorLock]` (this comes
  // from a specialized `BaseMutex::RegisterAsUnlockedImpl(., kMutatorLock)` inlined from
  // the `GetMutatorLock()->TransitionFromRunnableToSuspended(this)` below).
  // Therefore any code added here (other than debug build assertions) should be gated
  // on some flag being set, so that the JNI stub can take the slow path to get here.
  AssertThreadSuspensionIsAllowable();
  PoisonObjectPointersIfDebug();
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  TransitionToSuspendedAndRunCheckpoints(new_state);
  // Mark the release of the share of the mutator lock.
  GetMutatorLock()->TransitionFromRunnableToSuspended(this);
  // Once suspended, check the active suspend barrier flag.
  PassActiveSuspendBarriers();
}

inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  // Note: JNI stubs inline a fast path of this method that transitions to Runnable if
  // there are no flags set and then stores the mutator lock to `held_mutexes[kMutatorLock]`
  // (this comes from a specialized `BaseMutex::RegisterAsLockedImpl(., kMutatorLock)`
  // inlined from the `GetMutatorLock()->TransitionFromSuspendedToRunnable(this)` below).
  // Therefore any code added here (other than debug build assertions) should be gated
  // on some flag being set, so that the JNI stub can take the slow path to get here.
  StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
  ThreadState old_state = old_state_and_flags.GetState();
  DCHECK_NE(old_state, ThreadState::kRunnable);
  while (true) {
    GetMutatorLock()->AssertNotHeld(this);  // Otherwise we starve GC.
    // Optimize for the return from native code case - this is the fast path.
    // Atomically change from suspended to runnable if no suspend request pending.
    constexpr uint32_t kCheckedFlags =
        SuspendOrCheckpointRequestFlags() |
        enum_cast<uint32_t>(ThreadFlag::kActiveSuspendBarrier) |
        FlipFunctionFlags();
    if (LIKELY(!old_state_and_flags.IsAnyOfFlagsSet(kCheckedFlags))) {
      // CAS the value with a memory barrier.
      StateAndFlags new_state_and_flags = old_state_and_flags.WithState(ThreadState::kRunnable);
      if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(),
                                                                 new_state_and_flags.GetValue()))) {
        // Mark the acquisition of a share of the mutator lock.
        GetMutatorLock()->TransitionFromSuspendedToRunnable(this);
        break;
      }
    } else if (old_state_and_flags.IsFlagSet(ThreadFlag::kActiveSuspendBarrier)) {
      PassActiveSuspendBarriers(this);
    } else if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest) ||
                        old_state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest))) {
      // Checkpoint flags should not be set while in suspended state.
      static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
      LOG(FATAL) << "Transitioning to Runnable with checkpoint flag,"
                 // Note: Keeping unused flags. If they are set, it points to memory corruption.
                 << " flags=" << old_state_and_flags.WithState(ThreadState::kRunnable).GetValue()
                 << " state=" << old_state_and_flags.GetState();
    } else if (old_state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
      // Wait while our suspend count is non-zero.

      // We pass null to the MutexLock as we may be in a situation where the
      // runtime is shutting down. Guarding ourselves from that situation
      // requires taking the shutdown lock, which is undesirable here.
      Thread* thread_to_pass = nullptr;
      if (kIsDebugBuild && !IsDaemon()) {
        // We know we can make our debug locking checks on non-daemon threads,
        // so re-enable them on debug builds.
        thread_to_pass = this;
      }
      MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
      ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
      // Reload state and flags after locking the mutex.
      old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
      DCHECK_EQ(old_state, old_state_and_flags.GetState());
      while (old_state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(thread_to_pass);
        // Reload state and flags after waiting.
        old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
        DCHECK_EQ(old_state, old_state_and_flags.GetState());
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    } else if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction)) ||
               UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kWaitingForFlipFunction))) {
      // The thread should be suspended while another thread is running the flip function.
      static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
      LOG(FATAL) << "Transitioning to Runnable while another thread is running the flip function,"
                 // Note: Keeping unused flags. If they are set, it points to memory corruption.
                 << " flags=" << old_state_and_flags.WithState(ThreadState::kRunnable).GetValue()
                 << " state=" << old_state_and_flags.GetState();
    } else {
      DCHECK(old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
      // CAS the value with a memory barrier.
      // Do not set `ThreadFlag::kRunningFlipFunction` as no other thread can run
      // the flip function for a thread that is not suspended.
      StateAndFlags new_state_and_flags = old_state_and_flags.WithState(ThreadState::kRunnable)
                                              .WithoutFlag(ThreadFlag::kPendingFlipFunction);
      if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(),
                                                                 new_state_and_flags.GetValue()))) {
        // Mark the acquisition of a share of the mutator lock.
        GetMutatorLock()->TransitionFromSuspendedToRunnable(this);
        // Run the flip function.
        RunFlipFunction(this, /*notify=*/ false);
        break;
      }
    }
    // Reload state and flags.
    old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    DCHECK_EQ(old_state, old_state_and_flags.GetState());
  }
  return static_cast<ThreadState>(old_state);
}

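// Bump-pointer allocation of `bytes` from the thread-local allocation buffer (TLAB).
// The caller must have ensured that at least `bytes` of TLAB space remain.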
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}

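// Push `obj` on the thread-local allocation stack; returns false if the stack is full.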
inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                  sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}

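// Returns whether weak reference access is currently enabled for this thread. Uses a relaxed
// fast path for the common kVisiblyEnabled case and promotes kEnabled to kVisiblyEnabled
// after an acquire reload.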
inline bool Thread::GetWeakRefAccessEnabled() const {
  DCHECK(gUseReadBarrier);
  DCHECK(this == Thread::Current());
  WeakRefAccessState s = tls32_.weak_ref_access_enabled.load(std::memory_order_relaxed);
  if (LIKELY(s == WeakRefAccessState::kVisiblyEnabled)) {
    return true;
  }
  s = tls32_.weak_ref_access_enabled.load(std::memory_order_acquire);
  if (s == WeakRefAccessState::kVisiblyEnabled) {
    return true;
  } else if (s == WeakRefAccessState::kDisabled) {
    return false;
  }
  DCHECK(s == WeakRefAccessState::kEnabled)
      << "state = " << static_cast<std::underlying_type_t<WeakRefAccessState>>(s);
  // The state is only changed back to DISABLED during a checkpoint. Thus no other thread can
  // change the value concurrently here. No other thread reads the value we store here, so there
  // is no need for a release store.
  tls32_.weak_ref_access_enabled.store(WeakRefAccessState::kVisiblyEnabled,
                                       std::memory_order_relaxed);
  return true;
}

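// Install [start, end) as this thread's local allocation stack segment.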
inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

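// Clear the thread-local allocation stack. May be called by another thread while this
// thread is suspended (or performing GC), as checked by the debug-build assertion below.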
inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || IsSuspended() || GetState() == ThreadState::kWaitingPerformingGc)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

inline void Thread::PoisonObjectPointersIfDebug() {
  if (kObjPtrPoisoning) {
    Thread::Current()->PoisonObjectPointers();
  }
}

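// Adjust this thread's suspend count by `delta`. For suspension requests (delta > 0) with a
// suspend barrier, or targeting another thread under userfaultfd/read-barrier collectors,
// retry in a loop while the internal update fails (full suspend barrier list or an
// in-progress thread flip), temporarily releasing thread_suspend_count_lock_ between tries.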
inline bool Thread::ModifySuspendCount(Thread* self,
                                       int delta,
                                       AtomicInteger* suspend_barrier,
                                       SuspendReason reason) {
  if (delta > 0 &&
      (((gUseUserfaultfd || gUseReadBarrier) && this != self) || suspend_barrier != nullptr)) {
    // When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail if either
    // active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
    while (true) {
      if (LIKELY(ModifySuspendCountInternal(self, delta, suspend_barrier, reason))) {
        return true;
      } else {
        // Failure means the list of active_suspend_barriers is full or we are in the middle of a
        // thread flip, we should release the thread_suspend_count_lock_ (to avoid deadlock) and
        // wait till the target thread has executed Thread::PassActiveSuspendBarriers() or the
        // flip function. Note that we could not simply wait for the thread to change to a
        // suspended state, because it might need to run a checkpoint function before the state
        // change or resume from the resume_cond_, which also needs thread_suspend_count_lock_.
        //
        // The list of active_suspend_barriers is very unlikely to be full since more than
        // kMaxSuspendBarriers threads need to execute SuspendAllInternal() simultaneously, and
        // the target thread stays in kRunnable in the meantime.
        Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
        NanoSleep(100000);
        Locks::thread_suspend_count_lock_->ExclusiveLock(self);
      }
    }
  } else {
    return ModifySuspendCountInternal(self, delta, suspend_barrier, reason);
  }
}

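// Push a new interpreter shadow frame on the managed stack after checking its vregs.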
inline ShadowFrame* Thread::PushShadowFrame(ShadowFrame* new_top_frame) {
  new_top_frame->CheckConsistentVRegs();
  return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
}

inline ShadowFrame* Thread::PopShadowFrame() {
  return tlsPtr_.managed_stack.PopShadowFrame();
}

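// Returns the stack-end limit that the interpreter should use for its overflow checks;
// see the debug-build note below.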
inline uint8_t* Thread::GetStackEndForInterpreter(bool implicit_overflow_check) const {
  uint8_t* end = tlsPtr_.stack_end + (implicit_overflow_check
      ? GetStackOverflowReservedBytes(kRuntimeISA)
      : 0);
  if (kIsDebugBuild) {
    // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
    // potentially humongous stack size. We don't want to take too much of the stack regularly,
    // so do not increase the regular reserved size (for compiled code etc) and only report the
    // virtually smaller stack to the interpreter here.
    end += GetStackOverflowReservedBytes(kRuntimeISA);
  }
  return end;
}

inline void Thread::ResetDefaultStackEnd() {
  // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
  // to throw a StackOverflowError.
  tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_