/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#include "arch/instruction_set.h"
#include "base/aborting.h"
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "jni/jni_env_ext.h"
#include "managed_stack-inl.h"
#include "obj_ptr.h"
#include "suspend_reason.h"
#include "thread-current-inl.h"
#include "thread_pool.h"

namespace art {

// Quickly access the current thread from a JNIEnv.
static inline Thread* ThreadForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->GetSelf();
}

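// A suspension point for the current thread: services any pending suspend or checkpoint request,
// then poisons object pointers to surface moving-GC bugs early.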
inline void Thread::AllowThreadSuspension() {
  DCHECK_EQ(Thread::Current(), this);
  if (UNLIKELY(TestAllFlags())) {
    CheckSuspend();
  }
  // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
  // to missing handles.
  PoisonObjectPointers();
}

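// Loop servicing checkpoint, suspend, and empty checkpoint requests until no flags remain set.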
inline void Thread::CheckSuspend() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (ReadFlag(kSuspendRequest)) {
      FullSuspendCheck();
    } else if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

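// Run any pending empty checkpoint while this thread is blocked during a weak reference access.
// In debug builds, verify that only mutexes expected during weak-ref access (plus cond_var_mutex)
// are held.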
inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
  Thread* self = Thread::Current();
  DCHECK_EQ(self, this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
      // Check that we hold only expected mutexes when accessing weak refs.
      if (kIsDebugBuild) {
        for (int i = kLockLevelCount - 1; i >= 0; --i) {
          BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr &&
              held_mutex != Locks::mutator_lock_ &&
              held_mutex != cond_var_mutex) {
            CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
                << "Holding unexpected mutex " << held_mutex->GetName()
                << " when accessing weak ref";
          }
        }
      }
    } else {
      break;
    }
  }
}

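// Run any pending empty checkpoint while this thread is blocked acquiring a mutex.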
inline void Thread::CheckEmptyCheckpointFromMutex() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Should only be used to change between suspended states.
  // Cannot use this code to change into or from Runnable as changing to Runnable should
  // fail if old_state_and_flags.suspend_request is true and changing from Runnable might
  // miss passing an active suspend barrier.
  DCHECK_NE(new_state, kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  CHECK_NE(old_state_and_flags.as_struct.state, kRunnable) << new_state << " " << *this << " "
      << *Thread::Current();
  tls32_.state_and_flags.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}

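// Returns true if it is currently safe for this thread to be suspended: the no_thread_suspension
// counter is zero, no unexpected mutexes are held, and we are not holding the user-code
// suspension lock while a user-code suspension is pending.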
inline bool Thread::IsThreadSuspensionAllowable() const {
  if (tls32_.no_thread_suspension != 0) {
    return false;
  }
  for (int i = kLockLevelCount - 1; i >= 0; --i) {
    if (i != kMutatorLock &&
        i != kUserCodeSuspensionLock &&
        GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
      return false;
    }
  }
  // Thread safety analysis cannot tell that GetHeldMutex(...) or AssertHeld means we hold the
  // mutex, so we need this workaround.
  auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
    return tls32_.user_code_suspend_count != 0;
  };
  if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
    return false;
  }
  return true;
}

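// Debug-build assertion that thread suspension is allowable at this point; logs any unexpectedly
// held mutexes and aborts unless the runtime is already aborting. Pass check_locks=false to skip
// the mutex checks.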
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator_lock_. The user code suspension lock is OK as long
        // as we aren't going to be held suspended due to SuspendReason::kForUserCode.
        if (i != kMutatorLock && i != kUserCodeSuspensionLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      // Make sure that if we hold the user_code_suspension_lock_ we aren't suspending due to
      // user_code_suspend_count, which would prevent the thread from ever waking up. Thread
      // safety analysis cannot tell that GetHeldMutex(...) or AssertHeld means we hold the mutex,
      // so we need this workaround.
      auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
        return tls32_.user_code_suspend_count != 0;
      };
      if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
        LOG(ERROR) << "suspending due to user-code while holding \""
                   << Locks::user_code_suspension_lock_->GetName() << "\"! Thread would never "
                   << "wake up.";
        bad_mutexes_held = true;
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}

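// Switch from kRunnable to the given suspended state, first draining any pending (empty)
// checkpoint requests so they cannot be missed, then CASing the new state in.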
inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  while (true) {
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0)) {
      RunCheckpointFunction();
      continue;
    }
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
      RunEmptyCheckpoint();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
    new_state_and_flags.as_struct.state = new_state;

    // CAS the value with release memory ordering.
    bool done =
        tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakRelease(old_state_and_flags.as_int,
                                                                      new_state_and_flags.as_int);
    if (LIKELY(done)) {
      break;
    }
  }
}

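// After moving to a suspended state, pass any active suspend barriers so that suspending threads
// can proceed.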
inline void Thread::PassActiveSuspendBarriers() {
  while (true) {
    uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
    if (LIKELY((current_flags &
                (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
      break;
    } else if ((current_flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else {
      // Impossible
      LOG(FATAL) << "Fatal, thread transitioned into suspended without running the checkpoint";
    }
  }
}

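// Full runnable-to-suspended transition: run pending checkpoints, move to new_state, release the
// shared mutator_lock_, and pass any active suspend barriers.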
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  PoisonObjectPointersIfDebug();
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  TransitionToSuspendedAndRunCheckpoints(new_state);
  // Mark the release of the share of the mutator_lock_.
  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
  // Once suspended - check the active suspend barrier flag
  PassActiveSuspendBarriers();
}

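// Suspended-to-runnable transition. The fast path CASes the state to kRunnable when no flags are
// set; otherwise this passes suspend barriers and waits on resume_cond_ until any suspend request
// is cleared. Runs the flip function, if one is set, and returns the previous state.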
inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (LIKELY(old_state_and_flags.as_struct.flags == 0)) {
      // Optimize for the return from native code case - this is the fast path.
      // Atomically change from suspended to runnable if no suspend request pending.
      union StateAndFlags new_state_and_flags;
      new_state_and_flags.as_int = old_state_and_flags.as_int;
      new_state_and_flags.as_struct.state = kRunnable;

      // CAS the value with acquire memory ordering.
      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
                                                 old_state_and_flags.as_int,
                                                 new_state_and_flags.as_int))) {
        // Mark the acquisition of a share of the mutator_lock_.
        Locks::mutator_lock_->TransitionFromSuspendedToRunnable(this);
        break;
      }
    } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else if ((old_state_and_flags.as_struct.flags &
                (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
      // Impossible
      LOG(FATAL) << "Transitioning to runnable with checkpoint flag, "
                 << " flags=" << old_state_and_flags.as_struct.flags
                 << " state=" << old_state_and_flags.as_struct.state;
    } else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
      // Wait while our suspend count is non-zero.

      // We pass null to the MutexLock as we may be in a situation where the
      // runtime is shutting down. Guarding ourselves from that situation
      // requires taking the shutdown lock, which is undesirable here.
      Thread* thread_to_pass = nullptr;
      if (kIsDebugBuild && !IsDaemon()) {
        // We know we can make our debug locking checks on non-daemon threads,
        // so re-enable them on debug builds.
        thread_to_pass = this;
      }
      MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
      ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
      old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(thread_to_pass);
        old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
  } while (true);
  // Run the flip function, if set.
  Closure* flip_func = GetFlipFunction();
  if (flip_func != nullptr) {
    flip_func->Run(this);
  }
  return static_cast<ThreadState>(old_state);
}

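// Bump-pointer allocation of 'bytes' from the thread-local allocation buffer (TLAB). The caller
// is responsible for ensuring that TlabSize() >= bytes.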
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}

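// Push obj onto the thread-local allocation stack; returns false if the stack segment is full.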
inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
              sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}

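// Install [start, end) as this thread's segment of the allocation stack. Must be called by the
// thread itself, with properly aligned, non-null bounds.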
inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

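// Give up this thread's allocation stack segment by clearing its bounds.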
inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || IsSuspended() || GetState() == kWaitingPerformingGc)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

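// Poison the current thread's ObjPtrs when ObjPtr poisoning (kObjPtrPoisoning) is enabled.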
inline void Thread::PoisonObjectPointersIfDebug() {
  if (kObjPtrPoisoning) {
    Thread::Current()->PoisonObjectPointers();
  }
}

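// Adjust this thread's suspend count by delta. When requesting a suspension (delta > 0) with a
// suspend barrier, or of another thread under the read-barrier configuration, the update may fail
// transiently and is retried in a loop; otherwise a single attempt is made.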
inline bool Thread::ModifySuspendCount(Thread* self,
                                       int delta,
                                       AtomicInteger* suspend_barrier,
                                       SuspendReason reason) {
  if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
    // When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if
    // active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
    while (true) {
      if (LIKELY(ModifySuspendCountInternal(self, delta, suspend_barrier, reason))) {
        return true;
      } else {
        // Failure means the list of active_suspend_barriers is full or we are in the middle of a
        // thread flip; we should release the thread_suspend_count_lock_ (to avoid deadlock) and
        // wait until the target thread has executed Thread::PassActiveSuspendBarriers() or the
        // flip function. Note that we cannot simply wait for the thread to change to a suspended
        // state, because it might need to run a checkpoint function before the state change or
        // resume from resume_cond_, which also needs thread_suspend_count_lock_.
        //
        // The list of active_suspend_barriers is very unlikely to be full since that would require
        // more than kMaxSuspendBarriers threads to execute SuspendAllInternal() simultaneously
        // while the target thread stays in kRunnable in the meantime.
        Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
        NanoSleep(100000);
        Locks::thread_suspend_count_lock_->ExclusiveLock(self);
      }
    }
  } else {
    return ModifySuspendCountInternal(self, delta, suspend_barrier, reason);
  }
}

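// Push an interpreter shadow frame onto this thread's managed stack.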
inline ShadowFrame* Thread::PushShadowFrame(ShadowFrame* new_top_frame) {
  new_top_frame->CheckConsistentVRegs();
  return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
}

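// Pop the top interpreter shadow frame off this thread's managed stack.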
inline ShadowFrame* Thread::PopShadowFrame() {
  return tlsPtr_.managed_stack.PopShadowFrame();
}

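// Stack end the interpreter should use for its explicit overflow checks: stack_end plus the
// reserved overflow region when implicit checks are in use, plus an extra reserved region in
// debug builds (see the comment in the body).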
inline uint8_t* Thread::GetStackEndForInterpreter(bool implicit_overflow_check) const {
  uint8_t* end = tlsPtr_.stack_end + (implicit_overflow_check
      ? GetStackOverflowReservedBytes(kRuntimeISA)
      : 0);
  if (kIsDebugBuild) {
    // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
    // potentially humongous stack size. We don't want to take too much of the stack regularly,
    // so do not increase the regular reserved size (for compiled code etc) and only report the
    // virtually smaller stack to the interpreter here.
    end += GetStackOverflowReservedBytes(kRuntimeISA);
  }
  return end;
}

inline void Thread::ResetDefaultStackEnd() {
  // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
  // to throw a StackOverflowError.
  tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_