/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_INCLUDE_MTMANAGED_THREAD_H_
#define PANDA_RUNTIME_INCLUDE_MTMANAGED_THREAD_H_

#include "managed_thread.h"

// See issue 4100, js thread always true
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define ASSERT_MANAGED_CODE() ASSERT(::panda::MTManagedThread::GetCurrent()->IsManagedCode())
#define ASSERT_NATIVE_CODE() ASSERT(::panda::MTManagedThread::GetCurrent()->IsInNativeCode())  // NOLINT

namespace panda {
class MTManagedThread : public ManagedThread {
public:
    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };

    ThreadId GetInternalId();

    static MTManagedThread *Create(Runtime *runtime, PandaVM *vm);

    explicit MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm);
    ~MTManagedThread() override;

    std::unordered_set<Monitor *> &GetMonitors();
    void AddMonitor(Monitor *monitor);
    void RemoveMonitor(Monitor *monitor);
    void ReleaseMonitors();

    void PushLocalObjectLocked(ObjectHeader *obj);
    void PopLocalObjectLocked(ObjectHeader *out);
    const PandaVector<LockedObjectInfo> &GetLockedObjectInfos();

    void VisitGCRoots(const ObjectVisitor &cb) override;
    void UpdateGCRoots() override;

    ThreadStatus GetWaitingMonitorOldStatus() const
    {
        return monitor_old_status_;
    }

    void SetWaitingMonitorOldStatus(ThreadStatus status)
    {
        monitor_old_status_ = status;
    }

    static bool IsManagedScope()
    {
        auto thread = GetCurrent();
        return thread != nullptr && thread->is_managed_scope_;
    }

    void FreeInternalMemory() override;

    static bool Sleep(uint64_t ms);

    void SuspendImpl(bool internal_suspend = false);
    void ResumeImpl(bool internal_resume = false);

    Monitor *GetWaitingMonitor() const
    {
        return waiting_monitor_;
    }

    void SetWaitingMonitor(Monitor *monitor)
    {
        ASSERT(waiting_monitor_ == nullptr || monitor == nullptr);
        waiting_monitor_ = monitor;
    }

    virtual void StopDaemonThread();

    bool IsDaemon()
    {
        return is_daemon_;
    }

    void SetDaemon();

    virtual void Destroy();

    static void Yield();

    static void Interrupt(MTManagedThread *thread);

    [[nodiscard]] bool HasManagedCodeOnStack() const;
    [[nodiscard]] bool HasClearStack() const;

    /**
     * Transition to suspended and back to runnable, re-acquiring the shared mutator_lock_
     */
    void SuspendCheck();

    bool IsUserSuspended()
    {
        return user_code_suspend_count_ > 0;
    }

    // Need to acquire the mutex before waiting to avoid being scheduled out between the monitor
    // release and the cond_lock_ acquire
    os::memory::Mutex *GetWaitingMutex() RETURN_CAPABILITY(cond_lock_)
    {
        return &cond_lock_;
    }
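
    // A minimal usage sketch (illustrative only, not part of the API): lock the mutex returned by
    // GetWaitingMutex() *before* releasing the monitor, so a Signal() from another thread cannot
    // fire between the monitor release and the condition wait. The `monitor->Release()` call below
    // is a hypothetical stand-in for the real monitor-release path.
    //
    //   MTManagedThread *thread = MTManagedThread::GetCurrentRaw();
    //   os::memory::LockHolder lock(*thread->GetWaitingMutex());  // takes cond_lock_
    //   monitor->Release(thread);                                 // hypothetical monitor release
    //   thread->WaitWithLockHeld(IS_WAITING);                     // safe: no lost wakeup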

    void Signal()
    {
        os::memory::LockHolder lock(cond_lock_);
        cond_var_.Signal();
    }

    bool Interrupted();

    bool IsInterrupted() const
    {
        os::memory::LockHolder lock(cond_lock_);
        return is_interrupted_;
    }

    bool IsInterruptedWithLockHeld() const REQUIRES(cond_lock_)
    {
        return is_interrupted_;
    }

    void ClearInterrupted()
    {
        os::memory::LockHolder lock(cond_lock_);
        is_interrupted_ = false;
    }

    void IncSuspended(bool is_internal) REQUIRES(suspend_lock_)
    {
        if (!is_internal) {
            user_code_suspend_count_++;
        }
        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
        auto old_count = suspend_count_++;
        if (old_count == 0) {
            SetFlag(SUSPEND_REQUEST);
        }
    }

    void DecSuspended(bool is_internal) REQUIRES(suspend_lock_)
    {
        if (!is_internal) {
            ASSERT(user_code_suspend_count_ != 0);
            user_code_suspend_count_--;
        }
        if (suspend_count_ > 0) {
            suspend_count_--;
            if (suspend_count_ == 0) {
                ClearFlag(SUSPEND_REQUEST);
            }
        }
    }

    static bool ThreadIsMTManagedThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        return thread->GetThreadType() == Thread::ThreadType::THREAD_TYPE_MT_MANAGED;
    }

    static MTManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsMTManagedThread(thread));
        return static_cast<MTManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current MTManagedThread.
     * It can be used in hotspots to get the best performance.
     * We can only use this method in places where the MTManagedThread is known to exist.
     * @return pointer to MTManagedThread
     */
    static MTManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current MTManagedThread.
     * @return pointer to MTManagedThread or nullptr (if the current thread is not a managed thread)
     */
    static MTManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsMTManagedThread(thread)) {
            return CastFromThread(thread);
        }
        // no guarantee that we will return nullptr here in the future
        return nullptr;
    }

    void SafepointPoll();

    /**
     * From NativeCode you can call ManagedCodeBegin.
     * From ManagedCode you can call NativeCodeBegin.
     * Calling Begin for the state you are already in is forbidden.
     */
    virtual void NativeCodeBegin();
    virtual void NativeCodeEnd();
    [[nodiscard]] virtual bool IsInNativeCode() const;

    virtual void ManagedCodeBegin();
    virtual void ManagedCodeEnd();
    [[nodiscard]] virtual bool IsManagedCode() const;
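
    // Illustrative sketch of the expected pairing (assumes a thread currently in native code);
    // each Begin must be matched by the corresponding End, and the states must nest:
    //
    //   thread->ManagedCodeBegin();   // native -> managed
    //   // ... run managed code ...
    //   thread->NativeCodeBegin();    // managed -> native (e.g. around a blocking call)
    //   // ... run native code; suspension and GC can proceed meanwhile ...
    //   thread->NativeCodeEnd();      // native -> managed
    //   thread->ManagedCodeEnd();     // managed -> native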

    void WaitWithLockHeld(ThreadStatus wait_status) REQUIRES(cond_lock_)
    {
        ASSERT(wait_status == IS_WAITING);
        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
        auto old_status = GetStatus();
        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
        UpdateStatus(wait_status);
        WaitWithLockHeldInternal();
        // Unlock before setting status RUNNING to handle MutatorReadLock without inverting the lock order.
        cond_lock_.Unlock();
        UpdateStatus(old_status);
        cond_lock_.Lock();
    }

    static void WaitForSuspension(ManagedThread *thread)
    {
        static constexpr uint32_t YIELD_ITERS = 500;
        uint32_t loop_iter = 0;
        while (thread->GetStatus() == RUNNING) {
            if (!thread->IsSuspended()) {
                LOG(WARNING, RUNTIME) << "No request for suspension, do not wait thread " << thread->GetId();
                break;
            }

            loop_iter++;
            if (loop_iter < YIELD_ITERS) {
                MTManagedThread::Yield();
            } else {
                // Use native sleep over ManagedThread::Sleep to prevent potentially time-consuming
                // mutator_lock locking and unlocking
                static constexpr uint32_t SHORT_SLEEP_MS = 1;
                os::thread::NativeSleep(SHORT_SLEEP_MS);
            }
        }
    }

    void Wait(ThreadStatus wait_status)
    {
        ASSERT(wait_status == IS_WAITING);
        auto old_status = GetStatus();
        {
            os::memory::LockHolder lock(cond_lock_);
            UpdateStatus(wait_status);
            WaitWithLockHeldInternal();
        }
        UpdateStatus(old_status);
    }

    bool TimedWaitWithLockHeld(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos, bool is_absolute = false)
        REQUIRES(cond_lock_)
    {
        ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED ||
               wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING ||
               wait_status == IS_WAITING_INFLATION);
        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
        auto old_status = GetStatus();
        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
        UpdateStatus(wait_status);
        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
        bool res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
        // Unlock before setting status RUNNING to handle MutatorReadLock without inverting the lock order.
        cond_lock_.Unlock();
        UpdateStatus(old_status);
        cond_lock_.Lock();
        return res;
    }
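
    // Why the Unlock/UpdateStatus/Lock dance above: restoring old_status == RUNNING goes through
    // TransitionFromSuspendedToRunning(), which may block at a safepoint and then acquires
    // mutator_lock. Doing that while still holding cond_lock_ would order the locks as
    // cond_lock_ -> mutator_lock, inverting the usual acquisition order and risking deadlock, so
    // cond_lock_ is dropped first and re-taken afterwards to satisfy the caller's
    // REQUIRES(cond_lock_) contract.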

    bool TimedWait(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos = 0, bool is_absolute = false)
    {
        ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED ||
               wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING ||
               wait_status == IS_WAITING_INFLATION);
        auto old_status = GetStatus();
        bool res = false;
        {
            os::memory::LockHolder lock(cond_lock_);
            UpdateStatus(wait_status);
            res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
        }
        UpdateStatus(old_status);
        return res;
    }

    void WaitSuspension()
    {
        constexpr int TIMEOUT = 100;
        auto old_status = GetStatus();
        UpdateStatus(IS_SUSPENDED);
        {
            PrintSuspensionStackIfNeeded();
            os::memory::LockHolder lock(suspend_lock_);
            while (suspend_count_ > 0) {
                suspend_var_.TimedWait(&suspend_lock_, TIMEOUT);
                // In case the runtime is being terminated, we should abort suspension and release monitors
                if (UNLIKELY(IsRuntimeTerminated())) {
                    suspend_lock_.Unlock();
                    TerminationLoop();
                }
            }
            ASSERT(!IsSuspended());
        }
        UpdateStatus(old_status);
    }

    void TerminationLoop()
    {
        ASSERT(IsRuntimeTerminated());
        // Free all monitors first in case we are suspending in status IS_BLOCKED
        ReleaseMonitors();
        UpdateStatus(IS_TERMINATED_LOOP);
        while (true) {
            static constexpr unsigned int LONG_SLEEP_MS = 1000000;
            os::thread::NativeSleep(LONG_SLEEP_MS);
        }
    }

    // NO_THREAD_SAFETY_ANALYSIS because TSAN is not able to determine the lock status
    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        // Workaround: we masked the assert for the 'ManagedThread::GetCurrent() == null' condition,
        // because JSThread updates status_ not from the current thread.
        // (Remove it when issue 5183 is resolved)
        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);

        Locks::mutator_lock->Unlock();
        StoreStatus(status);
    }

    // NO_THREAD_SAFETY_ANALYSIS because TSAN is not able to determine the lock status
    void TransitionFromSuspendedToRunning(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
    {
        // Workaround: we masked the assert for the 'ManagedThread::GetCurrent() == null' condition,
        // because JSThread updates status_ not from the current thread.
        // (Remove it when issue 5183 is resolved)
        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);

        // NB! This thread is treated as suspended, so when we transition from the suspended state to
        // running we need to check the suspension flag and counter; hence SafepointPoll has to be done
        // before acquiring mutator_lock.
        StoreStatusWithSafepoint(status);
        Locks::mutator_lock->ReadLock();
    }
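
    // UpdateStatus below dispatches on the (old, new) status pair. A rough summary of the cases:
    //
    //   RUNNING   -> suspended   : drop mutator_lock, then store the status
    //   suspended -> RUNNING     : safepoint-guarded store, then take mutator_lock (read)
    //   *         -> TERMINATING : safepoint-guarded store (we must not be suspended mid-update)
    //   otherwise                : plain atomic status store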

    void UpdateStatus(enum ThreadStatus status)
    {
        // Workaround: we masked the assert for the 'ManagedThread::GetCurrent() == null' condition,
        // because JSThread updates status_ not from the current thread.
        // (Remove it when issue 5183 is resolved)
        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);

        ThreadStatus old_status = GetStatus();
        if (old_status == RUNNING && status != RUNNING) {
            TransitionFromRunningToSuspended(status);
        } else if (old_status != RUNNING && status == RUNNING) {
            TransitionFromSuspendedToRunning(status);
        } else if (status == TERMINATING) {
            // Use a store with safepoint to be sure that the main thread didn't suspend us while we
            // were trying to update the status
            StoreStatusWithSafepoint(status);
        } else {
            // NB! The status is not a simple bit; without atomics it can produce a faulty GetStatus.
            StoreStatus(status);
        }
    }

    MTManagedThread *GetNextWait() const
    {
        return next_;
    }

    void SetWaitNext(MTManagedThread *next)
    {
        next_ = next;
    }

    mem::ReferenceStorage *GetPtReferenceStorage() const
    {
        return pt_reference_storage_.get();
    }

protected:
    virtual void ProcessCreatedThread();

    virtual void StopDaemon0();

    void StopSuspension() REQUIRES(suspend_lock_)
    {
        // Lock before this call.
        suspend_var_.Signal();
    }

    os::memory::Mutex *GetSuspendMutex() RETURN_CAPABILITY(suspend_lock_)
    {
        return &suspend_lock_;
    }

    void WaitInternal()
    {
        os::memory::LockHolder lock(cond_lock_);
        WaitWithLockHeldInternal();
    }

    void WaitWithLockHeldInternal() REQUIRES(cond_lock_)
    {
        ASSERT(this == ManagedThread::GetCurrent());
        cond_var_.Wait(&cond_lock_);
    }

    bool TimedWaitInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false)
    {
        os::memory::LockHolder lock(cond_lock_);
        return TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
    }

    bool TimedWaitWithLockHeldInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false) REQUIRES(cond_lock_)
    {
        ASSERT(this == ManagedThread::GetCurrent());
        return cond_var_.TimedWait(&cond_lock_, timeout, nanos, is_absolute);
    }

    void SignalWithLockHeld() REQUIRES(cond_lock_)
    {
        cond_var_.Signal();
    }

    void SetInterruptedWithLockHeld(bool interrupted) REQUIRES(cond_lock_)
    {
        is_interrupted_ = interrupted;
    }

private:
    PandaString LogThreadStack(ThreadState new_state) const;

    void StoreStatusWithSafepoint(ThreadStatus status)
    {
        while (true) {
            SafepointPoll();
            union FlagsAndThreadStatus old_fts {
            };
            union FlagsAndThreadStatus new_fts {
            };
            old_fts.as_int = ReadFlagsAndThreadStatusUnsafe();  // NOLINT(cppcoreguidelines-pro-type-union-access)
            new_fts.as_struct.flags = old_fts.as_struct.flags;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            new_fts.as_struct.status = status;  // NOLINT(cppcoreguidelines-pro-type-union-access)
            bool no_flags = (old_fts.as_struct.flags == NO_FLAGS);  // NOLINT(cppcoreguidelines-pro-type-union-access)

            // clang-format conflicts with CodeCheckAgent, so disable it here
            // clang-format off
            if (no_flags && stor_32_.fts_.as_atomic.compare_exchange_weak(
                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
                old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int, std::memory_order_release)) {
                // If the CAS succeeded, we set the new status and no request occurred here; safe to proceed.
                break;
            }
            // clang-format on
        }
    }
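
    // The loop above relies on the flags and the status sharing one atomic word. Conceptually
    // (the exact layout is an assumption for illustration; it is defined by FlagsAndThreadStatus
    // in managed_thread.h):
    //
    //   fts_ = [ flags | status ]   // one 32-bit slot, CAS-able as a whole
    //
    // The CAS only succeeds while flags == NO_FLAGS, so any pending request (e.g. SUSPEND_REQUEST)
    // forces another SafepointPoll() iteration before the status can change.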

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    MTManagedThread *next_ {nullptr};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    ThreadId internal_id_ {0};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    PandaStack<ThreadState> thread_frame_states_;

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    PandaVector<LockedObjectInfo> local_objects_locked_;

    // Implementation of Wait/Notify
    os::memory::ConditionVariable cond_var_ GUARDED_BY(cond_lock_);
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    mutable os::memory::Mutex cond_lock_;

    bool is_interrupted_ GUARDED_BY(cond_lock_) = false;

    os::memory::ConditionVariable suspend_var_ GUARDED_BY(suspend_lock_);
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    os::memory::Mutex suspend_lock_;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    uint32_t suspend_count_ GUARDED_BY(suspend_lock_) = 0;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    std::atomic_uint32_t user_code_suspend_count_ {0};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool is_daemon_ = false;

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    Monitor *waiting_monitor_;

    // The monitor lock is required for multithreaded AddMonitor; a RecursiveMutex allows calling
    // RemoveMonitor from within ReleaseMonitors
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    os::memory::RecursiveMutex monitor_lock_;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    std::unordered_set<Monitor *> entered_monitors_ GUARDED_BY(monitor_lock_);
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    ThreadStatus monitor_old_status_ = FINISHED;

    // Boolean which is safe to access after the runtime is destroyed
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    bool is_managed_scope_ {false};

    PandaUniquePtr<mem::ReferenceStorage> pt_reference_storage_ {nullptr};

    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    NO_COPY_SEMANTIC(MTManagedThread);
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
    NO_MOVE_SEMANTIC(MTManagedThread);
};

}  // namespace panda

#endif  // PANDA_RUNTIME_INCLUDE_MTMANAGED_THREAD_H_