/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_THREAD_MANAGER_H_
#define PANDA_RUNTIME_THREAD_MANAGER_H_

#include <bitset>

#include "libpandabase/os/mutex.h"
#include "libpandabase/utils/time.h"
#include "libpandabase/os/time.h"
#include "runtime/include/coretypes/array-inl.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/mtmanaged_thread.h"
#include "runtime/include/thread_status.h"
#include "runtime/include/locks.h"

namespace openjdkjvmti {
class TiThread;
}  // namespace openjdkjvmti

namespace panda {

// This interval is required for waiting for threads to stop.
static const int WAIT_INTERVAL = 10;
static constexpr int64_t K_MAX_DUMP_TIME_NS = UINT64_C(6 * 1000 * 1000 * 1000);  // 6s
static constexpr int64_t K_MAX_SINGLE_DUMP_TIME_NS = UINT64_C(50 * 1000 * 1000);  // 50ms

enum class EnumerationFlag {
    NONE = 0,         // Nothing
    JAVA_THREAD = 1,  // JAVA thread
    JS_THREAD = 2,    // JS thread
    MANAGED_CODE_THREAD =
        4,            // Thread which can execute managed code - should be used with JAVA_THREAD and/or JS_THREAD
    VM_THREAD = 8,    // Includes VM threads
    ALL = 16,         // Not 15, see the comment in the function SatisfyTheMask below
};
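
// Illustrative sketch: the flags above are bit values OR-ed together into the unsigned masks taken by
// ThreadManager::EnumerateThreads. For example, a mask selecting threads that execute managed Java code
// could be built as
//   static_cast<unsigned int>(EnumerationFlag::MANAGED_CODE_THREAD) |
//       static_cast<unsigned int>(EnumerationFlag::JAVA_THREAD)
// MANAGED_CODE_THREAD is meaningful only together with JAVA_THREAD and/or JS_THREAD, and ALL is a dedicated
// bit rather than the union of the other flags (see SatisfyTheMask below).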

class ThreadManager {
public:
    NO_COPY_SEMANTIC(ThreadManager);
    NO_MOVE_SEMANTIC(ThreadManager);

    // For performance reasons, don't exceed the specified number of bits.
    static constexpr size_t MAX_INTERNAL_THREAD_ID = std::min(0xffffU, ManagedThread::MAX_INTERNAL_THREAD_ID);

    explicit ThreadManager(mem::InternalAllocatorPtr allocator);

    virtual ~ThreadManager();

    template <class Callback>
    void EnumerateThreads(const Callback &cb, unsigned int mask,
                          unsigned int xor_mask = static_cast<unsigned int>(EnumerationFlag::NONE)) const
    {
        os::memory::LockHolder lock(thread_lock_);

        EnumerateThreadsWithLockheld(cb, mask, xor_mask);
    }

    template <class Callback>
    void EnumerateThreadsWithLockheld(const Callback &cb, unsigned int inc_mask,
                                      // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_INDENT_CHECK)
                                      unsigned int xor_mask = static_cast<unsigned int>(EnumerationFlag::NONE)) const
        REQUIRES(thread_lock_)
    {
        for (auto t : threads_) {
            bool inc_target = SatisfyTheMask(t, inc_mask);
            bool xor_target = SatisfyTheMask(t, xor_mask);
            if (inc_target != xor_target) {
                if (!cb(t)) {
                    break;
                }
            }
        }
    }
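
    // Usage sketch: a thread is visited when it satisfies exactly one of the two masks, i.e. when
    // SatisfyTheMask(t, inc_mask) != SatisfyTheMask(t, xor_mask). With the default xor_mask == NONE this is a
    // plain include filter; a non-trivial xor_mask excludes a subset of the included threads. Assuming a
    // hypothetical ThreadManager *manager, enumerating managed Java threads while skipping VM threads might
    // look like:
    //   manager->EnumerateThreads(
    //       [](MTManagedThread *thread) { /* inspect the thread */ return true; },
    //       static_cast<unsigned int>(EnumerationFlag::MANAGED_CODE_THREAD) |
    //           static_cast<unsigned int>(EnumerationFlag::JAVA_THREAD) |
    //           static_cast<unsigned int>(EnumerationFlag::VM_THREAD),
    //       static_cast<unsigned int>(EnumerationFlag::VM_THREAD));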

    template <class Callback>
    void EnumerateThreadsForDump(const Callback &cb, std::ostream &os)
    {
        SuspendAllThreads();
        Locks::mutator_lock->WriteLock();
        MTManagedThread *self = MTManagedThread::GetCurrent();
        {
            os << "ARK THREADS (" << threads_count_ << "):\n";
        }
        if (self != nullptr) {
            os::memory::LockHolder lock(thread_lock_);
            int64_t start = panda::os::time::GetClockTimeInThreadCpuTime();
            int64_t end;
            int64_t last_time = start;
            cb(self, os);
            for (const auto &thread : threads_) {
                if (thread != self) {
                    cb(thread, os);
                    end = panda::os::time::GetClockTimeInThreadCpuTime();
                    if ((end - last_time) > K_MAX_SINGLE_DUMP_TIME_NS) {
                        LOG(ERROR, RUNTIME) << "signal catcher: thread_list_dump thread : " << thread->GetId()
                                            << " timeout : " << (end - last_time);
                    }
                    last_time = end;
                    if ((end - start) > K_MAX_DUMP_TIME_NS) {
                        LOG(ERROR, RUNTIME) << "signal catcher: thread_list_dump timeout : " << end - start << "\n";
                        break;
                    }
                }
            }
        }
        DumpUnattachedThreads(os);
        Locks::mutator_lock->Unlock();
        ResumeAllThreads();
    }

    void DeleteFinishedThreads()
    {
        os::memory::LockHolder lock(thread_lock_);
        while (!finished_threads_.empty()) {
            MTManagedThread *thread = finished_threads_.front();
            // Explicitly delete the thread structure
            delete thread;
            finished_threads_.pop();
        }
    }

    void DumpUnattachedThreads(std::ostream &os);

    void RegisterThread(MTManagedThread *thread)
    {
        os::memory::LockHolder lock(thread_lock_);
        threads_count_++;
#ifndef NDEBUG
        registered_threads_count_++;
#endif  // NDEBUG
        threads_.emplace_back(thread);
        for (uint32_t i = suspend_new_count_; i > 0; i--) {
            thread->SuspendImpl(true);
        }
    }

    void IncPendingThreads()
    {
        os::memory::LockHolder lock(thread_lock_);
        pending_threads_++;
    }

    void DecPendingThreads()
    {
        os::memory::LockHolder lock(thread_lock_);
        pending_threads_--;
    }

    void AddDaemonThread()
    {
        daemon_threads_count_++;
    }

    int GetThreadsCount();

#ifndef NDEBUG
    uint32_t GetAllRegisteredThreadsCount();
#endif  // NDEBUG

    void WaitForDeregistration();

    void SuspendAllThreads();
    void ResumeAllThreads();

    uint32_t GetInternalThreadId();

    void RemoveInternalThreadId(uint32_t id);

    bool IsThreadExists(uint32_t thread_id);

    // Returns true if unregistration succeeded; for now it can fail when we are trying to unregister the main thread
    bool UnregisterExitedThread(MTManagedThread *java_thread);

    uint32_t GetThreadIdByInternalThreadId(uint32_t thread_id);

    MTManagedThread *GetThreadByInternalThreadId(uint32_t thread_id)
    {
        os::memory::LockHolder lock(thread_lock_);
        return GetThreadByInternalThreadIdWithLockHeld(thread_id);
    }

    MTManagedThread *SuspendAndWaitThreadByInternalThreadId(uint32_t thread_id);

    void RegisterSensitiveThread() const;

    os::memory::Mutex *GetThreadsLock()
    {
        return &thread_lock_;
    }

    void SetMainThread(ManagedThread *thread)
    {
        main_thread_ = thread;
    }

private:
    bool HasNoActiveThreads() const REQUIRES(thread_lock_)
    {
        ASSERT(threads_count_ >= daemon_threads_count_);
        auto thread = static_cast<uint32_t>(threads_count_ - daemon_threads_count_);
        return thread < 2 && pending_threads_ == 0;
    }

    bool SatisfyTheMask(MTManagedThread *t, unsigned int mask) const
    {
        if ((mask & static_cast<unsigned int>(EnumerationFlag::ALL)) != 0) {
            // Some uninitialized threads may not have the attached flag set yet,
            // so they are not included as MANAGED_CODE_THREAD.
            // Newly created threads are handled via the suspend_new_count_ counter.
            // That case can lead to deadlocks when such a thread cannot be resumed.
            // To deal with it, a dedicated ALL case is provided.
            return true;
        }

        bool target = true;

        // For the NONE mask
        target = false;
        if ((mask & static_cast<unsigned int>(EnumerationFlag::MANAGED_CODE_THREAD)) != 0) {
            target = t->IsAttached();
            if ((mask & static_cast<unsigned int>(EnumerationFlag::JAVA_THREAD)) != 0 ||
                (mask & static_cast<unsigned int>(EnumerationFlag::JS_THREAD)) != 0) {
                // Due to the hierarchical structure, the type checks are conjoined with the attached check
                bool target_type = false;
                if ((mask & static_cast<unsigned int>(EnumerationFlag::JAVA_THREAD)) != 0) {
                    target_type |= t->IsJavaThread();
                }
                if ((mask & static_cast<unsigned int>(EnumerationFlag::JS_THREAD)) != 0) {
                    target_type |= t->IsJSThread();
                }
                target &= target_type;
            }
        }

        if ((mask & static_cast<unsigned int>(EnumerationFlag::VM_THREAD)) != 0) {
            target |= t->IsVMThread();
        }

        return target;
    }
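
    // Worked examples for SatisfyTheMask (illustrative): a mask of JAVA_THREAD alone selects nothing, because
    // the JAVA/JS type bits are only examined when MANAGED_CODE_THREAD is also set; MANAGED_CODE_THREAD |
    // JAVA_THREAD selects attached Java threads; a newly created thread whose attached flag is not yet set
    // falls outside every MANAGED_CODE_THREAD-based mask, so only ALL is guaranteed to cover it, which is why
    // ALL is a dedicated bit (16) rather than the union of the other flags (15).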
requires communication with 276 // Runtime; If thread status is not RUNNING, it's treated as suspended and we can deregister it. 277 return status != CREATED && status != RUNNING && status != IS_BLOCKED && status != TERMINATING; 278 } 279 280 mutable os::memory::Mutex thread_lock_; 281 ManagedThread *main_thread_ {nullptr}; 282 // Counter used to suspend newly created threads after SuspendAllThreads/SuspendDaemonThreads 283 uint32_t suspend_new_count_ GUARDED_BY(thread_lock_) = 0; 284 // We should delete only finished thread structures, so call delete explicitly on finished threads 285 // and don't touch other pointers 286 PandaList<MTManagedThread *> threads_ GUARDED_BY(thread_lock_); 287 // Storage of finished threads which GC deletes, it's unsafe to call delete without safepoint 288 // (i.e. java.lang.Thread intrinsics can fetch nativePeer before its nullified but call JavaThread functions 289 // after thread destroys itself) 290 PandaQueue<MTManagedThread *> finished_threads_ GUARDED_BY(thread_lock_); 291 os::memory::Mutex ids_lock_; 292 std::bitset<MAX_INTERNAL_THREAD_ID> internal_thread_ids_ GUARDED_BY(ids_lock_); 293 uint32_t last_id_ GUARDED_BY(ids_lock_); 294 PandaList<MTManagedThread *> daemon_threads_; 295 296 os::memory::ConditionVariable stop_var_; 297 std::atomic_uint32_t threads_count_ = 0; 298 #ifndef NDEBUG 299 // This field is required for counting all registered threads (including finished daemons) 300 // in AttachThreadTest. It is not needed in production mode. 301 std::atomic_uint32_t registered_threads_count_ = 0; 302 #endif // NDEBUG 303 std::atomic_uint32_t daemon_threads_count_ = 0; 304 // A specific counter of threads, which are not completely created 305 // When the counter != 0, operations with thread set are permitted to avoid destruction of shared data (mutexes) 306 // Synchronized with lock (not atomic) for mutual exclusion with thread operations 307 int pending_threads_ GUARDED_BY(thread_lock_); 308 309 friend class openjdkjvmti::TiThread; 310 }; 311 312 } // namespace panda 313 314 #endif // PANDA_RUNTIME_THREAD_MANAGER_H_ 315