/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_THREAD_H_
#define PANDA_RUNTIME_THREAD_H_

#include <memory>
#include <chrono>
#include <limits>
#include <thread>
#include <atomic>
#include <csignal>

#include "libpandabase/mem/gc_barrier.h"
#include "libpandabase/mem/ringbuf/lock_free_ring_buffer.h"
#include "libpandabase/mem/weighted_adaptive_tlab_average.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/utils/arch.h"
#include "libpandabase/utils/list.h"
#include "libpandabase/utils/tsan_interface.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/object_header-inl.h"
#include "runtime/include/stack_walker.h"
#include "runtime/include/language_context.h"
#include "runtime/include/thread_proxy.h"
#include "runtime/include/locks.h"
#include "runtime/include/thread_status.h"
#include "runtime/interpreter/cache.h"
#include "runtime/mem/frame_allocator-inl.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/internal_allocator.h"
#include "runtime/mem/tlab.h"
#include "runtime/mem/refstorage/reference_storage.h"
#include "runtime/entrypoints/entrypoints.h"
#include "events/events.h"

#define ASSERT_HAVE_ACCESS_TO_MANAGED_OBJECTS()

namespace ark {

// Forward declarations: handle machinery is defined elsewhere in the runtime.
template <class TYPE>
class HandleStorage;
template <class TYPE>
class GlobalHandleStorage;
template <class TYPE>
class HandleScope;

namespace test {
class ThreadTest;
}  // namespace test

class ThreadManager;
class Runtime;
class PandaVM;
class MonitorPool;

namespace mem {
class GCBarrierSet;
}  // namespace mem

namespace tooling {
class PtThreadInfo;
}  // namespace tooling

/// Base class for user-defined data attached to a thread through a custom TLS
/// slot. Subclass it to carry arbitrary per-thread state; the virtual
/// destructor allows the owner to delete it polymorphically.
struct CustomTLSData {
    CustomTLSData() = default;
    virtual ~CustomTLSData() = default;

    NO_COPY_SEMANTIC(CustomTLSData);
    NO_MOVE_SEMANTIC(CustomTLSData);
};

/// Record of one object locked by a thread: the object header and the value of
/// the frame pointer captured when the lock was taken.
/// NOTE(review): the field offsets are exported via Get*Offset() below and are
/// presumably consumed by compiler-generated stubs — the member layout must not
/// be changed without updating those stubs.
class LockedObjectInfo {
public:
    LockedObjectInfo(ObjectHeader *obj, void *fp) : object_(obj), stack_(fp) {}

    inline ObjectHeader *GetObject() const
    {
        return object_;
    }

    inline void SetObject(ObjectHeader *objNew)
    {
        object_ = objNew;
    }

    /// Lets the GC fix up the stored root: the updater receives the address of
    /// the object pointer and may rewrite it in place when the object moves.
    ALWAYS_INLINE void UpdateObject(const GCRootUpdater &gcRootUpdater)
    {
        gcRootUpdater(&object_);
    }

    inline void *GetStack() const
    {
        return stack_;
    }

    inline void SetStack(void *stackNew)
    {
        stack_ = stackNew;
    }

    /// Byte offset of object_ inside this struct (for stub/JIT access).
    static constexpr uint32_t GetMonitorOffset()
    {
        return MEMBER_OFFSET(LockedObjectInfo, object_);
    }

    /// Byte offset of stack_ inside this struct (for stub/JIT access).
    static constexpr uint32_t GetStackOffset()
    {
        return MEMBER_OFFSET(LockedObjectInfo, stack_);
    }

private:
    ObjectHeader *object_;
    void *stack_;
};

/// Growable array of LockedObjectInfo with vector-like PushBack/PopBack
/// semantics, backed by the given allocator adapter. Field offsets are exposed
/// via Get*Offset() so compiler stubs can walk the list directly; for that
/// reason the members are explicitly aligned and use fixed-width uint32_t.
template <typename Adapter = mem::AllocatorAdapter<LockedObjectInfo>>
class LockedObjectList {
    static constexpr uint32_t DEFAULT_CAPACITY = 16;

public:
    LockedObjectList() : capacity_(DEFAULT_CAPACITY), allocator_(Adapter())
    {
        storage_ = allocator_.allocate(DEFAULT_CAPACITY);
    }

    ~LockedObjectList()
    {
        allocator_.deallocate(storage_, capacity_);
    }

    NO_COPY_SEMANTIC(LockedObjectList);
    NO_MOVE_SEMANTIC(LockedObjectList);

    /// Appends a copy of data, growing the backing storage if full.
    void PushBack(LockedObjectInfo data)
    {
        ExtendIfNeeded();
        storage_[size_++] = data;
    }

    /// Constructs a new element in place at the end and returns a reference to it.
    template <typename... Args>
    LockedObjectInfo &EmplaceBack(Args &&...args)
    {
        ExtendIfNeeded();
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        auto *rawMem = &storage_[size_];
        auto *datum = new (rawMem) LockedObjectInfo(std::forward<Args>(args)...);
        size_++;
        return *datum;
    }

    /// Last element; the list must be non-empty.
    LockedObjectInfo &Back()
    {
        ASSERT(size_ > 0);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        return storage_[size_ - 1];
    }

    bool Empty() const
    {
        return size_ == 0;
    }

    /// Destroys the last element; the list must be non-empty.
    void PopBack()
    {
        ASSERT(size_ > 0);
        --size_;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        (&storage_[size_])->~LockedObjectInfo();
    }

    /// View over the live elements (does not transfer ownership).
    Span<LockedObjectInfo> Data()
    {
        return Span<LockedObjectInfo>(storage_, size_);
    }

    static constexpr uint32_t GetCapacityOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, capacity_);
    }

    static constexpr uint32_t GetSizeOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, size_);
    }

    static constexpr uint32_t GetDataOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, storage_);
    }

private:
    /// Grows the storage by 1.5x when size_ has reached capacity_; otherwise a
    /// no-op. Existing elements are copied into the new buffer and the old
    /// buffer is returned to the allocator.
    void ExtendIfNeeded()
    {
        ASSERT(size_ <= capacity_);
        if (size_ < capacity_) {
            return;
        }
        uint32_t newCapacity = capacity_ * 3U / 2U;  // expand by 1.5
        LockedObjectInfo *newStorage = allocator_.allocate(newCapacity);
        ASSERT(newStorage != nullptr);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        std::copy(storage_, storage_ + size_, newStorage);
        allocator_.deallocate(storage_, capacity_);
        storage_ = newStorage;
        capacity_ = newCapacity;
    }

    template <typename T, size_t ALIGNMENT = sizeof(T)>
    using Aligned __attribute__((aligned(ALIGNMENT))) = T;
    // Use uint32_t instead of size_t to guarantee the same size
    // on all platforms and simplify compiler stubs accessing this fields.
    // uint32_t is large enough to fit locked objects list's size.
    Aligned<uint32_t> capacity_;
    Aligned<uint32_t> size_ {0};
    Aligned<LockedObjectInfo *> storage_;
    Adapter allocator_;
};

/**
 * Hierarchy of thread classes
 *
 *         +--------+
 *         | Thread |
 *         +--------+
 *             |
 *      +---------------+
 *      | ManagedThread |
 *      +---------------+
 *             |
 *     +-----------------+
 *     | MTManagedThread |
 *     +-----------------+
 *
 *
 * Thread - is the most low-level entity. This class contains pointers to VM which this thread associated.
 * ManagedThread - stores runtime context to run managed code in single-threaded environment
 * MTManagedThread - extends ManagedThread to be able to run code in multi-threaded environment
 */

/// @brief Class represents arbitrary runtime thread
// NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding)
class Thread : public ThreadProxy {
public:
    using ThreadId = uint32_t;
    /// Role of the thread within the runtime (GC worker, compiler, managed, ...).
    enum class ThreadType {
        THREAD_TYPE_NONE,
        THREAD_TYPE_GC,
        THREAD_TYPE_COMPILER,
        THREAD_TYPE_MANAGED,
        THREAD_TYPE_MT_MANAGED,
        THREAD_TYPE_TASK,
        THREAD_TYPE_WORKER_THREAD,
    };

    Thread(PandaVM *vm, ThreadType threadType);
    virtual ~Thread();
    NO_COPY_SEMANTIC(Thread);
    NO_MOVE_SEMANTIC(Thread);

    /// Returns the Thread associated with the calling native thread (or nullptr).
    PANDA_PUBLIC_API static Thread *GetCurrent();
    /// Installs thread as the calling native thread's current Thread.
    PANDA_PUBLIC_API static void SetCurrent(Thread *thread);

    virtual void FreeInternalMemory();

    void FreeAllocatedMemory();

    PandaVM *GetVM() const
    {
        return vm_;
    }

    void SetVM(PandaVM *vm)
    {
        vm_ = vm;
    }

    void *GetPreWrbEntrypoint() const
    {
        // Atomic with relaxed order reason: only atomicity and modification order consistency needed
        return preWrbEntrypoint_.load(std::memory_order_relaxed);
    }

    // NOTE(review): this store uses the default seq_cst order while the getter
    // is relaxed — confirm whether the stronger ordering here is intentional.
    void SetPreWrbEntrypoint(void *entry)
    {
        preWrbEntrypoint_ = entry;
    }

    ThreadType GetThreadType() const
    {
        return threadType_;
    }

    ALWAYS_INLINE mem::GCBarrierSet *GetBarrierSet() const
    {
        return barrierSet_;
    }

    // pre_buff_ may be destroyed during Detach(), so it should be initialized once more
    void InitPreBuff();

    /// Byte offset of vm_ inside Thread (for stub/JIT access).
    static constexpr size_t GetVmOffset()
    {
        return MEMBER_OFFSET(Thread, vm_);
    }

private:
    void InitCardTableData(mem::GCBarrierSet *barrier);

protected:
    // NOLINTBEGIN(misc-non-private-member-variables-in-classes)
    // NOTE(review): these fields appear to be accessed by offset from
    // compiler-generated code (cf. GetVmOffset) — do not reorder casually.
    bool isCompiledFrame_ {false};
    ThreadId internalId_ {0};

    EntrypointsTable entrypoints_ {};
    void *object_ {nullptr};
    Frame *frame_ {nullptr};
    ObjectHeader *exception_ {nullptr};
    uintptr_t nativePc_ {};
    mem::TLAB *tlab_ {nullptr};
    void *cardTableAddr_ {nullptr};
    void *cardTableMinAddr_ {nullptr};
    std::atomic<void *> preWrbEntrypoint_ {nullptr};  // if NOT nullptr, stores pointer to PreWrbFunc and indicates we
                                                      // are currently in concurrent marking phase
    // keeps IRtoC GC PostWrb impl for storing one object
    void *postWrbOneObject_ {nullptr};
    // keeps IRtoC GC PostWrb impl for storing two objects
    void *postWrbTwoObjects_ {nullptr};
    void *stringClassPtr_ {nullptr};    // ClassRoot::STRING
    void *arrayU16ClassPtr_ {nullptr};  // ClassRoot::ARRAY_U16
    void *arrayU8ClassPtr_ {nullptr};   // ClassRoot::ARRAY_U8
    PandaVector<ObjectHeader *> *preBuff_ {nullptr};
    void *languageExtensionData_ {nullptr};
#ifndef NDEBUG
    // Debug-only flag gating runtime calls (1 = enabled).
    uintptr_t runtimeCallEnabled_ {1};
#endif
    // NOLINTEND(misc-non-private-member-variables-in-classes)

private:
    PandaVM *vm_ {nullptr};
    ThreadType threadType_ {ThreadType::THREAD_TYPE_NONE};
    mem::GCBarrierSet *barrierSet_ {nullptr};
#ifndef PANDA_TARGET_WINDOWS
    // Alternate signal stack descriptor (POSIX only).
    stack_t signalStack_ {};
#endif
};

/// RAII guard that installs thread as the current Thread for this native
/// thread on construction and resets it to nullptr on destruction.
/// Precondition (asserted): no current Thread is set when the guard is created.
template <typename ThreadT>
class ScopedCurrentThread {
public:
    explicit ScopedCurrentThread(ThreadT *thread) : thread_(thread)
    {
        ASSERT(Thread::GetCurrent() == nullptr);

        // Set current thread
        Thread::SetCurrent(thread_);
    }

    ~ScopedCurrentThread()
    {
        // Reset current thread
        Thread::SetCurrent(nullptr);
    }

    NO_COPY_SEMANTIC(ScopedCurrentThread);
    NO_MOVE_SEMANTIC(ScopedCurrentThread);

private:
    ThreadT *thread_;
};

}  // namespace ark
#ifdef PANDA_TARGET_MOBILE_WITH_NATIVE_LIBS
#include "platforms/mobile/runtime/thread-inl.cpp"
#endif  // PANDA_TARGET_MOBILE_WITH_NATIVE_LIBS

#endif  // PANDA_RUNTIME_THREAD_H_