/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_THREAD_H
#define PANDA_RUNTIME_THREAD_H

#include <memory>
#include <chrono>
#include <limits>
#include <thread>
#include <atomic>
#include <csignal>

#include "libpandabase/mem/gc_barrier.h"
#include "libpandabase/mem/ringbuf/lock_free_ring_buffer.h"
#include "libpandabase/os/mutex.h"
#include "libpandabase/os/thread.h"
#include "libpandabase/utils/arch.h"
#include "libpandabase/utils/list.h"
#include "libpandabase/utils/tsan_interface.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_smart_pointers.h"
#include "runtime/include/object_header-inl.h"
#include "runtime/include/stack_walker.h"
#include "runtime/include/language_context.h"
#include "runtime/include/locks.h"
#include "runtime/include/thread_status.h"
#include "runtime/interpreter/cache.h"
#include "runtime/mem/frame_allocator-inl.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/internal_allocator.h"
#include "runtime/mem/tlab.h"
#include "runtime/mem/refstorage/reference_storage.h"
#include "runtime/entrypoints/entrypoints.h"
#include "events/events.h"

#define ASSERT_HAVE_ACCESS_TO_MANAGED_OBJECTS()

namespace panda {

template <class TYPE>
class HandleStorage;
template <class TYPE>
class GlobalHandleStorage;
template <class TYPE>
class HandleScope;

namespace test {
class ThreadTest;
}  // namespace test

class ThreadManager;
class Runtime;
class PandaVM;
class MonitorPool;

namespace mem {
class GCBarrierSet;
}  // namespace mem

namespace tooling {
class PtThreadInfo;
}  // namespace tooling

enum ThreadFlag { NO_FLAGS = 0, SUSPEND_REQUEST = 2, RUNTIME_TERMINATION_REQUEST = 4, SAFEPOINT_REQUEST = 8 };

struct CustomTLSData {
    CustomTLSData() = default;
    virtual ~CustomTLSData() = default;

    NO_COPY_SEMANTIC(CustomTLSData);
    NO_MOVE_SEMANTIC(CustomTLSData);
};

class LockedObjectInfo {
public:
    LockedObjectInfo(ObjectHeader *obj, void *fp) : object_(obj), stack_(fp) {}

    inline ObjectHeader *GetObject() const
    {
        return object_;
    }

    inline void SetObject(ObjectHeader *objNew)
    {
        object_ = objNew;
    }

    inline void *GetStack() const
    {
        return stack_;
    }

    inline void SetStack(void *stackNew)
    {
        stack_ = stackNew;
    }

    static constexpr uint32_t GetMonitorOffset()
    {
        return MEMBER_OFFSET(LockedObjectInfo, object_);
    }

    static constexpr uint32_t GetStackOffset()
    {
        return MEMBER_OFFSET(LockedObjectInfo, stack_);
    }

private:
    ObjectHeader *object_;
    void *stack_;
};

template <typename Adapter = mem::AllocatorAdapter<LockedObjectInfo>>
class LockedObjectList {
    static constexpr uint32_t DEFAULT_CAPACITY = 16;

public:
    LockedObjectList() : capacity_(DEFAULT_CAPACITY), allocator_(Adapter())
    {
        storage_ = allocator_.allocate(DEFAULT_CAPACITY);
    }

    ~LockedObjectList()
    {
        allocator_.deallocate(storage_, capacity_);
    }

    NO_COPY_SEMANTIC(LockedObjectList);
    NO_MOVE_SEMANTIC(LockedObjectList);

    void PushBack(LockedObjectInfo data)
    {
        ExtendIfNeeded();
        storage_[size_++] = data;
    }

    template <typename... Args>
    LockedObjectInfo &EmplaceBack(Args &&...args)
    {
        ExtendIfNeeded();
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        auto *rawMem = &storage_[size_];
        auto *datum = new (rawMem) LockedObjectInfo(std::forward<Args>(args)...);
        size_++;
        return *datum;
    }

    LockedObjectInfo &Back()
    {
        ASSERT(size_ > 0);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        return storage_[size_ - 1];
    }

    bool Empty() const
    {
        return size_ == 0;
    }

    void PopBack()
    {
        ASSERT(size_ > 0);
        --size_;
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        (&storage_[size_])->~LockedObjectInfo();
    }

    Span<LockedObjectInfo> Data()
    {
        return Span<LockedObjectInfo>(storage_, size_);
    }

    static constexpr uint32_t GetCapacityOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, capacity_);
    }

    static constexpr uint32_t GetSizeOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, size_);
    }

    static constexpr uint32_t GetDataOffset()
    {
        return MEMBER_OFFSET(LockedObjectList, storage_);
    }

private:
    void ExtendIfNeeded()
    {
        ASSERT(size_ <= capacity_);
        if (size_ < capacity_) {
            return;
        }
        uint32_t newCapacity = capacity_ * 3U / 2U;  // expand by 1.5
        LockedObjectInfo *newStorage = allocator_.allocate(newCapacity);
        ASSERT(newStorage != nullptr);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        std::copy(storage_, storage_ + size_, newStorage);
        allocator_.deallocate(storage_, capacity_);
        storage_ = newStorage;
        capacity_ = newCapacity;
    }

    template <typename T, size_t ALIGNMENT = sizeof(T)>
    using Aligned __attribute__((aligned(ALIGNMENT))) = T;
    // Use uint32_t instead of size_t to guarantee the same size
    // on all platforms and to simplify the compiler stubs accessing these fields.
    // uint32_t is large enough to hold the locked object list's size.
    Aligned<uint32_t> capacity_;
    Aligned<uint32_t> size_ {0};
    Aligned<LockedObjectInfo *> storage_;
    Adapter allocator_;
};
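// Minimal usage sketch (illustrative only, not part of the runtime API; the names `obj` and
// `framePointer` below are hypothetical): the list keeps LockedObjectInfo records, i.e. an object
// together with the stack/frame pointer that locked it, and grows its storage on demand.
//
//   LockedObjectList<> lockedObjects;
//   lockedObjects.EmplaceBack(obj, framePointer);  // remember a newly locked object
//   ...
//   lockedObjects.PopBack();                       // drop the record when the lock is released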

/**
 *  Hierarchy of thread classes
 *
 *         +--------+
 *         | Thread |
 *         +--------+
 *             |
 *      +---------------+
 *      | ManagedThread |
 *      +---------------+
 *             |
 *     +-----------------+
 *     | MTManagedThread |
 *     +-----------------+
 *
 *
 *  Thread - the lowest-level entity; it holds a pointer to the VM this thread is associated with.
 *  ManagedThread - stores the runtime context needed to run managed code in a single-threaded environment.
 *  MTManagedThread - extends ManagedThread so that code can also run in a multi-threaded environment.
 */
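// Illustrative sketch (an assumption about typical call sites, not a prescribed pattern): runtime
// code reaches the thread bound to the current native thread through the static accessors declared
// on Thread below, and from there the owning VM:
//
//   panda::Thread *current = panda::Thread::GetCurrent();  // may be nullptr if no thread is installed
//   if (current != nullptr) {
//       panda::PandaVM *vm = current->GetVM();              // VM this thread is associated with
//       ...
//   }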

/// @brief Class representing an arbitrary runtime thread
// NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding)
class Thread {
public:
    using ThreadId = uint32_t;
    enum class ThreadType {
        THREAD_TYPE_NONE,
        THREAD_TYPE_GC,
        THREAD_TYPE_COMPILER,
        THREAD_TYPE_MANAGED,
        THREAD_TYPE_MT_MANAGED,
        THREAD_TYPE_TASK,
        THREAD_TYPE_WORKER_THREAD,
    };

    Thread(PandaVM *vm, ThreadType threadType);
    virtual ~Thread();
    NO_COPY_SEMANTIC(Thread);
    NO_MOVE_SEMANTIC(Thread);

    PANDA_PUBLIC_API static Thread *GetCurrent();
    PANDA_PUBLIC_API static void SetCurrent(Thread *thread);

    virtual void FreeInternalMemory();

    void FreeAllocatedMemory();

    PandaVM *GetVM() const
    {
        return vm_;
    }

    void SetVM(PandaVM *vm)
    {
        vm_ = vm;
    }

    MutatorLock *GetMutatorLock()
    {
        return mutatorLock_;
    }

    const MutatorLock *GetMutatorLock() const
    {
        return mutatorLock_;
    }

    void *GetPreWrbEntrypoint() const
    {
        // Atomic with relaxed order reason: only atomicity and modification order consistency needed
        return preWrbEntrypoint_.load(std::memory_order_relaxed);
    }

    void SetPreWrbEntrypoint(void *entry)
    {
        preWrbEntrypoint_ = entry;
    }

    ThreadType GetThreadType() const
    {
        return threadType_;
    }

    ALWAYS_INLINE mem::GCBarrierSet *GetBarrierSet() const
    {
        return barrierSet_;
    }

#ifndef NDEBUG
    MutatorLock::MutatorLockState GetLockState() const
    {
        return lockState_;
    }

    void SetLockState(MutatorLock::MutatorLockState state)
    {
        lockState_ = state;
    }
#endif

    // preBuff_ may be destroyed during Detach(), so it should be initialized again
    void InitPreBuff();

    static constexpr size_t GetVmOffset()
    {
        return MEMBER_OFFSET(Thread, vm_);
    }

private:
    void InitCardTableData(mem::GCBarrierSet *barrier);

protected:
    union __attribute__((__aligned__(4))) FlagsAndThreadStatus {
        FlagsAndThreadStatus() = default;
        ~FlagsAndThreadStatus() = default;
        struct __attribute__((packed)) {
            volatile uint16_t flags;
            volatile enum ThreadStatus status;
        } asStruct;
        volatile uint32_t asInt;
        uint32_t asNonvolatileInt;
        std::atomic_uint32_t asAtomic;

        NO_COPY_SEMANTIC(FlagsAndThreadStatus);
        NO_MOVE_SEMANTIC(FlagsAndThreadStatus);
    };

    // NOLINTBEGIN(misc-non-private-member-variables-in-classes)
    bool isCompiledFrame_ {false};
    FlagsAndThreadStatus fts_ {};
    ThreadId internalId_ {0};

    EntrypointsTable entrypoints_ {};
    void *object_ {nullptr};
    Frame *frame_ {nullptr};
    ObjectHeader *exception_ {nullptr};
    uintptr_t nativePc_ {};
    mem::TLAB *tlab_ {nullptr};
    void *cardTableAddr_ {nullptr};
    void *cardTableMinAddr_ {nullptr};
    std::atomic<void *> preWrbEntrypoint_ {nullptr};  // if NOT nullptr, stores pointer to PreWrbFunc and indicates we
                                                      // are currently in concurrent marking phase
    // keeps IRtoC GC PostWrb impl for storing one object
    void *postWrbOneObject_ {nullptr};
    // keeps IRtoC GC PostWrb impl for storing two objects
    void *postWrbTwoObjects_ {nullptr};
    void *stringClassPtr_ {nullptr};    // ClassRoot::STRING
    void *arrayU16ClassPtr_ {nullptr};  // ClassRoot::ARRAY_U16
    PandaVector<ObjectHeader *> *preBuff_ {nullptr};
    void *languageExtensionData_ {nullptr};
#ifndef NDEBUG
    uintptr_t runtimeCallEnabled_ {1};
#endif
    PANDA_PUBLIC_API static ThreadFlag initialThreadFlag_;
    // NOLINTEND(misc-non-private-member-variables-in-classes)

private:
    PandaVM *vm_ {nullptr};
    ThreadType threadType_ {ThreadType::THREAD_TYPE_NONE};
    mem::GCBarrierSet *barrierSet_ {nullptr};
    MutatorLock *mutatorLock_;
#ifndef NDEBUG
    MutatorLock::MutatorLockState lockState_ = MutatorLock::UNLOCKED;
#endif
#ifndef PANDA_TARGET_WINDOWS
    stack_t signalStack_ {};
#endif
};

template <typename ThreadT>
class ScopedCurrentThread {
public:
    explicit ScopedCurrentThread(ThreadT *thread) : thread_(thread)
    {
        ASSERT(Thread::GetCurrent() == nullptr);

        // Set current thread
        Thread::SetCurrent(thread_);
    }

    ~ScopedCurrentThread()
    {
        // Reset current thread
        Thread::SetCurrent(nullptr);
    }

    NO_COPY_SEMANTIC(ScopedCurrentThread);
    NO_MOVE_SEMANTIC(ScopedCurrentThread);

private:
    ThreadT *thread_;
};
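// Illustrative usage sketch (hypothetical variable names, assuming a native thread that currently
// has no Thread installed): ScopedCurrentThread is an RAII guard that installs `thread` as the
// current thread for the duration of a scope and resets it to nullptr on exit:
//
//   {
//       ScopedCurrentThread<Thread> scope(thread);  // Thread::GetCurrent() == thread inside the scope
//       ...                                         // run code that needs a current thread
//   }                                               // Thread::GetCurrent() == nullptr again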

}  // namespace panda

#ifdef PANDA_TARGET_MOBILE_WITHNATIVE_LIBS
#include "platforms/mobile/runtime/thread-inl.cpp"
#endif  // PANDA_TARGET_MOBILE_WITHNATIVE_LIBS

#endif  // PANDA_RUNTIME_THREAD_H