• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MTMANAGED_THREAD_H
#define PANDA_RUNTIME_MTMANAGED_THREAD_H

#include "managed_thread.h"

namespace ark {
21 class MTManagedThread : public ManagedThread {
22 public:
23     ThreadId GetInternalId();
24 
25     PANDA_PUBLIC_API static MTManagedThread *Create(
26         Runtime *runtime, PandaVM *vm,
27         ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);
28 
29     explicit MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm,
30                              ark::panda_file::SourceLang threadLang = ark::panda_file::SourceLang::PANDA_ASSEMBLY);
31     ~MTManagedThread() override;
32 
33     MonitorPool *GetMonitorPool();
34     int32_t GetMonitorCount();
35     void AddMonitor(Monitor *monitor);
36     void RemoveMonitor(Monitor *monitor);
37     void ReleaseMonitors();
38 
39     void PushLocalObjectLocked(ObjectHeader *obj);
40     void PopLocalObjectLocked(ObjectHeader *out);
41     Span<LockedObjectInfo> GetLockedObjectInfos();
42 
43     void VisitGCRoots(const ObjectVisitor &cb) override;
44     void UpdateGCRoots() override;
45 
GetWaitingMonitorOldStatus()46     ThreadStatus GetWaitingMonitorOldStatus()
47     {
48         return monitorOldStatus_;
49     }
50 
SetWaitingMonitorOldStatus(ThreadStatus status)51     void SetWaitingMonitorOldStatus(ThreadStatus status)
52     {
53         monitorOldStatus_ = status;
54     }
55 
56     void FreeInternalMemory() override;
57 
58     static bool Sleep(uint64_t ms);
59 
GetWaitingMonitor()60     Monitor *GetWaitingMonitor()
61     {
62         return waitingMonitor_;
63     }
64 
SetWaitingMonitor(Monitor * monitor)65     void SetWaitingMonitor(Monitor *monitor)
66     {
67         ASSERT(waitingMonitor_ == nullptr || monitor == nullptr);
68         waitingMonitor_ = monitor;
69     }
70 
GetEnteringMonitor()71     Monitor *GetEnteringMonitor() const
72     {
73         // Atomic with relaxed order reason: ordering constraints are not required
74         return enteringMonitor_.load(std::memory_order_relaxed);
75     }
76 
SetEnteringMonitor(Monitor * monitor)77     void SetEnteringMonitor(Monitor *monitor)
78     {
79         // Atomic with relaxed order reason: ordering constraints are not required
80         ASSERT(enteringMonitor_.load(std::memory_order_relaxed) == nullptr || monitor == nullptr);
81         // Atomic with relaxed order reason: ordering constraints are not required
82         enteringMonitor_.store(monitor, std::memory_order_relaxed);
83     }
84 
85     virtual void StopDaemonThread();
86 
87     /* @sync 1
88      * @description This synchronization point can be used to add
89      * new attributes or methods to this class.
90      */
91 
IsDaemon()92     bool IsDaemon()
93     {
94         return isDaemon_;
95     }
96 
97     void SetDaemon();
98 
99     virtual void Destroy();
100 
101     PANDA_PUBLIC_API static void Yield();
102 
103     PANDA_PUBLIC_API static void Interrupt(MTManagedThread *thread);
104 
105     // Need to acquire the mutex before waiting to avoid scheduling between monitor release and clond_lock acquire
GetWaitingMutex()106     os::memory::Mutex *GetWaitingMutex() RETURN_CAPABILITY(condLock_)
107     {
108         return &condLock_;
109     }
110 
Signal()111     void Signal()
112     {
113         os::memory::LockHolder lock(condLock_);
114         condVar_.Signal();
115     }
116 
117     PANDA_PUBLIC_API bool Interrupted();
118 
IsInterrupted()119     bool IsInterrupted()
120     {
121         os::memory::LockHolder lock(condLock_);
122         return isInterrupted_;
123     }
124 
IsInterruptedWithLockHeld()125     bool IsInterruptedWithLockHeld() const REQUIRES(condLock_)
126     {
127         return isInterrupted_;
128     }
129 
ClearInterrupted()130     void ClearInterrupted()
131     {
132         os::memory::LockHolder lock(condLock_);
133         /* @sync 1
134          * @description Before we clear is_interrupted_ flag.
135          * */
136         isInterrupted_ = false;
137     }
138 
ThreadIsMTManagedThread(Thread * thread)139     static bool ThreadIsMTManagedThread(Thread *thread)
140     {
141         ASSERT(thread != nullptr);
142         return thread->GetThreadType() == Thread::ThreadType::THREAD_TYPE_MT_MANAGED;
143     }
144 
CastFromThread(Thread * thread)145     static MTManagedThread *CastFromThread(Thread *thread)
146     {
147         ASSERT(thread != nullptr);
148         ASSERT(ThreadIsMTManagedThread(thread));
149         return static_cast<MTManagedThread *>(thread);
150     }
151 
152     /**
153      * @brief GetCurrentRaw Unsafe method to get current MTManagedThread.
154      * It can be used in hotspots to get the best performance.
155      * We can only use this method in places where the MTManagedThread exists.
156      * @return pointer to MTManagedThread
157      */
GetCurrentRaw()158     static MTManagedThread *GetCurrentRaw()
159     {
160         return CastFromThread(Thread::GetCurrent());
161     }
162 
163     /**
164      * @brief GetCurrent Safe method to gets current MTManagedThread.
165      * @return pointer to MTManagedThread or nullptr (if current thread is not a managed thread)
166      */
GetCurrent()167     static PANDA_PUBLIC_API MTManagedThread *GetCurrent()
168     {
169         Thread *thread = Thread::GetCurrent();
170         ASSERT(thread != nullptr);
171         if (ThreadIsMTManagedThread(thread)) {
172             return CastFromThread(thread);
173         }
174         // no guarantee that we will return nullptr here in the future
175         return nullptr;
176     }
177 
WaitWithLockHeld(ThreadStatus waitStatus)178     void WaitWithLockHeld(ThreadStatus waitStatus) REQUIRES(condLock_)
179     {
180         ASSERT(waitStatus == ThreadStatus::IS_WAITING);
181         auto oldStatus = GetStatus();
182         UpdateStatus(waitStatus);
183         WaitWithLockHeldInternal();
184         // Unlock before setting status RUNNING to handle MutatorReadLock without inversed lock order.
185         condLock_.Unlock();
186         UpdateStatus(oldStatus);
187         condLock_.Lock();
188     }
189 
WaitForSuspension(ManagedThread * thread)190     static void WaitForSuspension(ManagedThread *thread)
191     {
192         static constexpr uint32_t YIELD_ITERS = 500;
193         uint32_t loopIter = 0;
194         while (thread->GetStatus() == ThreadStatus::RUNNING) {
195             if (!thread->IsSuspended()) {
196                 LOG(WARNING, RUNTIME) << "No request for suspension, do not wait thread " << thread->GetId();
197                 break;
198             }
199 
200             loopIter++;
201             if (loopIter < YIELD_ITERS) {
202                 MTManagedThread::Yield();
203             } else {
204                 // Use native sleep over ManagedThread::Sleep to prevent potentially time consuming
205                 // mutator_lock locking and unlocking
206                 static constexpr uint32_t SHORT_SLEEP_MS = 1;
207                 os::thread::NativeSleep(SHORT_SLEEP_MS);
208             }
209         }
210     }
211 
212     bool TimedWaitWithLockHeld(ThreadStatus waitStatus, uint64_t timeout, uint64_t nanos, bool isAbsolute = false)
REQUIRES(condLock_)213         REQUIRES(condLock_)
214     {
215         ASSERT(waitStatus == ThreadStatus::IS_TIMED_WAITING || waitStatus == ThreadStatus::IS_SLEEPING ||
216                waitStatus == ThreadStatus::IS_BLOCKED || waitStatus == ThreadStatus::IS_SUSPENDED ||
217                waitStatus == ThreadStatus::IS_COMPILER_WAITING || waitStatus == ThreadStatus::IS_WAITING_INFLATION);
218         auto oldStatus = GetStatus();
219         UpdateStatus(waitStatus);
220         bool res = TimedWaitWithLockHeldInternal(timeout, nanos, isAbsolute);
221         // Unlock before setting status RUNNING to handle MutatorReadLock without inversed lock order.
222         condLock_.Unlock();
223         UpdateStatus(oldStatus);
224         condLock_.Lock();
225         return res;
226     }
227 
228     bool TimedWait(ThreadStatus waitStatus, uint64_t timeout, uint64_t nanos = 0, bool isAbsolute = false)
229     {
230         ASSERT(waitStatus == ThreadStatus::IS_TIMED_WAITING || waitStatus == ThreadStatus::IS_SLEEPING ||
231                waitStatus == ThreadStatus::IS_BLOCKED || waitStatus == ThreadStatus::IS_SUSPENDED ||
232                waitStatus == ThreadStatus::IS_COMPILER_WAITING || waitStatus == ThreadStatus::IS_WAITING_INFLATION);
233         auto oldStatus = GetStatus();
234         bool res = false;
235         {
236             os::memory::LockHolder lock(condLock_);
237             UpdateStatus(waitStatus);
238             /* @sync 1
239              * @description Right after changing the thread's status and before going to sleep
240              * */
241             res = TimedWaitWithLockHeldInternal(timeout, nanos, isAbsolute);
242         }
243         UpdateStatus(oldStatus);
244         return res;
245     }
246 
OnRuntimeTerminated()247     void OnRuntimeTerminated() override
248     {
249         TerminationLoop();
250     }
251 
TerminationLoop()252     void TerminationLoop()
253     {
254         ASSERT(IsRuntimeTerminated());
255         /* @sync 1
256          * @description This point is right before the thread starts to release all his monitors.
257          * All monitors should be released by the thread before completing its execution by stepping into the
258          * termination loop.
259          * */
260         if (GetStatus() == ThreadStatus::NATIVE) {
261             // There is a chance, that the runtime will be destroyed at this time.
262             // Thus we should not release monitors for NATIVE status
263         } else {
264             ReleaseMonitors();
265             /* @sync 2
266              * @description This point is right after the thread has released all his monitors and right before it steps
267              * into the termination loop.
268              * */
269             UpdateStatus(ThreadStatus::IS_TERMINATED_LOOP);
270             /* @sync 3
271              * @description This point is right after the thread has released all his monitors and changed status to
272              * IS_TERMINATED_LOOP
273              * */
274         }
275         while (true) {
276             static constexpr unsigned int LONG_SLEEP_MS = 1000000;
277             os::thread::NativeSleep(LONG_SLEEP_MS);
278         }
279     }
280 
GetEnterMonitorObject()281     ObjectHeader *GetEnterMonitorObject()
282     {
283         ASSERT_MANAGED_CODE();
284         return enterMonitorObject_;
285     }
286 
SetEnterMonitorObject(ObjectHeader * objectHeader)287     void SetEnterMonitorObject(ObjectHeader *objectHeader)
288     {
289         ASSERT_MANAGED_CODE();
290         enterMonitorObject_ = objectHeader;
291     }
292 
GetNextWait()293     MTManagedThread *GetNextWait() const
294     {
295         return next_;
296     }
297 
SetWaitNext(MTManagedThread * next)298     void SetWaitNext(MTManagedThread *next)
299     {
300         next_ = next;
301     }
302 
GetPtReferenceStorage()303     mem::ReferenceStorage *GetPtReferenceStorage() const
304     {
305         return ptReferenceStorage_.get();
306     }
307 
GetLockedObjectCapacityOffset()308     static constexpr uint32_t GetLockedObjectCapacityOffset()
309     {
310         return GetLocalObjectLockedOffset() + LockedObjectList<>::GetCapacityOffset();
311     }
312 
GetLockedObjectSizeOffset()313     static constexpr uint32_t GetLockedObjectSizeOffset()
314     {
315         return GetLocalObjectLockedOffset() + LockedObjectList<>::GetSizeOffset();
316     }
317 
GetLockedObjectDataOffset()318     static constexpr uint32_t GetLockedObjectDataOffset()
319     {
320         return GetLocalObjectLockedOffset() + LockedObjectList<>::GetDataOffset();
321     }
322 
GetLocalObjectLockedOffset()323     static constexpr uint32_t GetLocalObjectLockedOffset()
324     {
325         return MEMBER_OFFSET(MTManagedThread, localObjectsLocked_);
326     }
327 
328 protected:
329     virtual void ProcessCreatedThread();
330 
WaitWithLockHeldInternal()331     void WaitWithLockHeldInternal() REQUIRES(condLock_)
332     {
333         ASSERT(this == ManagedThread::GetCurrent());
334         condVar_.Wait(&condLock_);
335     }
336 
REQUIRES(condLock_)337     bool TimedWaitWithLockHeldInternal(uint64_t timeout, uint64_t nanos, bool isAbsolute = false) REQUIRES(condLock_)
338     {
339         ASSERT(this == ManagedThread::GetCurrent());
340         return condVar_.TimedWait(&condLock_, timeout, nanos, isAbsolute);
341     }
342 
SignalWithLockHeld()343     void SignalWithLockHeld() REQUIRES(condLock_)
344     {
345         condVar_.Signal();
346     }
347 
SetInterruptedWithLockHeld(bool interrupted)348     void SetInterruptedWithLockHeld(bool interrupted) REQUIRES(condLock_)
349     {
350         isInterrupted_ = interrupted;
351     }
352 
353 private:
354     MTManagedThread *next_ {nullptr};
355 
356     LockedObjectList<> localObjectsLocked_;
357 
358     // Implementation of Wait/Notify
359     os::memory::ConditionVariable condVar_ GUARDED_BY(condLock_);
360     os::memory::Mutex condLock_;
361 
362     bool isInterrupted_ GUARDED_BY(condLock_) = false;
363 
364     bool isDaemon_ = false;
365 
366     Monitor *waitingMonitor_ {nullptr};
367 
368     // Count of monitors owned by this thread
369     std::atomic_int32_t monitorCount_ {0};
370     // Used for dumping stack info
371     ThreadStatus monitorOldStatus_ {ThreadStatus::FINISHED};
372     ObjectHeader *enterMonitorObject_ {nullptr};
373 
374     // Monitor, in which this thread is entering. It is required only to detect deadlocks with daemon threads.
375     std::atomic<Monitor *> enteringMonitor_;
376 
377     PandaUniquePtr<mem::ReferenceStorage> ptReferenceStorage_ {nullptr};
378 
379     NO_COPY_SEMANTIC(MTManagedThread);
380     NO_MOVE_SEMANTIC(MTManagedThread);
381 };

}  // namespace ark

#endif  // PANDA_RUNTIME_MTMANAGED_THREAD_H