/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_MTMANAGED_THREAD_H
#define PANDA_RUNTIME_MTMANAGED_THREAD_H

#include "managed_thread.h"

namespace panda {
class MTManagedThread : public ManagedThread {
public:
    ThreadId GetInternalId();

    static MTManagedThread *Create(
        Runtime *runtime, PandaVM *vm,
        panda::panda_file::SourceLang thread_lang = panda::panda_file::SourceLang::PANDA_ASSEMBLY);

    explicit MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm,
                             panda::panda_file::SourceLang thread_lang = panda::panda_file::SourceLang::PANDA_ASSEMBLY);
    ~MTManagedThread() override;

    MonitorPool *GetMonitorPool();
    int32_t GetMonitorCount();
    void AddMonitor(Monitor *monitor);
    void RemoveMonitor(Monitor *monitor);
    void ReleaseMonitors();

    void PushLocalObjectLocked(ObjectHeader *obj);
    void PopLocalObjectLocked(ObjectHeader *out);
    Span<LockedObjectInfo> GetLockedObjectInfos();

    void VisitGCRoots(const ObjectVisitor &cb) override;
    void UpdateGCRoots() override;

    ThreadStatus GetWaitingMonitorOldStatus()
    {
        return monitor_old_status_;
    }

    void SetWaitingMonitorOldStatus(ThreadStatus status)
    {
        monitor_old_status_ = status;
    }

    void FreeInternalMemory() override;

    static bool Sleep(uint64_t ms);

    Monitor *GetWaitingMonitor()
    {
        return waiting_monitor_;
    }

    void SetWaitingMonitor(Monitor *monitor)
    {
        ASSERT(waiting_monitor_ == nullptr || monitor == nullptr);
        waiting_monitor_ = monitor;
    }

    Monitor *GetEnteringMonitor() const
    {
        // Atomic with relaxed order reason: ordering constraints are not required
        return entering_monitor_.load(std::memory_order_relaxed);
    }

    void SetEnteringMonitor(Monitor *monitor)
    {
        // Atomic with relaxed order reason: ordering constraints are not required
        ASSERT(entering_monitor_.load(std::memory_order_relaxed) == nullptr || monitor == nullptr);
        // Atomic with relaxed order reason: ordering constraints are not required
        entering_monitor_.store(monitor, std::memory_order_relaxed);
    }

    virtual void StopDaemonThread();

    bool IsDaemon()
    {
        return is_daemon_;
    }

    void SetDaemon();

    virtual void Destroy();

    static void Yield();

    static void Interrupt(MTManagedThread *thread);

    // Need to acquire the mutex before waiting to avoid scheduling between monitor release and cond_lock_ acquire
    os::memory::Mutex *GetWaitingMutex() RETURN_CAPABILITY(cond_lock_)
    {
        return &cond_lock_;
    }
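
    // A minimal usage sketch (illustrative only, not part of this API): a waiter
    // is expected to hold the mutex returned by GetWaitingMutex() before blocking,
    // so that a Signal() cannot slip in between releasing a monitor and starting
    // to wait. `current` is assumed to be the calling MTManagedThread.
    //
    //     os::memory::LockHolder lock(*current->GetWaitingMutex());
    //     current->WaitWithLockHeld(ThreadStatus::IS_WAITING);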

    void Signal()
    {
        os::memory::LockHolder lock(cond_lock_);
        cond_var_.Signal();
    }

    bool Interrupted();

    bool IsInterrupted()
    {
        os::memory::LockHolder lock(cond_lock_);
        return is_interrupted_;
    }

    bool IsInterruptedWithLockHeld() const REQUIRES(cond_lock_)
    {
        return is_interrupted_;
    }

    void ClearInterrupted()
    {
        os::memory::LockHolder lock(cond_lock_);
        is_interrupted_ = false;
    }

    static bool ThreadIsMTManagedThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        return thread->GetThreadType() == Thread::ThreadType::THREAD_TYPE_MT_MANAGED;
    }

    static MTManagedThread *CastFromThread(Thread *thread)
    {
        ASSERT(thread != nullptr);
        ASSERT(ThreadIsMTManagedThread(thread));
        return static_cast<MTManagedThread *>(thread);
    }

    /**
     * @brief GetCurrentRaw Unsafe method to get the current MTManagedThread.
     * It can be used in hot paths to get the best performance.
     * Use it only in places where the current thread is known to be an MTManagedThread.
     * @return pointer to MTManagedThread
     */
    static MTManagedThread *GetCurrentRaw()
    {
        return CastFromThread(Thread::GetCurrent());
    }

    /**
     * @brief GetCurrent Safe method to get the current MTManagedThread.
     * @return pointer to MTManagedThread, or nullptr if the current thread is not a managed thread
     */
    static MTManagedThread *GetCurrent()
    {
        Thread *thread = Thread::GetCurrent();
        ASSERT(thread != nullptr);
        if (ThreadIsMTManagedThread(thread)) {
            return CastFromThread(thread);
        }
        // There is no guarantee that nullptr will still be returned here in the future
        return nullptr;
    }
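
    // A minimal usage sketch (illustrative only) contrasting the two accessors:
    // GetCurrent() checks the thread type and is safe on any thread, while
    // GetCurrentRaw() skips the check and must only be called where an
    // MTManagedThread is guaranteed to be current.
    //
    //     if (MTManagedThread *self = MTManagedThread::GetCurrent()) {
    //         self->Signal();  // safe: `self` is known to be a managed thread here
    //     }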

    void WaitWithLockHeld(ThreadStatus wait_status) REQUIRES(cond_lock_)
    {
        ASSERT(wait_status == ThreadStatus::IS_WAITING);
        auto old_status = GetStatus();
        UpdateStatus(wait_status);
        WaitWithLockHeldInternal();
        // Unlock before setting status RUNNING to handle MutatorReadLock without an inverted lock order.
        cond_lock_.Unlock();
        UpdateStatus(old_status);
        cond_lock_.Lock();
    }

    static void WaitForSuspension(ManagedThread *thread)
    {
        static constexpr uint32_t YIELD_ITERS = 500;
        uint32_t loop_iter = 0;
        while (thread->GetStatus() == ThreadStatus::RUNNING) {
            if (!thread->IsSuspended()) {
                LOG(WARNING, RUNTIME) << "No request for suspension, do not wait for thread " << thread->GetId();
                break;
            }

            loop_iter++;
            if (loop_iter < YIELD_ITERS) {
                MTManagedThread::Yield();
            } else {
                // Use native sleep over ManagedThread::Sleep to prevent potentially time-consuming
                // mutator_lock locking and unlocking
                static constexpr uint32_t SHORT_SLEEP_MS = 1;
                os::thread::NativeSleep(SHORT_SLEEP_MS);
            }
        }
    }

    bool TimedWaitWithLockHeld(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos, bool is_absolute = false)
        REQUIRES(cond_lock_)
    {
        ASSERT(wait_status == ThreadStatus::IS_TIMED_WAITING || wait_status == ThreadStatus::IS_SLEEPING ||
               wait_status == ThreadStatus::IS_BLOCKED || wait_status == ThreadStatus::IS_SUSPENDED ||
               wait_status == ThreadStatus::IS_COMPILER_WAITING || wait_status == ThreadStatus::IS_WAITING_INFLATION);
        auto old_status = GetStatus();
        UpdateStatus(wait_status);
        bool res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
        // Unlock before setting status RUNNING to handle MutatorReadLock without an inverted lock order.
        cond_lock_.Unlock();
        UpdateStatus(old_status);
        cond_lock_.Lock();
        return res;
    }

    bool TimedWait(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos = 0, bool is_absolute = false)
    {
        ASSERT(wait_status == ThreadStatus::IS_TIMED_WAITING || wait_status == ThreadStatus::IS_SLEEPING ||
               wait_status == ThreadStatus::IS_BLOCKED || wait_status == ThreadStatus::IS_SUSPENDED ||
               wait_status == ThreadStatus::IS_COMPILER_WAITING || wait_status == ThreadStatus::IS_WAITING_INFLATION);
        auto old_status = GetStatus();
        bool res = false;
        {
            os::memory::LockHolder lock(cond_lock_);
            UpdateStatus(wait_status);
            res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
        }
        UpdateStatus(old_status);
        return res;
    }
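
    // A minimal usage sketch (illustrative only): a millisecond sleep could be
    // built on top of TimedWait() by waiting in the IS_SLEEPING status with a
    // relative deadline. The exact return-value semantics depend on the
    // underlying ConditionVariable implementation, so they are not interpreted
    // here. `self` and `ms` are assumed to be supplied by the caller.
    //
    //     self->TimedWait(ThreadStatus::IS_SLEEPING, ms);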

    void OnRuntimeTerminated() override
    {
        TerminationLoop();
    }

    void TerminationLoop()
    {
        ASSERT(IsRuntimeTerminated());
        if (GetStatus() == ThreadStatus::NATIVE) {
            // There is a chance that the runtime will be destroyed at this time,
            // so we should not release monitors for the NATIVE status.
        } else {
            ReleaseMonitors();
            UpdateStatus(ThreadStatus::IS_TERMINATED_LOOP);
        }
        while (true) {
            static constexpr unsigned int LONG_SLEEP_MS = 1000000;
            os::thread::NativeSleep(LONG_SLEEP_MS);
        }
    }

    ObjectHeader *GetEnterMonitorObject()
    {
        ASSERT_MANAGED_CODE();
        return enter_monitor_object_;
    }

    void SetEnterMonitorObject(ObjectHeader *object_header)
    {
        ASSERT_MANAGED_CODE();
        enter_monitor_object_ = object_header;
    }

    MTManagedThread *GetNextWait() const
    {
        return next_;
    }

    void SetWaitNext(MTManagedThread *next)
    {
        next_ = next;
    }

    mem::ReferenceStorage *GetPtReferenceStorage() const
    {
        return pt_reference_storage_.get();
    }

    static constexpr uint32_t GetLockedObjectCapacityOffset()
    {
        return GetLocalObjectLockedOffset() + LockedObjectList<>::GetCapacityOffset();
    }

    static constexpr uint32_t GetLockedObjectSizeOffset()
    {
        return GetLocalObjectLockedOffset() + LockedObjectList<>::GetSizeOffset();
    }

    static constexpr uint32_t GetLockedObjectDataOffset()
    {
        return GetLocalObjectLockedOffset() + LockedObjectList<>::GetDataOffset();
    }

    static constexpr uint32_t GetLocalObjectLockedOffset()
    {
        return MEMBER_OFFSET(MTManagedThread, local_objects_locked_);
    }

protected:
    virtual void ProcessCreatedThread();

    void WaitWithLockHeldInternal() REQUIRES(cond_lock_)
    {
        ASSERT(this == ManagedThread::GetCurrent());
        cond_var_.Wait(&cond_lock_);
    }

    bool TimedWaitWithLockHeldInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false) REQUIRES(cond_lock_)
    {
        ASSERT(this == ManagedThread::GetCurrent());
        return cond_var_.TimedWait(&cond_lock_, timeout, nanos, is_absolute);
    }

    void SignalWithLockHeld() REQUIRES(cond_lock_)
    {
        cond_var_.Signal();
    }

    void SetInterruptedWithLockHeld(bool interrupted) REQUIRES(cond_lock_)
    {
        is_interrupted_ = interrupted;
    }

private:
    MTManagedThread *next_ {nullptr};

    LockedObjectList<> local_objects_locked_;

    // Implementation of Wait/Notify
    os::memory::ConditionVariable cond_var_ GUARDED_BY(cond_lock_);
    os::memory::Mutex cond_lock_;

    bool is_interrupted_ GUARDED_BY(cond_lock_) = false;

    bool is_daemon_ = false;

    Monitor *waiting_monitor_;

    // Count of monitors owned by this thread
    std::atomic_int32_t monitor_count_ {0};
    // Used for dumping stack info
    ThreadStatus monitor_old_status_ {ThreadStatus::FINISHED};
    ObjectHeader *enter_monitor_object_ {nullptr};

    // The monitor this thread is currently entering. It is required only to detect deadlocks with daemon threads.
    std::atomic<Monitor *> entering_monitor_;

    PandaUniquePtr<mem::ReferenceStorage> pt_reference_storage_ {nullptr};

    NO_COPY_SEMANTIC(MTManagedThread);
    NO_MOVE_SEMANTIC(MTManagedThread);
};

}  // namespace panda

#endif  // PANDA_RUNTIME_MTMANAGED_THREAD_H