1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "libpandabase/os/thread.h"
17 #include "libpandabase/utils/logger.h"
18 #include "libpandabase/utils/utf.h"
19 #include "runtime/include/mem/allocator.h"
20 #include "runtime/include/panda_vm.h"
21 #include "runtime/include/runtime.h"
22 #include "runtime/include/thread-inl.h"
23 #include "runtime/include/thread_scopes.h"
24 #include "runtime/lock_order_graph.h"
25 #include "runtime/thread_manager.h"
26
27 namespace panda {
28
ThreadManager(mem::InternalAllocatorPtr allocator)29 ThreadManager::ThreadManager(mem::InternalAllocatorPtr allocator) : threads_(allocator->Adapter())
30 {
31 last_id_ = 0;
32 pending_threads_ = 0;
33 }
34
// Empties the registered-thread list. The MTManagedThread objects themselves
// are not owned here: live threads are deleted in UnregisterExitedThread, and
// deregistered daemon threads are retained in daemon_threads_.
ThreadManager::~ThreadManager()
{
    threads_.clear();
}
39
GetInternalThreadId()40 uint32_t ThreadManager::GetInternalThreadId()
41 {
42 os::memory::LockHolder lock(ids_lock_);
43 for (size_t i = 0; i < internal_thread_ids_.size(); i++) {
44 last_id_ = (last_id_ + 1) % internal_thread_ids_.size();
45 if (!internal_thread_ids_[last_id_]) {
46 internal_thread_ids_.set(last_id_);
47 return last_id_ + 1; // 0 is reserved as uninitialized value.
48 }
49 }
50 LOG(FATAL, RUNTIME) << "Out of internal thread ids";
51 UNREACHABLE();
52 }
53
RemoveInternalThreadId(uint32_t id)54 void ThreadManager::RemoveInternalThreadId(uint32_t id)
55 {
56 id--; // 0 is reserved as uninitialized value.
57 os::memory::LockHolder lock(ids_lock_);
58 ASSERT(internal_thread_ids_[id]);
59 internal_thread_ids_.reset(id);
60 }
61
GetThreadByInternalThreadIdWithLockHeld(uint32_t thread_id)62 MTManagedThread *ThreadManager::GetThreadByInternalThreadIdWithLockHeld(uint32_t thread_id)
63 {
64 // Do not optimize with std::find_if - sometimes there are problems with incorrect memory accesses
65 for (auto thread : threads_) {
66 if (thread->GetInternalId() == thread_id) {
67 return thread;
68 }
69 }
70 return nullptr;
71 }
72
// Removes every thread (except the caller) whose status permits deregistration,
// and classifies the survivors to detect a possible daemon-thread deadlock.
// Returns true when shutdown may proceed: either only the current thread remains
// registered, or a confirmed deadlock was force-resolved by StopThreadsOnDeadlock.
// NOTE(review): mutates threads_ without taking thread_lock_ here - presumably
// the caller (WaitForDeregistration) already holds it; confirm against call sites.
bool ThreadManager::DeregisterSuspendedThreads()
{
    auto current = MTManagedThread::GetCurrent();
    auto i = threads_.begin();
    bool is_potentially_blocked_thread_present = false;
    bool is_nonblocked_thread_present = false;
    while (i != threads_.end()) {
        MTManagedThread *thread = *i;
        auto status = thread->GetStatus();
        // Do not deregister current thread (which should be in status NATIVE) as HasNoActiveThreads
        // assumes it stays registered; only threads in statuses FINISHED, IS_TERMINATED_LOOP and NATIVE
        // can be deregistered.
        if (thread != current && CanDeregister(status)) {
            DecreaseCountersForThread(thread);
            // erase() returns the next valid iterator; do not advance again.
            i = threads_.erase(i);
            continue;
        }
        if (status == ThreadStatus::NATIVE || status == ThreadStatus::IS_BLOCKED) {
            // We have a blocked thread - there is a potential deadlock
            is_potentially_blocked_thread_present = true;
        } else if (thread != current) {
            // We have at least one non-blocked thread - deadlock is impossible
            is_nonblocked_thread_present = true;
        }
        if (thread != current) {
            LOG(DEBUG, RUNTIME) << "Daemon thread " << thread->GetId()
                                << " remains in DeregisterSuspendedThreads, status = "
                                << ManagedThread::ThreadStatusAsString(status);
        }
        i++;
    }
    if (is_potentially_blocked_thread_present && !is_nonblocked_thread_present) {
        // All threads except current are blocked (have BLOCKED or NATIVE status)
        LOG(DEBUG, RUNTIME) << "Potential deadlock with daemon threads is detected";
        return StopThreadsOnDeadlock(current);
    }
    // Sanity check, we should get at least current thread in that list.
    ASSERT(!threads_.empty());
    return threads_.size() == 1;
}
113
DecreaseCountersForThread(MTManagedThread * thread)114 void ThreadManager::DecreaseCountersForThread(MTManagedThread *thread)
115 {
116 if (thread->IsDaemon()) {
117 daemon_threads_count_--;
118 // Do not delete this thread structure as it may be used by suspended thread
119 daemon_threads_.push_back(thread);
120 }
121 threads_count_--;
122 }
123
StopThreadsOnDeadlock(MTManagedThread * current)124 bool ThreadManager::StopThreadsOnDeadlock(MTManagedThread *current)
125 {
126 if (!LockOrderGraph::CheckForTerminationLoops(threads_, daemon_threads_, current)) {
127 LOG(DEBUG, RUNTIME) << "Deadlock with daemon threads was not confirmed";
128 return false;
129 }
130
131 os::memory::Mutex::IgnoreChecksOnDeadlock();
132 auto i = threads_.begin();
133 while (i != threads_.end()) {
134 MTManagedThread *thread = *i;
135 if (thread != current) {
136 DecreaseCountersForThread(thread);
137 i = threads_.erase(i);
138 continue;
139 }
140 i++;
141 }
142 return true;
143 }
144
// Blocks until the VM can shut down: waits for non-daemon threads to finish,
// asks daemon threads to stop, then polls until every other thread is
// deregistered (or a confirmed daemon deadlock is force-resolved), and finally
// releases the internal memory of retained daemon-thread structures.
void ThreadManager::WaitForDeregistration()
{
    trace::ScopedTrace scoped_trace(__FUNCTION__);
    {
        os::memory::LockHolder lock(thread_lock_);

        // First wait for non-daemon threads to finish
        while (!HasNoActiveThreads()) {
            stop_var_.TimedWait(&thread_lock_, WAIT_INTERVAL);
        }

        // Then stop daemon threads
        StopDaemonThreads();

        // Finally wait until all threads are suspended
        while (true) {
            if (pending_threads_ != 0) {
                // There are threads, which are not completely registered
                // We can not destroy other threads, as they may use shared data (waiting mutexes)
                stop_var_.TimedWait(&thread_lock_, WAIT_INTERVAL);
                continue;
            }
            if (DeregisterSuspendedThreads()) {
                break;
            }
            stop_var_.TimedWait(&thread_lock_, WAIT_INTERVAL);
        }

        // Deregistered daemon structures were parked in daemon_threads_ by
        // DecreaseCountersForThread; free their internal memory now.
        for (const auto &thread : daemon_threads_) {
            thread->FreeInternalMemory();
        }
    }
    // Report a tolerated leak budget: per-daemon threshold times the number of
    // daemon threads that never fully exited.
    auto threshold = Runtime::GetOptions().GetIgnoreDaemonMemoryLeaksThreshold();
    Runtime::GetCurrent()->SetDaemonMemoryLeakThreshold(daemon_threads_.size() * threshold);
}
180
StopDaemonThreads()181 void ThreadManager::StopDaemonThreads() REQUIRES(thread_lock_)
182 {
183 trace::ScopedTrace scoped_trace(__FUNCTION__);
184 for (auto thread : threads_) {
185 if (thread->IsDaemon()) {
186 LOG(DEBUG, RUNTIME) << "Stopping daemon thread " << thread->GetId();
187 thread->StopDaemonThread();
188 }
189 }
190 // Suspend any future new threads
191 suspend_new_count_++;
192 }
193
// Returns the current number of registered threads.
// NOTE(review): reads threads_count_ without holding thread_lock_ - presumably
// an intentionally approximate snapshot (or an atomic counter); confirm.
int ThreadManager::GetThreadsCount()
{
    return threads_count_;
}
198
#ifndef NDEBUG
// Debug-only counter: total number of threads ever registered with this
// manager (not just the currently live ones).
uint32_t ThreadManager::GetAllRegisteredThreadsCount()
{
    return registered_threads_count_;
}
#endif  // NDEBUG
205
SuspendAllThreads()206 void ThreadManager::SuspendAllThreads()
207 {
208 trace::ScopedTrace scoped_trace("Suspending mutator threads");
209 auto cur_thread = MTManagedThread::GetCurrent();
210 os::memory::LockHolder lock(thread_lock_);
211 EnumerateThreadsWithLockheld([cur_thread](MTManagedThread *thread) {
212 if (thread != cur_thread) {
213 thread->SuspendImpl(true);
214 }
215 return true;
216 });
217 suspend_new_count_++;
218 }
219
IsRunningThreadExist()220 bool ThreadManager::IsRunningThreadExist()
221 {
222 auto cur_thread = MTManagedThread::GetCurrent();
223 os::memory::LockHolder lock(thread_lock_);
224 bool is_exists = false;
225 EnumerateThreadsWithLockheld([cur_thread, &is_exists](MTManagedThread *thread) {
226 if (thread != cur_thread) {
227 if (thread->GetStatus() == ThreadStatus::RUNNING) {
228 is_exists = true;
229 return false;
230 };
231 }
232 return true;
233 });
234 return is_exists;
235 }
236
ResumeAllThreads()237 void ThreadManager::ResumeAllThreads()
238 {
239 trace::ScopedTrace scoped_trace("Resuming mutator threads");
240 auto cur_thread = MTManagedThread::GetCurrent();
241 os::memory::LockHolder lock(thread_lock_);
242 if (suspend_new_count_ > 0) {
243 suspend_new_count_--;
244 }
245 EnumerateThreadsWithLockheld([cur_thread](MTManagedThread *thread) {
246 if (thread != cur_thread) {
247 thread->ResumeImpl(true);
248 }
249 return true;
250 });
251 }
252
// Tears down the calling thread's registration: drains pending safepoint
// flags, destroys internal resources, marks the thread FINISHED, and removes
// and deletes it - except for the main thread, which stays registered so
// Runtime::GetMainThread remains valid. Returns true iff the thread was
// actually removed and deleted.
bool ThreadManager::UnregisterExitedThread(MTManagedThread *thread)
{
    ASSERT(MTManagedThread::GetCurrent() == thread);
    {
        thread->NativeCodeEnd();

        os::memory::LockHolder lock(thread_lock_);
        // While this thread is suspended, do not delete it as other thread can be accessing it.
        // TestAllFlags is required because termination request can be sent while thread_lock_ is unlocked
        while (thread->TestAllFlags()) {
            // Drop thread_lock_ around SafepointPoll: the poll may block (e.g.
            // waiting out a suspension) and must not do so while holding the lock.
            thread_lock_.Unlock();
            thread->SafepointPoll();
            thread_lock_.Lock();
        }

        thread->DestroyInternalResources();

        LOG(DEBUG, RUNTIME) << "Stopping thread " << thread->GetId();
        thread->UpdateStatus(ThreadStatus::FINISHED);
        // Do not delete main thread, Runtime::GetMainThread is expected to always return valid object
        if (thread == main_thread_) {
            return false;
        }

        // This code should happen after thread has been resumed: Both WaitSuspension and ResumeImps requires locking
        // suspend_lock_, so it acts as a memory barrier; flag clean should be visible in this thread after exit from
        // WaitSuspenion
        TSAN_ANNOTATE_HAPPENS_AFTER(&thread->fts_);

        threads_.remove(thread);
        if (thread->IsDaemon()) {
            daemon_threads_count_--;
        }
        threads_count_--;

        // If managed_thread, its nativePeer should be 0 before
        delete thread;
        // Wake WaitForDeregistration so it can re-check the remaining threads.
        stop_var_.Signal();
        return true;
    }
}
294
// Placeholder hook for platform-specific "sensitive thread" registration;
// currently does nothing except log that it is unimplemented.
void ThreadManager::RegisterSensitiveThread() const
{
    LOG(INFO, RUNTIME) << __func__ << " is an empty implementation now.";
}
299
// Finds the thread with the given internal id, requests its suspension, and
// spins (yield first, then 1 ms sleeps) until it actually leaves RUNNING.
// Returns the suspended thread, or nullptr when no thread has that id.
// The caller must itself be a registered thread and not be in RUNNING state.
MTManagedThread *ThreadManager::SuspendAndWaitThreadByInternalThreadId(uint32_t thread_id)
{
    static constexpr uint32_t YIELD_ITERS = 500;
    // NB! Expected to be called in registered thread, change implementation if this function used elsewhere
    MTManagedThread *current = MTManagedThread::GetCurrent();
    MTManagedThread *suspended = nullptr;
    ASSERT(current->GetStatus() != ThreadStatus::RUNNING);

    // Extract target thread
    while (true) {
        // If two threads call SuspendAndWaitThreadByInternalThreadId concurrently, one has to get suspended
        // while other waits for thread to be suspended, so thread_lock_ is required to be held until
        // SuspendImpl is called
        current->SafepointPoll();
        {
            os::memory::LockHolder lock(thread_lock_);

            suspended = GetThreadByInternalThreadIdWithLockHeld(thread_id);
            if (UNLIKELY(suspended == nullptr)) {
                // no thread found, exit
                return nullptr;
            }
            ASSERT(current != suspended);
            if (LIKELY(!current->IsSuspended())) {
                suspended->SuspendImpl(true);
                break;
            }
            // Unsafe to suspend as other thread may be waiting for this thread to suspend;
            // Should get suspended on Safepoint()
        }
    }

    // Now wait until target thread is really suspended
    for (uint32_t loop_iter = 0;; loop_iter++) {
        if (suspended->GetStatus() != ThreadStatus::RUNNING) {
            // Thread is suspended now
            return suspended;
        }
        if (loop_iter < YIELD_ITERS) {
            // Cheap busy-wait first: yield the CPU without sleeping.
            MTManagedThread::Yield();
        } else {
            // Target is slow to stop - back off with short sleeps instead.
            static constexpr uint32_t SHORT_SLEEP_MS = 1;
            os::thread::NativeSleep(SHORT_SLEEP_MS);
        }
    }
    UNREACHABLE();
}
347
348 } // namespace panda
349