/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "common_components/heap/collector/finalizer_processor.h"

#include "common_components/common/scoped_object_access.h"
#include "common_components/mutator/mutator.h"

namespace common {
constexpr uint32_t DEFAULT_FINALIZER_TIMEOUT_MS = 2000;

// Note: can only be called by the FinalizerProcessor thread
extern "C" PUBLIC_API void* ArkProcessFinalizers(void* arg)
{
#ifdef __APPLE__
    CHECK_CALL(pthread_setname_np, ("gc-helper"), "finalizer-processor thread setname");
#elif defined(__linux__) || defined(PANDA_TARGET_OHOS)
    CHECK_CALL(prctl, (PR_SET_NAME, "gc-helper"), "finalizer-processor thread setname");
#endif
    reinterpret_cast<FinalizerProcessor*>(arg)->Run();
    return nullptr;
}

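// Creates the finalizer-processor thread as a joinable pthread with an explicit stack size,
// then blocks in WaitStarted() until the new thread has finished Init() and signalled NotifyStarted().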
void FinalizerProcessor::Start()
{
    pthread_t thread;
    pthread_attr_t attr;
    // size_t stackSize = ArkCommonRuntime::GetConcurrencyParam().thStackSize * KB; // default 1MB stacksize
    // FinalizerProcessor stack sizing needs to be revised
    size_t stackSize = 1024 * KB;
#if defined(__linux__) || defined(PANDA_TARGET_OHOS) || defined(__APPLE__)
    // PTHREAD_STACK_MIN is not supported on Windows.
    if (stackSize < static_cast<size_t>(PTHREAD_STACK_MIN)) {
        stackSize = static_cast<size_t>(PTHREAD_STACK_MIN);
    }
#endif
    CHECK_CALL(pthread_attr_init, (&attr), "init pthread attr");
    CHECK_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_JOINABLE), "set pthread joinable");
    CHECK_CALL(pthread_attr_setstacksize, (&attr, stackSize), "set pthread stacksize");
    CHECK_CALL(pthread_create, (&thread, &attr, ArkProcessFinalizers, this),
               "create finalizer-processor thread");
#ifdef __WIN64
    CHECK_CALL(pthread_setname_np, (thread, "gc-helper"), "finalizer-processor thread setname");
#endif
    CHECK_CALL(pthread_attr_destroy, (&attr), "destroy pthread attr");
    threadHandle_ = thread;

    WaitStarted();
}

// Stopping the FinalizerProcessor is only done at fork or runtime finalization.
// Should only be invoked once.
void FinalizerProcessor::Stop()
{
    // This will only occur in the prefork scenario.
    if (running_ == false) {
        return;
    }
    running_ = false;
    Notify();
    WaitStop();
}

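// The constructor only initializes bookkeeping flags, timers and request indicators;
// the worker thread itself is created later by Start().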
FinalizerProcessor::FinalizerProcessor()
{
    started_ = false;
    running_ = false;
    iterationWaitTime_ = DEFAULT_FINALIZER_TIMEOUT_MS;
    timeProcessorBegin_ = 0;
    timeProcessUsed_ = 0;
    timeCurrentProcessBegin_ = 0;
    hasFinalizableJob_.store(false, std::memory_order_relaxed);
    shouldReclaimHeapGarbage_.store(false, std::memory_order_relaxed);
    shouldFeedHungryBuffers_.store(false, std::memory_order_relaxed);
}

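// Main loop of the finalizer-processor thread: drain pending finalizable jobs, feed hungry
// allocation buffers and reclaim heap garbage on request, then wait until new work arrives
// or the iteration timeout expires.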
void FinalizerProcessor::Run()
{
    Init();
    NotifyStarted();
    while (running_) {
        if (hasFinalizableJob_.load(std::memory_order_relaxed)) {
            // In theory we currently won't have this
            ProcessFinalizables();
#if defined(GCINFO_DEBUG) && GCINFO_DEBUG
            LogAfterProcess();
#endif
        }

        if (shouldFeedHungryBuffers_.load(std::memory_order_relaxed)) {
            FeedHungryBuffers();
        }

        if (shouldReclaimHeapGarbage_.load(std::memory_order_relaxed)) {
            ReclaimHeapGarbage();
        }

        {
            COMMON_PHASE_TIMER("finalizerProcessor waiting time");
            while (running_) {
                Wait(iterationWaitTime_);
                if (hasFinalizableJob_.load(std::memory_order_relaxed) ||
                    shouldReclaimHeapGarbage_.load(std::memory_order_relaxed) ||
                    shouldFeedHungryBuffers_.load(std::memory_order_relaxed)) {
                    break;
                }
            }
        }
    }
    Fini();
}

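// Registers this thread as a runtime mutator, enters the saferegion, records the tid,
// marks the processor as running and publishes fpMutator_ under the mutator-management read lock.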
void FinalizerProcessor::Init()
{
    Mutator* mutator = MutatorManager::Instance().CreateRuntimeMutator(ThreadType::FP_THREAD);
    (void)mutator->EnterSaferegion(true);
    tid_ = mutator->GetTid();
    ThreadLocal::SetProtectAddr(reinterpret_cast<uint8_t*>(0));
    running_ = true;
    timeProcessorBegin_ = TimeUtil::MicroSeconds();
    timeProcessUsed_ = 0;
    MutatorManager::Instance().MutatorManagementRLock();
    fpMutator_ = mutator;
    MutatorManager::Instance().MutatorManagementRUnlock();
    VLOG(INFO, "FinalizerProcessor thread started");
}

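// Clears fpMutator_ under the mutator-management read lock and destroys the runtime mutator
// before the thread exits.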
void FinalizerProcessor::Fini()
{
    MutatorManager::Instance().MutatorManagementRLock();
    fpMutator_ = nullptr;
    MutatorManager::Instance().MutatorManagementRUnlock();
    MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::FP_THREAD);
    VLOG(INFO, "FinalizerProcessor thread stopped");
}

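// Joins the finalizer-processor thread, then resets the started flag and thread handle.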
void FinalizerProcessor::WaitStop()
{
    pthread_t thread = threadHandle_;
    int tmpResult = ::pthread_join(thread, nullptr);
    LOGF_CHECK(tmpResult == 0) << "::pthread_join() in FinalizerProcessor::WaitStop() returned " <<
        tmpResult << " rather than 0.";
    started_ = false;
    threadHandle_ = 0;
}

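// Wakes the processor thread if it is blocked in Wait().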
void FinalizerProcessor::Notify() { wakeCondition_.notify_one(); }

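// Blocks the processor thread on the wake condition for at most timeoutMilliSeconds.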
void FinalizerProcessor::Wait(uint32_t timeoutMilliSeconds)
{
    std::unique_lock<std::mutex> lock(wakeLock_);
    std::chrono::milliseconds epoch(timeoutMilliSeconds);
    wakeCondition_.wait_for(lock, epoch);
}

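// Called from the processor thread after Init() to signal waiters blocked in WaitStarted().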
void FinalizerProcessor::NotifyStarted()
{
    {
        std::unique_lock<std::mutex> lock(startedLock_);
        LOGF_CHECK(started_ != true) << "unexpected true, FinalizerProcessor might not have waited for stop";
        started_ = true;
    }
    startedCondition_.notify_all();
}

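// Blocks the caller of Start() until the processor thread has signalled NotifyStarted().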
void FinalizerProcessor::WaitStarted()
{
    std::unique_lock<std::mutex> lock(startedLock_);
    if (started_) {
        return;
    }
    startedCondition_.wait(lock, [this] { return started_; });
}

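// Moves up to countLimit objects from the registered finalizers_ list into the pending
// finalizables_ list, using the supplied predicate to decide which objects have become
// finalizable, and flags the processor that finalizable work is available.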
void FinalizerProcessor::EnqueueFinalizables(const std::function<bool(BaseObject*)>& finalizable, uint32_t countLimit)
{
    std::lock_guard<std::mutex> l(listLock_);
    auto it = finalizers_.begin();
    while (it != finalizers_.end() && countLimit != 0) {
        RefField<> tmpField(reinterpret_cast<HeapAddress>(*it));
        BaseObject* obj = tmpField.GetTargetObject();
        --countLimit;
        if (finalizable(obj)) {
            finalizables_.push_back(reinterpret_cast<BaseObject*>(tmpField.GetFieldValue()));
            it = finalizers_.erase(it);
        } else {
            ++it;
        }
    }

    if (!finalizables_.empty()) {
        hasFinalizableJob_.store(true, std::memory_order_relaxed);
    }
}

// Process the finalizable list:
// 1. always process the list head
// 2. leave the saferegion (runs on the FinalizerProcessor thread)
// 3. invoke the finalize method
// 4. remove processed finalizables
void FinalizerProcessor::ProcessFinalizableList()
{
    LOG_COMMON(FATAL) << "Unresolved fatal";
    UNREACHABLE_CC();
}

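// Takes ownership of the pending finalizables by swapping them into workingFinalizables_
// outside the saferegion, processes them, and clears the job flag once the pending list is empty.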
void FinalizerProcessor::ProcessFinalizables()
{
    COMMON_PHASE_TIMER("Finalizer");
    {
        // We leave the saferegion to prevent GC from visiting the queues while they change.
        ScopedObjectAccess soa;
        std::lock_guard<std::mutex> l(listLock_);
        // The FP thread does not reach this point before workingFinalizables_ has been cleaned up,
        // so workingFinalizables_ is expected to be empty and swap is safe here.
        workingFinalizables_.swap(finalizables_);
    }
    DLOG(FINALIZE, "finalizer: working size %zu", workingFinalizables_.size());
    ProcessFinalizableList();
    if (finalizables_.empty()) {
        hasFinalizableJob_.store(false, std::memory_order_relaxed);
    }
}

#if defined(GCINFO_DEBUG) && GCINFO_DEBUG
void FinalizerProcessor::LogAfterProcess()
{
    if (!ENABLE_LOG(FINALIZE)) {
        return;
    }
    uint64_t timeNow = TimeUtil::MicroSeconds();
    uint64_t timeConsumed = timeNow - timeCurrentProcessBegin_;
    uint64_t totalTimePassed = timeNow - timeProcessorBegin_;
    timeProcessUsed_ += timeConsumed;
    constexpr float percentageDivend = 100.0f;
    float percentage = (static_cast<float>(TIME_FACTOR * timeProcessUsed_) / totalTimePassed) / percentageDivend;
    DLOG(FINALIZE, "[FinalizerProcessor] End (%luus [%luus] [%.2f%%])", timeConsumed, timeProcessUsed_, percentage);
}
#endif

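// Records an object with a finalizer: writes it through the heap barrier into a temporary
// reference field and appends the resulting field value to the finalizers_ list.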
void FinalizerProcessor::RegisterFinalizer(BaseObject* obj)
{
    RefField<> tmpField(nullptr);
    Heap::GetHeap().GetBarrier().WriteStaticRef(tmpField, obj);
    std::lock_guard<std::mutex> l(listLock_);
    finalizers_.push_back(reinterpret_cast<BaseObject*>(tmpField.GetFieldValue()));
}

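// Asks the allocator to reclaim garbage memory and clears the reclaim-request flag.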
void FinalizerProcessor::ReclaimHeapGarbage()
{
    OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "ARK_RT_GC_RECLAIM", "");
    Heap::GetHeap().GetAllocator().ReclaimGarbageMemory(false);
    shouldReclaimHeapGarbage_.store(false, std::memory_order_relaxed);
}

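// Asks the allocator to refill hungry allocation buffers and clears the corresponding request flag.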
void FinalizerProcessor::FeedHungryBuffers()
{
    Heap::GetHeap().GetAllocator().FeedHungryBuffers();
    shouldFeedHungryBuffers_.store(false, std::memory_order_relaxed);
}
} // namespace common