1 /*
2 * Copyright (c) 2025 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <cstdint>
17 #include <stack>
18 #include <unistd.h>
19
20 #include "common_components/common_runtime/hooks.h"
21 #include "common_components/common/type_def.h"
22 #if defined(_WIN64)
23 #define NOGDI
24 #include <windows.h>
25 #endif
26 #include "common_components/heap/allocator/region_manager.h"
27 #include "common_components/heap/collector/marking_collector.h"
28 #include "common_components/common/scoped_object_access.h"
29 #include "common_components/mutator/mutator_manager.h"
30
31 namespace common {
GetThreadLocalData()32 ThreadLocalData *GetThreadLocalData()
33 {
34 uintptr_t tlDataAddr = reinterpret_cast<uintptr_t>(ThreadLocal::GetThreadLocalData());
35 #if defined(__aarch64__)
36 if (Heap::GetHeap().IsGcStarted()) {
37 // Since the TBI(top bit ignore) feature in Aarch64,
38 // set gc phase to high 8-bit of ThreadLocalData Address for gc barrier fast path.
39 // 56: make gcphase value shift left 56 bit to set the high 8-bit
40 tlDataAddr = tlDataAddr | (static_cast<uint64_t>(Heap::GetHeap().GetGCPhase()) << 56);
41 }
42 #endif
43 return reinterpret_cast<ThreadLocalData *>(tlDataAddr);
44 }
45
46 // Ensure that mutator phase is changed only once by mutator itself or GC
TransitionGCPhase(bool bySelf)47 bool MutatorBase::TransitionGCPhase(bool bySelf)
48 {
49 do {
50 GCPhaseTransitionState state = transitionState_.load();
51 // If this mutator phase transition has finished, just return
52 if (state == FINISH_TRANSITION) {
53 bool result = mutatorPhase_.load() == Heap::GetHeap().GetGCPhase();
54 if (!bySelf && !result) { // why check bySelf?
55 LOG_COMMON(FATAL) << "Unresolved fatal";
56 UNREACHABLE_CC();
57 }
58 return result;
59 }
60
61 // If this mutator is executing phase transition by other thread, mutator should wait but GC just return
62 if (state == IN_TRANSITION) {
63 if (bySelf) {
64 WaitForPhaseTransition();
65 return true;
66 } else {
67 return false;
68 }
69 }
70
71 if (!bySelf && state == NO_TRANSITION) {
72 return true;
73 }
74
75 // Current thread set atomic variable to ensure atomicity of phase transition
76 CHECK_CC(state == NEED_TRANSITION);
77 if (transitionState_.compare_exchange_weak(state, IN_TRANSITION)) {
78 TransitionToGCPhaseExclusive(Heap::GetHeap().GetGCPhase());
79 transitionState_.store(FINISH_TRANSITION, std::memory_order_release);
80 return true;
81 }
82 } while (true);
83 }
84
// Loop inside a saferegion and service pending suspension requests (STW,
// GC-phase transition, CPU profiling, thread exit, flip-function callbacks)
// until no blocking request or observer remains, then leave the saferegion.
void MutatorBase::HandleSuspensionRequest()
{
    for (;;) {
        SetInSaferegion(SAFE_REGION_TRUE);
        if (HasSuspensionRequest(SUSPENSION_FOR_STW)) {
            SuspendForStw();
            // After resuming from stop-the-world, the GC may have queued a
            // phase transition or a profiling request for this mutator.
            if (HasSuspensionRequest(SUSPENSION_FOR_GC_PHASE)) {
                TransitionGCPhase(true);
            } else if (HasSuspensionRequest(SUSPENSION_FOR_CPU_PROFILE)) {
                TransitionToCpuProfile(true);
            }
        } else if (HasSuspensionRequest(SUSPENSION_FOR_GC_PHASE)) {
            TransitionGCPhase(true);
        } else if (HasSuspensionRequest(SUSPENSION_FOR_CPU_PROFILE)) {
            TransitionToCpuProfile(true);
        } else if (HasSuspensionRequest(SUSPENSION_FOR_EXIT)) {
            // Exit was requested: park this thread forever; it is never
            // expected to resume from here.
            while (true) {
                sleep(INT_MAX);
            }
        } else if (HasSuspensionRequest(SUSPENSION_FOR_PENDING_CALLBACK)) {
            reinterpret_cast<Mutator*>(mutator_)->TryRunFlipFunction();
        } else if (HasSuspensionRequest(SUSPENSION_FOR_RUNNING_CALLBACK)) {
            reinterpret_cast<Mutator*>(mutator_)->WaitFlipFunctionFinish();
        }
        SetInSaferegion(SAFE_REGION_FALSE);
        // Leave saferegion if current mutator has no suspend request, otherwise try again
        if (LIKELY_CC(!HasAnySuspensionRequestExceptCallbacks() && !HasObserver())) {
            // Finalize requests are handled last, once no blocking request
            // remains; the flag is cleared before running the JS callback.
            if (HasSuspensionRequest(SUSPENSION_FOR_FINALIZE)) {
                ClearFinalizeRequest();
                HandleJSGCCallback();
            }
            return;
        }
    }
}
120
HandleJSGCCallback()121 void MutatorBase::HandleJSGCCallback()
122 {
123 if (mutator_ != nullptr) {
124 void *vm = reinterpret_cast<Mutator*>(mutator_)->GetEcmaVMPtr();
125 if (vm != nullptr) {
126 JSGCCallback(vm);
127 }
128 }
129 }
130
// Block the current mutator until the GC resumes the world, then re-arm the
// STW flag if another stop-the-world round was requested in the meantime.
void MutatorBase::SuspendForStw()
{
    ClearSuspensionFlag(SUSPENSION_FOR_STW);
    // wait until StartTheWorld
    int curCount = static_cast<int>(MutatorManager::Instance().GetStwFutexWordValue());
    // Avoid losing wake-ups
    if (curCount > 0) {
#if defined(_WIN64) || defined(__APPLE__)
        // Platforms without futex use the manager's blocking wait instead.
        MutatorManager::Instance().MutatorWait();
#else
        int* countAddr = MutatorManager::Instance().GetStwFutexWord();
        // FUTEX_WAIT may fail when gc thread wakes up all threads before the current thread reaches this position.
        // But it is not important because there won't be data race between the current thread and the gc thread,
        // and it also won't be frozen since gc thread also modifies the value at countAddr before its waking option.
        (void)Futex(countAddr, FUTEX_WAIT, curCount);
#endif
    }
    SetInSaferegion(SAFE_REGION_FALSE);
    if (MutatorManager::Instance().StwTriggered()) {
        // entering this branch means a second request has been broadcasted, we need to reset this flag to avoid
        // missing the request. And this must be after the behaviour that set saferegion state to false, because
        // we need to make sure that the mutator can always perceive the gc request when the mutator is not in
        // safe region.
        SetSuspensionFlag(SUSPENSION_FOR_STW);
    }
}
157
#if defined(GCINFO_DEBUG) && GCINFO_DEBUG
// Debug-only: delegate creation of the current GC info record to gcInfos_.
void Mutator::CreateCurrentGCInfo()
{
    gcInfos_.CreateCurrentGCInfo();
}
#endif
161
162
VisitRawObjects(const RootVisitor & func)163 void Mutator::VisitRawObjects(const RootVisitor& func)
164 {
165 if (rawObject_.object != nullptr) {
166 func(rawObject_);
167 }
168 }
169
GetMutator()170 Mutator* Mutator::GetMutator() noexcept
171 {
172 return ThreadLocal::GetMutator();
173 }
174
CheckAndPush(BaseObject * obj,std::set<BaseObject * > & rootSet,std::stack<BaseObject * > & rootStack)175 inline void CheckAndPush(BaseObject* obj, std::set<BaseObject*>& rootSet, std::stack<BaseObject*>& rootStack)
176 {
177 auto search = rootSet.find(obj);
178 if (search == rootSet.end()) {
179 rootSet.insert(obj);
180 if (obj->IsValidObject() && obj->HasRefField()) {
181 rootStack.push(obj);
182 }
183 }
184 }
185
// Intentionally empty: no per-mutator work is performed for the enum phase
// in this configuration; kept as the hook invoked on GC_PHASE_ENUM.
inline void MutatorBase::GcPhaseEnum(GCPhase newPhase)
{
}
189
// Intentionally empty: no per-mutator preparation is performed before the
// pre-copy (forwarding) phase here; kept as the hook invoked on
// GC_PHASE_PRECOPY.
inline void MutatorBase::GCPhasePreForward(GCPhase newPhase)
{
}
194
HandleGCPhase(GCPhase newPhase)195 inline void MutatorBase::HandleGCPhase(GCPhase newPhase)
196 {
197 if (newPhase == GCPhase::GC_PHASE_POST_MARK) {
198 std::lock_guard<std::mutex> lg(mutatorBaseLock_);
199 Mutator *actMutator = reinterpret_cast<Mutator*>(mutator_);
200 if (actMutator->satbNode_ != nullptr) {
201 DCHECK_CC(actMutator->satbNode_->IsEmpty());
202 SatbBuffer::Instance().RetireNode(actMutator->satbNode_);
203 actMutator->satbNode_ = nullptr;
204 }
205 } else if (newPhase == GCPhase::GC_PHASE_ENUM) {
206 GcPhaseEnum(newPhase);
207 } else if (newPhase == GCPhase::GC_PHASE_PRECOPY) {
208 GCPhasePreForward(newPhase);
209 } else if (newPhase == GCPhase::GC_PHASE_REMARK_SATB || newPhase == GCPhase::GC_PHASE_FINAL_MARK) {
210 std::lock_guard<std::mutex> lg(mutatorBaseLock_);
211 Mutator *actMutator = reinterpret_cast<Mutator*>(mutator_);
212 if (actMutator->satbNode_ != nullptr) {
213 SatbBuffer::Instance().RetireNode(actMutator->satbNode_);
214 actMutator->satbNode_ = nullptr;
215 }
216 }
217 }
218
// Apply newPhase to this mutator while holding exclusive transition rights:
// run phase-specific work, publish the phase, sync it to the JS thread, and
// finally clear the GC-phase suspension request. The statement order here is
// part of the handshake protocol — do not reorder.
void MutatorBase::TransitionToGCPhaseExclusive(GCPhase newPhase)
{
    HandleGCPhase(newPhase);
    SetSafepointActive(false);
    mutatorPhase_.store(newPhase, std::memory_order_relaxed); // handshake between mutator & mainGC thread
    if (jsThread_ != nullptr) {
        // non-atomic, should update JSThread local gc state before SuspensionFlag store,
        // and SuspensionFlag load when transfer to running will guarantee the visibility of
        // the JSThread local gc state
        SynchronizeGCPhaseToJSThread(jsThread_, newPhase);
    }
    // Clear mutator's suspend request after phase transition
    ClearSuspensionFlag(SUSPENSION_FOR_GC_PHASE); // atomic seq-cst
}
233
// CPU-profile transition work is not implemented in this runtime; reaching
// this path is treated as a fatal error.
inline void MutatorBase::HandleCpuProfile()
{
    LOG_COMMON(FATAL) << "Unresolved fatal";
    UNREACHABLE_CC();
}
239
// Perform the CPU-profile transition exclusively: run the profile handler,
// deactivate the safepoint, then clear the profiling suspension request.
void MutatorBase::TransitionToCpuProfileExclusive()
{
    HandleCpuProfile();
    SetSafepointActive(false);
    ClearSuspensionFlag(SUSPENSION_FOR_CPU_PROFILE);
}
246
// Prepare a mutator to (re)enter managed code: honour a pending stop-the-world
// request, leave the saferegion, and adopt the heap's current GC phase.
// NOTE(review): 'layers' and 'threadData' are unused in this body — presumably
// kept for signature compatibility with callers/other variants; confirm.
void PreRunManagedCode(Mutator* mutator, int layers, ThreadLocalData* threadData)
{
    if (UNLIKELY_CC(MutatorManager::Instance().StwTriggered())) {
        // A STW is in progress: flag ourselves and wait inside a saferegion
        // before proceeding into managed code.
        mutator->SetSuspensionFlag(Mutator::SuspensionType::SUSPENSION_FOR_STW);
        mutator->EnterSaferegion(false);
    }
    mutator->LeaveSaferegion();
    mutator->SetMutatorPhase(Heap::GetHeap().GetGCPhase());
}
256
257 } // namespace common
258