/**
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/runtime.h"
#include "runtime/mem/gc/gc_scope.h"
#include "runtime/mem/gc/g1/g1-gc.h"
#include "runtime/mem/gc/g1/xgc-extension-data.h"
#include "plugins/ets/runtime/ets_exceptions.h"
#include "plugins/ets/runtime/interop_js/interop_context.h"
#include "plugins/ets/runtime/interop_js/xgc/xgc.h"
#include "plugins/ets/runtime/interop_js/ets_proxy/shared_reference_storage_verifier.h"
#ifdef PANDA_JS_ETS_HYBRID_MODE
#include "native_engine/native_reference.h"
#include "interfaces/inner_api/napi/native_node_hybrid_api.h"
#endif  // PANDA_JS_ETS_HYBRID_MODE

namespace ark::ets::interop::js {
// CC-OFFNXT(G.PRE.02) necessary macro
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOG_XGC(level) LOG(level, ETS_INTEROP_JS) << "[XGC] "

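// RAII helper: opens a trace/timing scope on the VM's GC and logs the scope
// name on entry and exit. It is only valid while a cross-reference GC
// (GCTaskCause::CROSSREF_CAUSE) is running, which the constructor asserts.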
class XGCScope : public mem::GCScope<mem::TRACE_TIMING> {
public:
    XGCScope(std::string_view name, PandaEtsVM *vm)
        : mem::GCScope<mem::TRACE_TIMING>(name, vm->GetGC()), scopeName_(name)
    {
        ASSERT(vm->GetGC()->GetLastGCCause() == GCTaskCause::CROSSREF_CAUSE);
        LOG_XGC(DEBUG) << scopeName_ << ": start";
    }

    NO_COPY_SEMANTIC(XGCScope);
    NO_MOVE_SEMANTIC(XGCScope);

    ~XGCScope()
    {
        LOG_XGC(DEBUG) << scopeName_ << ": end";
    }

private:
    std::string_view scopeName_ {};
};

XGC *XGC::instance_ = nullptr;

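// Maps the string value of the XGC trigger option (see GetXgcTriggerType()
// below) to a TriggerPolicy. Unknown strings yield TriggerPolicy::INVALID,
// which the XGC constructor rejects with an ASSERT.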
static constexpr XGC::TriggerPolicy GetXGCTriggerPolicyByString(std::string_view policyStr)
{
    if (policyStr == "default") {
        return XGC::TriggerPolicy::DEFAULT;
    }
    if (policyStr == "force") {
        return XGC::TriggerPolicy::FORCE;
    }
    if (policyStr == "never") {
        return XGC::TriggerPolicy::NEVER;
    }
    return XGC::TriggerPolicy::INVALID;
}

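// The constructor caches the trigger configuration from the runtime options:
// the minimal storage size that may trigger XGC, the percent by which the
// threshold grows after each collection (capped at 100), and the trigger
// policy. The initial trigger threshold is the minimal one.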
XGC::XGC(PandaEtsVM *vm, STSVMInterfaceImpl *stsVmIface, ets_proxy::SharedReferenceStorage *storage)
    : vm_(vm),
      storage_(storage),
      stsVmIface_(stsVmIface),
      minimalThresholdSize_(Runtime::GetCurrent()->GetOptions().GetXgcMinTriggerThreshold()),
      increaseThresholdPercent_(
          std::min(PERCENT_100_U32, Runtime::GetCurrent()->GetOptions().GetXgcTriggerPercentThreshold())),
      treiggerPolicy_(GetXGCTriggerPolicyByString(Runtime::GetCurrent()->GetOptions().GetXgcTriggerType())),
      enableXgcVerifier_(Runtime::GetCurrent()->GetOptions().IsEnableXgcVerifier())
{
    ASSERT(minimalThresholdSize_ <= storage->MaxSize());
    ASSERT(treiggerPolicy_ != XGC::TriggerPolicy::INVALID);
    // Atomic with relaxed order reason: data race with targetThreasholdSize_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    targetThreasholdSize_.store(minimalThresholdSize_, std::memory_order_relaxed);
}

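// Marks the JS counterpart of a shared reference by passing its JS ref to the
// JS VM adaptor (napi mark-from-object), then wakes JS-side waiters so the JS
// GC can continue concurrent marking.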
ALWAYS_INLINE static void MarkJsObject(ets_proxy::SharedReference *ref, STSVMInterfaceImpl *stsVmIface)
{
    ASSERT(ref->HasJSFlag());
    LOG_XGC(DEBUG) << "napi_mark_from_object for ref " << ref;
    ref->GetCtx()->GetXGCVmAdaptor()->MarkFromObject(ref->GetJsRef());
    LOG_XGC(DEBUG) << "Notify to JS waiters";
    stsVmIface->NotifyWaiters();
}

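// Marks the ETS counterpart of a shared reference: takes its EtsObject and
// asks the G1 GC to mark everything transitively reachable from it.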
ALWAYS_INLINE static void MarkEtsObject(ets_proxy::SharedReference *ref, PandaEtsVM *vm)
{
    ASSERT(ref->HasETSFlag());
    EtsObject *etsObj = ref->GetEtsObject();
    auto *gc = reinterpret_cast<mem::G1GC<EtsLanguageConfig> *>(vm->GetGC());
    LOG_XGC(DEBUG) << "Start marking from " << etsObj << " (" << etsObj->GetClass()->GetDescriptor() << ")";
    gc->MarkObjectRecursively(etsObj->GetCoreType());
}

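// Builds the handler stored in XGCExtensionData. The GC calls it for ETS
// objects reached during cross-reference marking; for an object with an
// interop index it walks all shared references of that object and marks the
// JS counterpart of every not-yet-marked JS-flagged reference.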
static auto CreateXObjectHandler(ets_proxy::SharedReferenceStorage *storage, STSVMInterfaceImpl *stsVmIface)
{
    return [storage, stsVmIface](ObjectHeader *obj) {
        auto *etsObj = EtsObject::FromCoreType(obj);
        if (!etsObj->HasInteropIndex()) {
            return;
        }
        // NOTE(audovichenko): Handle multithreading issue.
        ets_proxy::SharedReference::Iterator it(storage->GetReference(etsObj));
        ets_proxy::SharedReference::Iterator end;
        do {
            if (it->HasJSFlag() && it->MarkIfNotMarked()) {
                MarkJsObject(*it, stsVmIface);
            }
            ++it;
        } while (it != end);
    };
}

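// Creates the XGC singleton and registers it as a GC listener. Returns false
// if the singleton already exists or an allocation fails. When stsVmIface is
// null (the peer VM is not ArkJS), no XGC is created but Create() still
// reports success.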
/* static */
bool XGC::Create(PandaEtsVM *vm, ets_proxy::SharedReferenceStorage *storage, STSVMInterfaceImpl *stsVmIface)
{
    if (instance_ != nullptr) {
        return false;
    }
    if (stsVmIface == nullptr) {
        // JS VM is not ArkJS.
        // NOTE(audovichenko): remove this later
        return true;
    }
    auto xobjHandler = CreateXObjectHandler(storage, stsVmIface);
    auto allocator = Runtime::GetCurrent()->GetInternalAllocator();
    auto *extensionGCData = allocator->New<mem::XGCExtensionData>(xobjHandler);
    if (extensionGCData == nullptr) {
        return false;
    }
    instance_ = allocator->New<XGC>(vm, stsVmIface, storage);
    if (instance_ == nullptr) {
        allocator->Delete(extensionGCData);
        return false;
    }
    auto *gc = vm->GetGC();
    // NOTE(audovichenko): Don't like to use extension data.
    gc->SetExtensionData(extensionGCData);
    gc->AddListener(instance_);
    return true;
}

/* static */
XGC *XGC::GetInstance()
{
    ASSERT(instance_ != nullptr);
    return instance_;
}

/* static */
bool XGC::Destroy()
{
    if (instance_ == nullptr) {
        return false;
    }
    auto *mainCoro = EtsCoroutine::GetCurrent();
    ASSERT(mainCoro != nullptr);
    ASSERT(mainCoro == mainCoro->GetCoroutineManager()->GetMainThread());
    mainCoro->GetPandaVM()->GetGC()->RemoveListener(instance_);
    auto allocator = Runtime::GetCurrent()->GetInternalAllocator();
    allocator->Delete(instance_);
    instance_ = nullptr;
    return true;
}

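// Interop context lifecycle hooks: nothing is needed on attach; on detach a
// running XGC must finish first, because detaching deletes all shared
// references owned by the context.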
void XGC::OnAttach([[maybe_unused]] const InteropCtx *context) {}

void XGC::NotifyToFinishXGC()
{
    os::memory::LockHolder lh(finishXgcLock_);
    // Atomic with relaxed order reason: data race with isXGcInProgress_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    isXGcInProgress_.store(false, std::memory_order_relaxed);
    finishXgcCV_.SignalAll();
}

void XGC::WaitForFinishXGC()
{
    os::memory::LockHolder lh(finishXgcLock_);
    // Atomic with relaxed order reason: data race with isXGcInProgress_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    while (isXGcInProgress_.load(std::memory_order_relaxed)) {
        finishXgcCV_.Wait(&finishXgcLock_);
    }
}

void XGC::OnDetach(const InteropCtx *context)
{
    WaitForFinishXGC();
    storage_->DeleteAllReferencesWithCtx(context);
}

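// GC listener callback. For a cross-reference GC it notifies the storage,
// removes the storage from the root providers for the duration of XGC (the
// shared references themselves are being collected), and records the storage
// size so GCFinished() can recompute the trigger threshold.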
void XGC::GCStarted(const GCTask &task, [[maybe_unused]] size_t heapSize)
{
    if (task.reason != GCTaskCause::CROSSREF_CAUSE) {
        return;
    }
    XGCScope xgcStartScope("XGC Start", vm_);
    storage_->NotifyXGCStarted();
    vm_->RemoveRootProvider(storage_);
    // Atomic with relaxed order reason: data race with isXGcInProgress_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    isXGcInProgress_.store(true, std::memory_order_relaxed);
    remarkFinished_ = false;
    beforeGCStorageSize_ = storage_->Size();
}

void XGC::VerifySharedReferences(ets_proxy::XgcStatus status)
{
    ets_proxy::SharedReferenceStorageVerifier::TraverseAllItems(storage_, status);
}

void XGC::GCFinished(const GCTask &task, [[maybe_unused]] size_t heapSizeBeforeGc, [[maybe_unused]] size_t heapSize)
{
    if (task.reason != GCTaskCause::CROSSREF_CAUSE) {
        return;
    }
    if (!remarkFinished_) {
        // Remark was interrupted, so XGC could not finish during the remark phase; finish here instead
        Finish();
    }
    // NOTE(ipetrov, XGC): if the table is cleared concurrently, the new size should not be computed from the
    // current storage size; it needs the storage size without dead references
    auto newTargetThreshold = this->ComputeNewSize();
    LOG(DEBUG, GC_TRIGGER) << "XGC's new target threshold storage size = " << newTargetThreshold;
    // Atomic with relaxed order reason: data race with targetThreasholdSize_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    targetThreasholdSize_.store(newTargetThreshold, std::memory_order_relaxed);
}

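// The phase hooks below synchronize XGC with the JS GC through stsVmIface_
// barriers: initial mark unmarks all shared references and enters the
// cross-VM StartXGCBarrier; remark opens with RemarkStartBarrier and then
// processes references allocated during the collection.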
void XGC::GCPhaseStarted(mem::GCPhase phase)
{
    // Atomic with relaxed order reason: data race with isXGcInProgress_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    if (!isXGcInProgress_.load(std::memory_order_relaxed)) {
        return;
    }
    switch (phase) {
        case mem::GCPhase::GC_PHASE_INITIAL_MARK: {
            UnmarkAll();
            {
                XGCScope xgcStartBarrierScope("StartXGCBarrier", vm_);
                stsVmIface_->StartXGCBarrier(nullptr);
            }
            break;
        }
        case mem::GCPhase::GC_PHASE_REMARK: {
            {
                XGCScope xgcRemarkStartScope("RemarkStartBarrier", vm_);
                stsVmIface_->RemarkStartBarrier();
            }
            Remark();
            break;
        }
        default: {
            break;
        }
    }
}

void XGC::GCPhaseFinished(mem::GCPhase phase)
{
    // Atomic with relaxed order reason: data race with isXGcInProgress_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    if (!isXGcInProgress_.load(std::memory_order_relaxed)) {
        return;
    }
    switch (phase) {
        case mem::GCPhase::GC_PHASE_MARK: {
            XGCScope xgcWaitForConcurrentMarkScope("WaitForConcurrentMark", vm_);
            stsVmIface_->WaitForConcurrentMark(nullptr);
            break;
        }
        case mem::GCPhase::GC_PHASE_REMARK: {
            {
                XGCScope xgcRemarkFinishScope("WaitForRemark", vm_);
                stsVmIface_->WaitForRemark(nullptr);
                remarkFinished_ = true;
            }
            // All phases shared with the JS GC threads are done and the concurrent mark was not interrupted,
            // so XGC may finish immediately after the common remark
            Finish();
            break;
        }
        default: {
            break;
        }
    }
}

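// Called from the JS side (hybrid mode) when the JS GC encounters a
// cross-reference: resolves the SharedReference stored in the NativeReference
// and, if it is initialized and not marked yet, marks its ETS counterpart.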
void XGC::MarkFromObject([[maybe_unused]] void *data)
{
    ASSERT(data != nullptr);
#if defined(PANDA_JS_ETS_HYBRID_MODE)
    auto *nativeRef = static_cast<NativeReference *>(data);
    auto *refRef = static_cast<ets_proxy::SharedReference **>(nativeRef->GetData());
    // Atomic with acquire order reason: load visibility after shared reference initialization in mutator thread
    auto *sharedRef = AtomicLoad(refRef, std::memory_order_acquire);
    // The reference is not initialized yet; it will be processed during the remark phase
    if (sharedRef == nullptr) {
        return;
    }
    LOG_XGC(DEBUG) << "MarkFromObject for " << sharedRef;
    if (sharedRef->MarkIfNotMarked()) {
        MarkEtsObject(sharedRef, vm_);
    }
#endif  // PANDA_JS_ETS_HYBRID_MODE
}

void XGC::UnmarkAll()
{
    XGCScope xgcUnmarkAllScope("XGC UnmarkAll", vm_);
    storage_->UnmarkAll();
}

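// Processes shared references created while XGC was running: every reference
// extracted from the storage is marked together with its JS and/or ETS
// counterpart, i.e. such references are conservatively kept alive.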
void XGC::Remark()
{
    XGCScope remarkScope("SharedRefsRemark", vm_);
    auto *ref = storage_->ExtractRefAllocatedDuringXGC();
    while (ref != nullptr) {
        if (ref->MarkIfNotMarked()) {
            if (ref->HasJSFlag()) {
                MarkJsObject(ref, stsVmIface_);
            }
            if (ref->HasETSFlag()) {
                MarkEtsObject(ref, vm_);
            }
        }
        ref = storage_->ExtractRefAllocatedDuringXGC();
    }
}

void XGC::Sweep()
{
    XGCScope xgcSweepScope("XGC Sweep", vm_);
    storage_->SweepUnmarkedRefs();
}

void XGC::Finish()
{
    XGCScope xgcFinishScope("XGC Finish", vm_);
    vm_->AddRootProvider(storage_);
    if (remarkFinished_) {
        // XGC was not interrupted
        Sweep();
    }
    if (enableXgcVerifier_) {
        VerifySharedReferences(ets_proxy::XgcStatus::XGC_FINISHED);
    }
    storage_->NotifyXGCFinished();
    // Sweep must happen during the common stop-the-world pause, so it is critical to have the barrier here
    stsVmIface_->FinishXGCBarrier();
    NotifyToFinishXGC();
}

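// Computes the storage size at which the next XGC should trigger: the current
// size plus increaseThresholdPercent_ percent of it (or of the amount just
// swept, whichever delta is larger), clamped to the range
// [minimalThresholdSize_, storage_->MaxSize()].
// Illustrative numbers (not from the source): with a 20% threshold and 1000
// live refs, delta = 200 and the next trigger fires at 1200 refs.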
size_t XGC::ComputeNewSize()
{
    size_t currentStorageSize = storage_->Size();
    size_t delta = (currentStorageSize / PERCENT_100_D) * increaseThresholdPercent_;

    // NOTE(ipetrov, 20146): maybe use an adaptive trigger?
    if (beforeGCStorageSize_ > currentStorageSize) {
        delta = std::max(delta, static_cast<size_t>((beforeGCStorageSize_ - currentStorageSize) *
                                                    (increaseThresholdPercent_ / PERCENT_100_D)));
    }
    return std::min(std::max(currentStorageSize + delta, minimalThresholdSize_), storage_->MaxSize());
}

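// Starts a cross-reference GC: first asks the JS VM adaptor to begin xref
// marking, then submits the task to the common GC. If the common GC rejects
// the task, the JS side is notified about the interruption and its waiters
// are released.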
bool XGC::Trigger(mem::GC *gc, PandaUniquePtr<GCTask> task)
{
    ASSERT_MANAGED_CODE();
    LOG(DEBUG, GC_TRIGGER) << "Trigger XGC. Current storage size = " << storage_->Size();
    // NOTE(ipetrov, #20146): Iterate over all contexts
    auto *coro = EtsCoroutine::GetCurrent();
    auto *ctx = InteropCtx::Current(coro);
    ASSERT(ctx != nullptr);
    // NOTE(audovichenko): Handle the situation when the function creates several equal tasks
    // NOTE(audovichenko): Handle the situation when GC is triggered in one VM but cannot be triggered in another VM.
    if (!ctx->GetXGCVmAdaptor()->StartXRefMarking()) {
        return false;
    }
    if (!gc->Trigger(std::move(task))) {
        ctx->GetXGCVmAdaptor()->NotifyXGCInterruption();
        stsVmIface_->NotifyWaiters();
        return false;
    }
    return true;
}

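// Decides whether XGC should start: "force" always triggers and "never" never
// does; under the default policy XGC triggers only if no XGC is already in
// progress and the storage has grown past the current threshold (and, on
// OHOS, the app is not in a sensitive state).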
ALWAYS_INLINE bool XGC::NeedToTriggerXGC([[maybe_unused]] const mem::GC *gc) const
{
    switch (treiggerPolicy_) {
        case TriggerPolicy::FORCE:
            return true;
        case TriggerPolicy::NEVER:
            return false;
        case TriggerPolicy::DEFAULT:
            [[fallthrough]];
        default:
            break;
    }
    // Atomic with relaxed order reason: data race with isXGcInProgress_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    if (isXGcInProgress_.load(std::memory_order_relaxed)) {
        return false;
    }
    // Atomic with relaxed order reason: data race with targetThreasholdSize_ with no synchronization or ordering
    // constraints imposed on other reads or writes
    if (storage_->Size() < targetThreasholdSize_.load(std::memory_order_relaxed)) {
        return false;
    }
#if defined(PANDA_TARGET_OHOS)
    // Do not trigger XGC while the app is in a highly sensitive state
    if (AppStateManager::GetCurrent()->GetAppState().GetState() == AppState::State::SENSITIVE_START) {
        return false;
    }
#endif  // PANDA_TARGET_OHOS
    return true;
}

void XGC::TriggerGcIfNeeded(mem::GC *gc)
{
    if (!NeedToTriggerXGC(gc)) {
        return;
    }
    this->Trigger(gc, MakePandaUnique<GCTask>(GCTaskCause::CROSSREF_CAUSE, time::GetCurrentTimeInNanos()));
}

}  // namespace ark::ets::interop::js