/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_BARRIERS_INL_H
#define ECMASCRIPT_MEM_BARRIERS_INL_H

#include "ecmascript/base/config.h"
#include "ecmascript/daemon/daemon_thread.h"
#include "ecmascript/js_tagged_value.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/mem/assert_scope.h"
#include "ecmascript/mem/barriers.h"
#include "ecmascript/mem/region-inl.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/tagged_array.h"

namespace panda::ecmascript {
template<WriteBarrierType writeType = WriteBarrierType::NORMAL>
static ARK_INLINE void WriteBarrier(const JSThread *thread, void *obj, size_t offset, JSTaggedType value)
{
    if (UNLIKELY(thread->IsEnableCMCGC())) {
        common::BaseRuntime::WriteBarrier(obj, reinterpret_cast<void *>(ToUintPtr(obj) + offset),
                                          reinterpret_cast<void *>(value));
        // Ignore barrier for cmc gc allocation
        return;
    }
    // NOTE: The logic in WriteBarrier should be synced with CopyObject.
    // If any new feature/bugfix is added in WriteBarrier, it should also be added to CopyObject.
    ASSERT(value != JSTaggedValue::VALUE_UNDEFINED);
    Region *objectRegion = Region::ObjectAddressToRange(static_cast<TaggedObject *>(obj));
    Region *valueRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(value));
#if ECMASCRIPT_ENABLE_BARRIER_CHECK
    // During the AOT deserialization process, the address of the hclass is set on the object first, but the
    // object layout of the hclass has not been fully initialized yet, so this check needs to be skipped.
    if constexpr (writeType != WriteBarrierType::AOT_DESERIALIZE) {
        if (!thread->GetEcmaVM()->GetHeap()->IsAlive(JSTaggedValue(value).GetHeapObject())) {
            LOG_FULL(FATAL) << "WriteBarrier checked value:" << value << " is invalid!";
        }
    }
#endif
    uintptr_t slotAddr = ToUintPtr(obj) + offset;
    if (objectRegion->InGeneralOldSpace() && valueRegion->InYoungSpace()) {
        // Should align with '8' on both 64-bit and 32-bit platforms
        ASSERT((slotAddr % static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
        objectRegion->InsertOldToNewRSet(slotAddr);
    } else if (!objectRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
#ifndef NDEBUG
        if (UNLIKELY(JSTaggedValue(value).IsWeakForHeapObject())) {
            CHECK_NO_LOCAL_TO_SHARE_WEAK_REF_HANDLE;
        }
#endif
        objectRegion->InsertLocalToShareRSet(slotAddr);
    }
    ASSERT(!objectRegion->InSharedHeap() || valueRegion->InSharedHeap());
    if (!valueRegion->InSharedHeap() && thread->IsConcurrentMarkingOrFinished()) {
        Barriers::Update(thread, slotAddr, objectRegion, reinterpret_cast<TaggedObject *>(value),
                         valueRegion, writeType);
        // NOTE: ConcurrentMarking and SharedConcurrentMarking can be enabled at the same time, but a specific value
        // can't be "not shared heap" and "in SharedSweepableSpace" at the same time. So using "if - else if" is safe.
    } else if (valueRegion->InSharedSweepableSpace() && thread->IsSharedConcurrentMarkingOrFinished()) {
        if constexpr (writeType != WriteBarrierType::DESERIALIZE) {
            Barriers::UpdateShared(thread, slotAddr, objectRegion, reinterpret_cast<TaggedObject *>(value),
                                   valueRegion);
        } else {
            // During deserialization, references are never added from an old object (not allocated by
            // deserializing) to a new object (allocated by deserializing); only two kinds of references
            // (new->old, new->new) will be added. The old object is considered a serialize_root and is
            // marked and pushed in SharedGC::MarkRoots, so just marking all the new objects is enough;
            // there is no need to push them to the work manager and recursively visit their slots.
            ASSERT(DaemonThread::GetInstance()->IsConcurrentMarkingOrFinished());
            if (valueRegion->InSCollectSet() && objectRegion->InSharedHeap()) {
                objectRegion->AtomicInsertCrossRegionRSet(slotAddr);
            }
            valueRegion->AtomicMark(JSTaggedValue(value).GetHeapObject());
        }
    }
}
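
// Illustrative sketch (not part of the upstream file): how a typical caller reaches the
// barrier above via Barriers::SetObject, defined below. `holder`, `value`, and
// FIELD_OFFSET are hypothetical placeholders; only the SetObject/JSTaggedValue APIs
// shown are real names from this codebase.
//
//     const JSThread *thread = ...;        // current mutator thread
//     TaggedObject *holder = ...;          // object being mutated
//     JSTaggedValue value = ...;           // heap reference being stored
//     constexpr size_t FIELD_OFFSET = 16;  // hypothetical 8-byte-aligned field offset
//     // Store the raw tagged word, then run WriteBarrier to maintain the
//     // old-to-new / local-to-share remembered sets and marking invariants:
//     Barriers::SetObject<true>(thread, holder, FIELD_OFFSET, value.GetRawData());
//
// With needWriteBarrier = false, only the raw store is performed, which is safe
// solely for primitive (non-heap-reference) values.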
template<bool needWriteBarrier>
inline void Barriers::SetObject(const JSThread *thread, void *obj, size_t offset, JSTaggedType value)
{
#ifndef ARK_USE_SATB_BARRIER
    // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
    *reinterpret_cast<JSTaggedType *>(reinterpret_cast<uintptr_t>(obj) + offset) = value;
    if constexpr (needWriteBarrier) {
        WriteBarrier(thread, obj, offset, value);
    }
#else
    WriteBarrier(thread, obj, offset, value);
    *reinterpret_cast<JSTaggedType *>(reinterpret_cast<uintptr_t>(obj) + offset) = value;
#endif
}

// explicit instantiation to avoid compile optimization
template void Barriers::SetObject<true>(const JSThread *, void *, size_t, JSTaggedType);
template void Barriers::SetObject<false>(const JSThread *, void *, size_t, JSTaggedType);

inline void Barriers::SynchronizedSetObject(const JSThread *thread, void *obj, size_t offset, JSTaggedType value,
                                            bool isPrimitive)
{
#ifndef ARK_USE_SATB_BARRIER
    reinterpret_cast<volatile std::atomic<JSTaggedType> *>(ToUintPtr(obj) + offset)
        ->store(value, std::memory_order_release);
    if (!isPrimitive) {
        WriteBarrier(thread, obj, offset, value);
    }
#else
    WriteBarrier(thread, obj, offset, value);
    reinterpret_cast<volatile std::atomic<JSTaggedType> *>(ToUintPtr(obj) + offset)
        ->store(value, std::memory_order_release);
#endif
}

template<bool needReadBarrier>
static inline void CopyBackward([[maybe_unused]] const JSThread *thread, JSTaggedValue *dst,
                                const JSTaggedValue *src, size_t count)
{
    if constexpr (needReadBarrier == false) {
        std::copy_backward(src, src + count, dst + count);
        return;
    }
    Barriers::CMCArrayCopyReadBarrierBackward(thread, dst, src, count);
}

template<bool needReadBarrier>
static inline void CopyForward([[maybe_unused]] const JSThread *thread, JSTaggedValue *dst,
                               const JSTaggedValue *src, size_t count)
{
    if constexpr (needReadBarrier == false) {
        std::copy_n(src, count, dst);
        return;
    }
    Barriers::CMCArrayCopyReadBarrierForward(thread, dst, src, count);
}

template<Region::RegionSpaceKind kind>
ARK_NOINLINE bool BatchBitSet(const JSThread *thread, Region *objectRegion, JSTaggedValue *dst, size_t count);
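
// Illustrative sketch (not part of the upstream file): the CopyForward/CopyBackward pair
// above mirrors memmove's overlap rule. With hypothetical overlapping ranges inside one
// tagged array, shifting `count` slots one position to the right must copy backward,
// because a forward copy would read slots it has already overwritten:
//
//     JSTaggedValue *base = ...;  // element 0 of some tagged array
//     CopyBackward<false>(thread, base + 1, base, count);   // safe: dst > src, ranges overlap
//     // CopyForward<false>(thread, base + 1, base, count); // wrong: would smear base[0]
//
// CopyObjectPrimitive below performs exactly this dst/src range check when
// maybeOverlap is true.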
// to remove parameter dstObj, maybe thread also
template<bool needWriteBarrier, bool maybeOverlap>
void Barriers::CopyObject(const JSThread *thread, const TaggedObject *dstObj, JSTaggedValue *dstAddr,
                          const JSTaggedValue *srcAddr, size_t count)
{
    // NOTE: The logic in CopyObject should be synced with WriteBarrier.
    // If any new feature/bugfix is added in CopyObject, it should also be added to WriteBarrier.

    // step 1. Copy from src to dst directly.
    if (thread->NeedReadBarrier()) {
        CopyObjectPrimitive<true, maybeOverlap>(thread, dstAddr, srcAddr, count);
    } else {
        CopyObjectPrimitive<false, maybeOverlap>(thread, dstAddr, srcAddr, count);
    }
    if constexpr (!needWriteBarrier) {
        return;
    }
    if (UNLIKELY(thread->IsEnableCMCGC())) {
        Barriers::CMCArrayCopyWriteBarrier(thread, dstObj, (void *)srcAddr, (void *)dstAddr, count);
        return;
    }
    // step 2. According to the object region, update the corresponding bit set in batch.
    Region *objectRegion = Region::ObjectAddressToRange(ToUintPtr(dstObj));
    if (!objectRegion->InSharedHeap()) {
        bool allValueNotHeap = false;
        if (objectRegion->InYoungSpace()) {
            allValueNotHeap = BatchBitSet<Region::InYoung>(thread, objectRegion, dstAddr, count);
        } else if (objectRegion->InGeneralOldSpace()) {
            allValueNotHeap = BatchBitSet<Region::InGeneralOld>(thread, objectRegion, dstAddr, count);
        } else {
            allValueNotHeap = BatchBitSet<Region::Other>(thread, objectRegion, dstAddr, count);
        }
        if (allValueNotHeap) {
            return;
        }
    }
    // step 3. According to the marking status, update the barriers.
    const bool marking = thread->IsConcurrentMarkingOrFinished();
    const bool sharedMarking = thread->IsSharedConcurrentMarkingOrFinished();
    if (!marking && !sharedMarking) {
        return;
    }
    for (uint32_t i = 0; i < count; i++) {
        JSTaggedValue taggedValue = *(dstAddr + i);
        if (!taggedValue.IsHeapObject()) {
            continue;
        }
        Region *valueRegion = Region::ObjectAddressToRange(taggedValue.GetTaggedObject());
        ASSERT(!objectRegion->InSharedHeap() || valueRegion->InSharedHeap());
        if (marking && !valueRegion->InSharedHeap()) {
            const uintptr_t slotAddr = ToUintPtr(dstAddr) + JSTaggedValue::TaggedTypeSize() * i;
            Barriers::Update(thread, slotAddr, objectRegion, taggedValue.GetTaggedObject(), valueRegion);
            // NOTE: ConcurrentMarking and SharedConcurrentMarking can be enabled at the same time, but a specific
            // value can't be "not shared heap" and "in SharedSweepableSpace" at the same time. So using
            // "if - else if" is safe.
        } else if (sharedMarking && valueRegion->InSharedSweepableSpace()) {
            const uintptr_t slotAddr = ToUintPtr(dstAddr) + JSTaggedValue::TaggedTypeSize() * i;
            Barriers::UpdateShared(thread, slotAddr, objectRegion, taggedValue.GetTaggedObject(), valueRegion);
        }
    }
}

template<bool needReadBarrier, bool maybeOverlap>
inline void Barriers::CopyObjectPrimitive(const JSThread *thread, JSTaggedValue *dst,
                                          const JSTaggedValue *src, size_t count)
{
    // Copying primitive values doesn't need the thread.
    ASSERT((ToUintPtr(dst) % static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
    if constexpr (maybeOverlap == false) {
        CopyForward<needReadBarrier>(thread, dst, src, count);
        return;
    }
    if (dst > src && dst < src + count) {
        CopyBackward<needReadBarrier>(thread, dst, src, count);
    } else {
        CopyForward<needReadBarrier>(thread, dst, src, count);
    }
}
}  // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_BARRIERS_INL_H