/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_BARRIERS_INL_H
#define ECMASCRIPT_MEM_BARRIERS_INL_H

#include "ecmascript/base/config.h"
#include "ecmascript/daemon/daemon_thread.h"
#include "ecmascript/js_tagged_value.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/mem/assert_scope.h"
#include "ecmascript/mem/barriers.h"
#include "ecmascript/mem/region-inl.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/ecma_vm.h"

namespace panda::ecmascript {
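// Write barrier for storing the heap reference `value` into the slot at `obj + offset`: updates the
// old-to-new and local-to-share remembered sets and notifies (shared) concurrent marking when it is active.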
template<WriteBarrierType writeType = WriteBarrierType::NORMAL>
static ARK_INLINE void WriteBarrier(const JSThread *thread, void *obj, size_t offset, JSTaggedType value)
{
    // NOTE: The logic in WriteBarrier should be kept in sync with CopyObject.
    // If any new feature or bugfix is added to WriteBarrier, it should also be added to CopyObject.
    ASSERT(value != JSTaggedValue::VALUE_UNDEFINED);
    Region *objectRegion = Region::ObjectAddressToRange(static_cast<TaggedObject *>(obj));
    Region *valueRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(value));
#if ECMASCRIPT_ENABLE_BARRIER_CHECK
    // During AOT deserialization, the hclass address is set on the object before the hclass object layout
    // has been fully initialized, so this check needs to be skipped.
    if constexpr (writeType != WriteBarrierType::AOT_DESERIALIZE) {
        if (!thread->GetEcmaVM()->GetHeap()->IsAlive(JSTaggedValue(value).GetHeapObject())) {
            LOG_FULL(FATAL) << "WriteBarrier checked value:" << value << " is invalid!";
        }
    }
#endif
    uintptr_t slotAddr = ToUintPtr(obj) + offset;
    if (objectRegion->InGeneralOldSpace() && valueRegion->InYoungSpace()) {
        // The slot should be 8-byte aligned on both 64-bit and 32-bit platforms.
        ASSERT((slotAddr % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
        objectRegion->InsertOldToNewRSet(slotAddr);
    } else if (!objectRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
#ifndef NDEBUG
        if (UNLIKELY(JSTaggedValue(value).IsWeakForHeapObject())) {
            CHECK_NO_LOCAL_TO_SHARE_WEAK_REF_HANDLE;
        }
#endif
        objectRegion->InsertLocalToShareRSet(slotAddr);
    }
    ASSERT(!objectRegion->InSharedHeap() || valueRegion->InSharedHeap());
    if (!valueRegion->InSharedHeap() && thread->IsConcurrentMarkingOrFinished()) {
        Barriers::Update(thread, slotAddr, objectRegion, reinterpret_cast<TaggedObject *>(value),
                         valueRegion, writeType);
        // NOTE: ConcurrentMarking and SharedConcurrentMarking can be enabled at the same time, but a specific value
        // can't be both "not in the shared heap" and "in SharedSweepableSpace", so using "if - else if" is safe.
    } else if (valueRegion->InSharedSweepableSpace() && thread->IsSharedConcurrentMarkingOrFinished()) {
        if constexpr (writeType != WriteBarrierType::DESERIALIZE) {
            Barriers::UpdateShared(thread, slotAddr, objectRegion, reinterpret_cast<TaggedObject *>(value),
                                   valueRegion);
        } else {
            // During deserialization, references are never added from an old object (not allocated by
            // deserializing) to a new object (allocated by deserializing); only two kinds of references
            // (new->old, new->new) are added. The old object is considered a serialize_root and is marked
            // and pushed in SharedGC::MarkRoots, so marking every new object is enough; there is no need to
            // push them to the work manager and recursively visit their slots.
            ASSERT(DaemonThread::GetInstance()->IsConcurrentMarkingOrFinished());
            if (valueRegion->InSCollectSet() && objectRegion->InSharedHeap()) {
                objectRegion->AtomicInsertCrossRegionRSet(slotAddr);
            }
            valueRegion->AtomicMark(JSTaggedValue(value).GetHeapObject());
        }
    }
}

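// Stores a tagged value into the slot at obj + offset; applies WriteBarrier when needWriteBarrier is true.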
template<bool needWriteBarrier>
inline void Barriers::SetObject(const JSThread *thread, void *obj, size_t offset, JSTaggedType value)
{
    // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
    *reinterpret_cast<JSTaggedType *>(reinterpret_cast<uintptr_t>(obj) + offset) = value;
    if constexpr (needWriteBarrier) {
        WriteBarrier(thread, obj, offset, value);
    }
}

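// Stores the hclass word at offset 0 of obj with release semantics, then applies WriteBarrier.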
inline void Barriers::SynchronizedSetClass(const JSThread *thread, void *obj, JSTaggedType value)
{
    reinterpret_cast<volatile std::atomic<JSTaggedType> *>(obj)->store(value, std::memory_order_release);
    WriteBarrier(thread, obj, 0, value);
}

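// Stores a tagged value at obj + offset with release semantics; WriteBarrier is skipped for primitive values.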
inline void Barriers::SynchronizedSetObject(const JSThread *thread, void *obj, size_t offset, JSTaggedType value,
                                            bool isPrimitive)
{
    reinterpret_cast<volatile std::atomic<JSTaggedType> *>(ToUintPtr(obj) + offset)->store(value,
        std::memory_order_release);
    if (!isPrimitive) {
        WriteBarrier(thread, obj, offset, value);
    }
}

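// memmove-style copy of count tagged values; copies backwards when the destination overlaps the source.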
static inline void CopyMaybeOverlap(JSTaggedValue* dst, const JSTaggedValue* src, size_t count)
{
    if (dst > src && dst < src + count) {
        std::copy_backward(src, src + count, dst + count);
    } else {
        std::copy_n(src, count, dst);
    }
}

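// memcpy-style copy of count tagged values; dst and src must not overlap.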
static inline void CopyNoOverlap(JSTaggedValue* __restrict__ dst, const JSTaggedValue* __restrict__ src, size_t count)
{
    std::copy_n(src, count, dst);
}

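// Batch counterpart of the per-slot remembered-set updates in WriteBarrier (defined elsewhere); returns true
// when none of the copied values is a heap object, so the caller can skip the marking barriers.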
template <Region::RegionSpaceKind kind>
ARK_NOINLINE bool BatchBitSet(const JSThread* thread, Region* objectRegion, JSTaggedValue* dst, size_t count);

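// Copies count tagged slots from srcAddr into dstObj at dstAddr, then applies the write barriers in batch.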
template <bool needWriteBarrier, bool maybeOverlap>
void Barriers::CopyObject(const JSThread *thread, const TaggedObject *dstObj, JSTaggedValue *dstAddr,
                          const JSTaggedValue *srcAddr, size_t count)
{
    // NOTE: The logic in CopyObject should be kept in sync with WriteBarrier.
    // If any new feature or bugfix is added to CopyObject, it should also be added to WriteBarrier.

    // Step 1. Copy from src to dst directly.
    CopyObjectPrimitive<maybeOverlap>(dstAddr, srcAddr, count);
    if constexpr (!needWriteBarrier) {
        return;
    }
    // Step 2. According to the object region, update the corresponding bit sets in batch.
    Region* objectRegion = Region::ObjectAddressToRange(ToUintPtr(dstObj));
    if (!objectRegion->InSharedHeap()) {
        bool allValueNotHeap = false;
        if (objectRegion->InYoungSpace()) {
            allValueNotHeap = BatchBitSet<Region::InYoung>(thread, objectRegion, dstAddr, count);
        } else if (objectRegion->InGeneralOldSpace()) {
            allValueNotHeap = BatchBitSet<Region::InGeneralOld>(thread, objectRegion, dstAddr, count);
        } else {
            allValueNotHeap = BatchBitSet<Region::Other>(thread, objectRegion, dstAddr, count);
        }
        if (allValueNotHeap) {
            return;
        }
    }
    // Step 3. According to the marking status, update the barriers.
    const bool marking = thread->IsConcurrentMarkingOrFinished();
    const bool sharedMarking = thread->IsSharedConcurrentMarkingOrFinished();
    if (!marking && !sharedMarking) {
        return;
    }
    for (uint32_t i = 0; i < count; i++) {
        JSTaggedValue taggedValue = *(dstAddr + i);
        if (!taggedValue.IsHeapObject()) {
            continue;
        }
        Region* valueRegion = Region::ObjectAddressToRange(taggedValue.GetTaggedObject());
        ASSERT(!objectRegion->InSharedHeap() || valueRegion->InSharedHeap());
        if (marking && !valueRegion->InSharedHeap()) {
            const uintptr_t slotAddr = ToUintPtr(dstAddr) + JSTaggedValue::TaggedTypeSize() * i;
            Barriers::Update(thread, slotAddr, objectRegion, taggedValue.GetTaggedObject(), valueRegion);
            // NOTE: ConcurrentMarking and SharedConcurrentMarking can be enabled at the same time, but a specific
            // value can't be both "not in the shared heap" and "in SharedSweepableSpace", so using "if - else if"
            // is safe.
        } else if (sharedMarking && valueRegion->InSharedSweepableSpace()) {
            const uintptr_t slotAddr = ToUintPtr(dstAddr) + JSTaggedValue::TaggedTypeSize() * i;
            Barriers::UpdateShared(thread, slotAddr, objectRegion, taggedValue.GetTaggedObject(), valueRegion);
        }
    }
}

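// Raw copy of count tagged slots without any barrier; the overlap-safe path is chosen at compile time.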
template <bool maybeOverlap>
inline void Barriers::CopyObjectPrimitive(JSTaggedValue* dst, const JSTaggedValue* src, size_t count)
{
    // Copying primitive values does not need the thread.
    ASSERT((ToUintPtr(dst) % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
    if constexpr (maybeOverlap) {
        CopyMaybeOverlap(dst, src, count);
    } else {
        CopyNoOverlap(dst, src, count);
    }
}
} // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_BARRIERS_INL_H