/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_BARRIERS_INL_H
#define ECMASCRIPT_MEM_BARRIERS_INL_H

#include "ecmascript/base/config.h"
#include "ecmascript/daemon/daemon_thread.h"
#include "ecmascript/js_tagged_value.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/mem/assert_scope.h"
#include "ecmascript/mem/barriers.h"
#include "ecmascript/mem/region-inl.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/tagged_array.h"

namespace panda::ecmascript {
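// Post-write barrier for storing the heap reference `value` into the slot at `obj` + `offset`.
// Under CMC GC it delegates to the common BaseRuntime barrier; otherwise it maintains the
// old-to-new and local-to-share remembered sets and, while (shared) concurrent marking is
// running or finished, forwards the store to Barriers::Update / Barriers::UpdateShared.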
template<WriteBarrierType writeType = WriteBarrierType::NORMAL>
static ARK_INLINE void WriteBarrier(const JSThread *thread, void *obj, size_t offset, JSTaggedType value)
{
    if (UNLIKELY(thread->IsEnableCMCGC())) {
        common::BaseRuntime::WriteBarrier(obj, reinterpret_cast<void*>(ToUintPtr(obj) + offset),
                                          reinterpret_cast<void*>(value));
        // The CMC GC barrier above is sufficient; skip the non-CMC logic below.
        return;
    }

    // NOTE: The logic in WriteBarrier should be kept in sync with CopyObject.
    // Any new feature or bugfix added to WriteBarrier should also be added to CopyObject.
    ASSERT(value != JSTaggedValue::VALUE_UNDEFINED);
    Region *objectRegion = Region::ObjectAddressToRange(static_cast<TaggedObject *>(obj));
    Region *valueRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(value));
#if ECMASCRIPT_ENABLE_BARRIER_CHECK
    // During AOT deserialization, the address of the hclass is set on the object before the
    // hclass's object layout has been fully initialized, so this check needs to be skipped.
    if constexpr (writeType != WriteBarrierType::AOT_DESERIALIZE) {
        if (!thread->GetEcmaVM()->GetHeap()->IsAlive(JSTaggedValue(value).GetHeapObject())) {
            LOG_FULL(FATAL) << "WriteBarrier checked value:" << value << " is invalid!";
        }
    }
#endif
    uintptr_t slotAddr = ToUintPtr(obj) + offset;
    if (objectRegion->InGeneralOldSpace() && valueRegion->InYoungSpace()) {
        // Slots should be 8-byte aligned on both 64-bit and 32-bit platforms.
        ASSERT((slotAddr % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
        objectRegion->InsertOldToNewRSet(slotAddr);
    } else if (!objectRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
#ifndef NDEBUG
        if (UNLIKELY(JSTaggedValue(value).IsWeakForHeapObject())) {
            CHECK_NO_LOCAL_TO_SHARE_WEAK_REF_HANDLE;
        }
#endif
        objectRegion->InsertLocalToShareRSet(slotAddr);
    }
    ASSERT(!objectRegion->InSharedHeap() || valueRegion->InSharedHeap());
    if (!valueRegion->InSharedHeap() && thread->IsConcurrentMarkingOrFinished()) {
        Barriers::Update(thread, slotAddr, objectRegion, reinterpret_cast<TaggedObject *>(value),
                         valueRegion, writeType);
        // NOTE: ConcurrentMarking and SharedConcurrentMarking can be enabled at the same time, but a specific
        // value can't be "not in the shared heap" and "in SharedSweepableSpace" at the same time, so using
        // "if - else if" is safe.
    } else if (valueRegion->InSharedSweepableSpace() && thread->IsSharedConcurrentMarkingOrFinished()) {
        if constexpr (writeType != WriteBarrierType::DESERIALIZE) {
            Barriers::UpdateShared(thread, slotAddr, objectRegion, reinterpret_cast<TaggedObject *>(value),
                                   valueRegion);
        } else {
            // During deserialization, references are never added from an old object (not allocated by
            // deserializing) to a new object (allocated by deserializing); only two kinds of references
            // (new -> old, new -> new) are added. The old objects are treated as serialize roots and are
            // marked and pushed in SharedGC::MarkRoots, so marking all the new objects is enough; there is
            // no need to push them to the work manager and recursively visit their slots.
            ASSERT(DaemonThread::GetInstance()->IsConcurrentMarkingOrFinished());
            if (valueRegion->InSCollectSet() && objectRegion->InSharedHeap()) {
                objectRegion->AtomicInsertCrossRegionRSet(slotAddr);
            }
            valueRegion->AtomicMark(JSTaggedValue(value).GetHeapObject());
        }
    }
}
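// Stores `value` into the slot at `obj` + `offset` and, when requested, applies the write barrier.
// Without ARK_USE_SATB_BARRIER the store happens first and the barrier runs afterwards; with it,
// the barrier runs before the store, presumably so a snapshot-at-the-beginning barrier can still
// observe the slot's old contents.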
template<bool needWriteBarrier>
inline void Barriers::SetObject(const JSThread *thread, void *obj, size_t offset, JSTaggedType value)
{
#ifndef ARK_USE_SATB_BARRIER
    // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
    *reinterpret_cast<JSTaggedType *>(reinterpret_cast<uintptr_t>(obj) + offset) = value;
    if constexpr (needWriteBarrier) {
        WriteBarrier(thread, obj, offset, value);
    }
#else
    WriteBarrier(thread, obj, offset, value);
    *reinterpret_cast<JSTaggedType *>(reinterpret_cast<uintptr_t>(obj) + offset) = value;
#endif
}

// Explicit instantiations to ensure both variants are emitted rather than optimized away.
template void Barriers::SetObject<true>(const JSThread*, void*, size_t, JSTaggedType);
template void Barriers::SetObject<false>(const JSThread*, void*, size_t, JSTaggedType);
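
// Usage sketch (illustrative only; `array`, `index`, and `value` are hypothetical):
//   size_t offset = TaggedArray::DATA_OFFSET + index * JSTaggedValue::TaggedTypeSize();
//   Barriers::SetObject<true>(thread, array, offset, value.GetRawData());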
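// Same as SetObject, but the store is release-ordered so that a concurrent reader which
// acquire-loads the slot also observes the stores that initialized `value`. Passing
// `isPrimitive == true` skips the write barrier, since primitive values are not heap references.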
inline void Barriers::SynchronizedSetObject(const JSThread *thread, void *obj, size_t offset, JSTaggedType value,
                                            bool isPrimitive)
{
#ifndef ARK_USE_SATB_BARRIER
    reinterpret_cast<volatile std::atomic<JSTaggedType> *>(ToUintPtr(obj) + offset)->store(value,
        std::memory_order_release);
    if (!isPrimitive) {
        WriteBarrier(thread, obj, offset, value);
    }
#else
    WriteBarrier(thread, obj, offset, value);
    reinterpret_cast<volatile std::atomic<JSTaggedType> *>(ToUintPtr(obj) + offset)->store(value,
        std::memory_order_release);
#endif
}
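// Bulk copy helpers for tagged-value ranges. When no read barrier is needed they degenerate to
// plain std::copy_backward / std::copy_n; under CMC GC the barrier-aware routines are used instead.
// CopyBackward is for ranges where dst overlaps the tail of src; CopyForward handles the rest.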
template <bool needReadBarrier>
static inline void CopyBackward([[maybe_unused]] const JSThread *thread, JSTaggedValue *dst,
                                const JSTaggedValue *src, size_t count)
{
    if constexpr (!needReadBarrier) {
        std::copy_backward(src, src + count, dst + count);
        return;
    }

    Barriers::CMCArrayCopyReadBarrierBackward(thread, dst, src, count);
}

template <bool needReadBarrier>
static inline void CopyForward([[maybe_unused]] const JSThread *thread, JSTaggedValue *dst,
                               const JSTaggedValue *src, size_t count)
{
    if constexpr (!needReadBarrier) {
        std::copy_n(src, count, dst);
        return;
    }

    Barriers::CMCArrayCopyReadBarrierForward(thread, dst, src, count);
}
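// Defined out of line. Scans `count` slots starting at `dst` and updates the owning region's
// remembered-set bits in batch; returns true when none of the copied values is a heap object,
// which lets CopyObject skip the per-slot marking pass below.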
template <Region::RegionSpaceKind kind>
ARK_NOINLINE bool BatchBitSet(const JSThread* thread, Region* objectRegion, JSTaggedValue* dst, size_t count);

// TODO: remove the dstObj parameter, and possibly thread as well.
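// Copies `count` tagged values from `srcAddr` into `dstObj` at `dstAddr`, then replays the
// write-barrier work for the whole range in batch.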
template <bool needWriteBarrier, bool maybeOverlap>
void Barriers::CopyObject(const JSThread *thread, const TaggedObject *dstObj, JSTaggedValue *dstAddr,
                          const JSTaggedValue *srcAddr, size_t count)
{
    // NOTE: The logic in CopyObject should be kept in sync with WriteBarrier.
    // Any new feature or bugfix added to CopyObject should also be added to WriteBarrier.

    // Step 1: copy from src to dst directly.
    if (thread->NeedReadBarrier()) {
        CopyObjectPrimitive<true, maybeOverlap>(thread, dstAddr, srcAddr, count);
    } else {
        CopyObjectPrimitive<false, maybeOverlap>(thread, dstAddr, srcAddr, count);
    }

    if constexpr (!needWriteBarrier) {
        return;
    }

    if (UNLIKELY(thread->IsEnableCMCGC())) {
        Barriers::CMCArrayCopyWriteBarrier(thread, dstObj, static_cast<void*>(const_cast<JSTaggedValue*>(srcAddr)),
                                           static_cast<void*>(dstAddr), count);
        return;
    }

    // Step 2: update the corresponding bit sets in batch, according to the object's region.
    Region* objectRegion = Region::ObjectAddressToRange(ToUintPtr(dstObj));
    if (!objectRegion->InSharedHeap()) {
        bool allValueNotHeap = false;
        if (objectRegion->InYoungSpace()) {
            allValueNotHeap = BatchBitSet<Region::InYoung>(thread, objectRegion, dstAddr, count);
        } else if (objectRegion->InGeneralOldSpace()) {
            allValueNotHeap = BatchBitSet<Region::InGeneralOld>(thread, objectRegion, dstAddr, count);
        } else {
            allValueNotHeap = BatchBitSet<Region::Other>(thread, objectRegion, dstAddr, count);
        }
        if (allValueNotHeap) {
            return;
        }
    }
    // Step 3: update the barriers according to the marking status.
    const bool marking = thread->IsConcurrentMarkingOrFinished();
    const bool sharedMarking = thread->IsSharedConcurrentMarkingOrFinished();
    if (!marking && !sharedMarking) {
        return;
    }
    for (size_t i = 0; i < count; i++) {
        JSTaggedValue taggedValue = *(dstAddr + i);
        if (!taggedValue.IsHeapObject()) {
            continue;
        }
        Region* valueRegion = Region::ObjectAddressToRange(taggedValue.GetTaggedObject());
        ASSERT(!objectRegion->InSharedHeap() || valueRegion->InSharedHeap());
        if (marking && !valueRegion->InSharedHeap()) {
            const uintptr_t slotAddr = ToUintPtr(dstAddr) + JSTaggedValue::TaggedTypeSize() * i;
            Barriers::Update(thread, slotAddr, objectRegion, taggedValue.GetTaggedObject(), valueRegion);
            // NOTE: ConcurrentMarking and SharedConcurrentMarking can be enabled at the same time, but a specific
            // value can't be "not in the shared heap" and "in SharedSweepableSpace" at the same time, so using
            // "if - else if" is safe.
        } else if (sharedMarking && valueRegion->InSharedSweepableSpace()) {
            const uintptr_t slotAddr = ToUintPtr(dstAddr) + JSTaggedValue::TaggedTypeSize() * i;
            Barriers::UpdateShared(thread, slotAddr, objectRegion, taggedValue.GetTaggedObject(), valueRegion);
        }
    }
}
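
// Usage sketch (illustrative only; `dstArray`, `srcArray`, and `count` are hypothetical):
//   auto *to = reinterpret_cast<JSTaggedValue *>(ToUintPtr(dstArray) + TaggedArray::DATA_OFFSET);
//   auto *from = reinterpret_cast<const JSTaggedValue *>(ToUintPtr(srcArray) + TaggedArray::DATA_OFFSET);
//   Barriers::CopyObject<true, false>(thread, dstArray, to, from, count);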
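// Raw slot copy with no write barrier. Chooses a backward copy when `dst` overlaps the tail of
// `src`, and applies the CMC read-barrier variants when `needReadBarrier` is set.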
template <bool needReadBarrier, bool maybeOverlap>
inline void Barriers::CopyObjectPrimitive(const JSThread *thread, JSTaggedValue* dst, const JSTaggedValue* src,
                                          size_t count)
{
    // Copying primitive values does not need the thread.
    ASSERT((ToUintPtr(dst) % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
    if constexpr (!maybeOverlap) {
        CopyForward<needReadBarrier>(thread, dst, src, count);
        return;
    }
    if (dst > src && dst < src + count) {
        CopyBackward<needReadBarrier>(thread, dst, src, count);
    } else {
        CopyForward<needReadBarrier>(thread, dst, src, count);
    }
}
} // namespace panda::ecmascript

#endif // ECMASCRIPT_MEM_BARRIERS_INL_H