/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_

#include <securec.h>

#include "libpandabase/mem/mem.h"
#include "runtime/include/class.h"
#include "runtime/include/field.h"
#include "runtime/include/object_accessor.h"
#include "runtime/mem/gc/gc_barrier_set.h"
namespace ark {

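// Overview (added note): every reference write below follows the same barrier
// discipline for a concurrent GC: a pre-barrier reports the value being
// overwritten, then the store is performed, then a post-barrier records the
// newly written reference (unless the GC's post-barrier type is empty, e.g.
// for a non-generational collector). IS_DYN distinguishes dynamic-language
// (tagged-value) objects, whose references are stored as raw ObjectHeader *,
// from static ones, which store ObjectPointerType (possibly a compressed
// 32-bit representation).
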
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject(const void *obj, size_t offset)
{
    // No GC with read barriers is in use now, so NEED_READ_BARRIER is ignored
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

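// Stores a reference field at the given offset. When NEED_WRITE_BARRIER is
// set, the old value is read first so the pre-barrier can report it, and a
// post-barrier is issued for the new value after the store.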
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
inline void ObjectAccessor::SetObject(void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject([[maybe_unused]] const ManagedThread *thread, const void *obj,
                                               size_t offset)
{
    // No GC with read barriers is in use now, so NEED_READ_BARRIER is ignored
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
inline void ObjectAccessor::SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet(thread);
        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        if (!mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetPrimitive<T, true>(obj, field.GetOffset());
    }
    return GetPrimitive<T, false>(obj, field.GetOffset());
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, const Field &field, T value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetPrimitive<T, true>(obj, field.GetOffset(), value);
    } else {
        SetPrimitive<T, false>(obj, field.GetOffset(), value);
    }
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(void *obj, const Field &field, ObjectHeader *value)
{
    ASSERT(IsAddressInObjectsHeapOrNull(value));
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    }
}

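// Typical use of the field accessors above (a sketch, assuming the defaults
// NEED_*_BARRIER = true and IS_DYN = false declared in object_accessor.h, and
// a Field `field` resolved elsewhere); volatility and barrier selection
// happen inside the accessor:
//   ObjectHeader *old = ObjectAccessor::GetFieldObject(obj, field);
//   ObjectAccessor::SetFieldObject(obj, field, newValue);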
/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(const ManagedThread *thread, void *obj, const Field &field,
                                           ObjectHeader *value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memoryOrder)
{
    return Get<T>(obj, offset, memoryOrder);
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    Set<T>(obj, offset, value, memoryOrder);
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, int offset, std::memory_order memoryOrder)
{
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType>(obj, offset, memoryOrder));
    }
    return Get<ObjectHeader *>(obj, offset, memoryOrder);
}

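// Returns the memory order for the read that pairs with a write (or vice
// versa): acquire and release are swapped, all other orders pass through
// unchanged.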
static inline std::memory_order GetComplementMemoryOrder(std::memory_order memoryOrder)
{
    if (memoryOrder == std::memory_order_acquire) {
        memoryOrder = std::memory_order_release;
    } else if (memoryOrder == std::memory_order_release) {
        memoryOrder = std::memory_order_acquire;
    }
    return memoryOrder;
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            // If SetFieldObject is called with std::memory_order_release,
            // we need to use the complementary order std::memory_order_acquire
            // because we read the value here.
            ObjectHeader *preVal = GetFieldObject<false, IS_DYN>(obj, offset, GetComplementMemoryOrder(memoryOrder));
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
    }
}

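// Returns {success, witnessed value}. std::atomic compare_exchange writes the
// actual current value back into its first argument on failure, so the second
// element of the pair is the value observed at the address either way.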
/* static */
template <typename T>
inline std::pair<bool, T> ObjectAccessor::CompareAndSetFieldPrimitive(void *obj, size_t offset, T oldValue, T newValue,
                                                                      std::memory_order memoryOrder, bool strong)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    if (strong) {
        return {atomicAddr->compare_exchange_strong(oldValue, newValue, memoryOrder), oldValue};
    }
    return {atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder), oldValue};
}

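// Reference-field CAS. The pre-barrier conservatively reports the current
// value even if the exchange later fails; the post-barrier runs only when the
// new reference was actually stored.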
/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline std::pair<bool, ObjectHeader *> ObjectAccessor::CompareAndSetFieldObject(void *obj, size_t offset,
                                                                                ObjectHeader *oldValue,
                                                                                ObjectHeader *newValue,
                                                                                std::memory_order memoryOrder,
                                                                                bool strong)
{
    bool success = false;
    ObjectHeader *result = nullptr;
    auto getResult = [&]() {
        if (IS_DYN) {
            auto value =
                CompareAndSetFieldPrimitive<ObjectHeader *>(obj, offset, oldValue, newValue, memoryOrder, strong);
            success = value.first;
            result = value.second;
        } else {
            auto value = CompareAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(oldValue),
                                                                        ToObjPtrType(newValue), memoryOrder, strong);
            success = value.first;
            result = reinterpret_cast<ObjectHeader *>(value.second);
        }
    };

    if (NEED_WRITE_BARRIER) {
        // Update the field with a pre-barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }

        getResult();
        if (success && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, newValue);
        }
        return {success, result};
    }

    getResult();
    return {success, result};
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    return atomicAddr->exchange(value, memoryOrder);
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                                          std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        // Update the field with a pre-barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }
        ObjectHeader *result = IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                                      : reinterpret_cast<ObjectHeader *>(GetAndSetFieldPrimitive<ObjectPointerType>(
                                            obj, offset, ToObjPtrType(value), memoryOrder));
        if (result != nullptr && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }

        return result;
    }

    return IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                  : reinterpret_cast<ObjectHeader *>(
                        GetAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder));
}

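// std::atomic provides fetch_add only in its integral and pointer
// specializations (floating-point fetch_add arrived in C++20), so the
// floating-point path below emulates it with a compare_exchange_weak loop;
// compare_exchange_weak updates oldValue on failure, so the loop re-reads
// the current value automatically.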
/* static */
template <typename T>
inline T ObjectAccessor::GetAndAddFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                 [[maybe_unused]] T value,
                                                 [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_same_v<T, uint8_t>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Cannot do add for boolean";
        UNREACHABLE();
    } else {                                          // NOLINT(readability-misleading-indentation)
        if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
            // Atomic fetch_add is only defined in the atomic specializations for integral and pointer types
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            T oldValue = atomicAddr->load(memoryOrder);
            T newValue;
            do {
                newValue = oldValue + value;
            } while (!atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder));
            return oldValue;
        } else {  // NOLINT(readability-misleading-indentation, readability-else-after-return)
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            return atomicAddr->fetch_add(value, memoryOrder);
        }
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseOrFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                       [[maybe_unused]] T value,
                                                       [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Cannot do bitwise or for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_or(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseAndFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Cannot do bitwise and for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_and(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseXorFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Cannot do bitwise xor for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_xor(value, memoryOrder);
    }
}

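// Dynamic (tagged) values may or may not hold a heap reference, so the
// barrier paths below check TaggedValue::IsHeapObject() before reporting the
// old value to the pre-barrier or the new one to the post-barrier.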
/* static */
inline void ObjectAccessor::SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value)
{
    uintptr_t addr = ToUintPtr(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(addr));
    // Atomic with relaxed order reason: concurrent access from GC
    reinterpret_cast<std::atomic<coretypes::TaggedType> *>(addr)->store(value, std::memory_order_relaxed);
}

/* static */
inline void ObjectAccessor::SetDynValue(const ManagedThread *thread, void *obj, size_t offset,
                                        coretypes::TaggedType value)
{
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    coretypes::TaggedValue tv(value);
    if (tv.IsHeapObject() && tv.GetRawHeapObject() != nullptr) {
        auto gcPostBarrierType = GetPostBarrierType(thread);
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            GetBarrierSet(thread)->PostBarrier(obj, offset, tv.GetRawHeapObject());
        }
    }
}

/* static */
template <typename T>
inline void ObjectAccessor::SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value)
{
    // A pre-barrier is needed because the previous value may be a reference.
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    // No post-barrier is needed because the new value is a primitive.
}

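// Updating the class pointer is a reference write as well: the barriers are
// given the managed objects that mirror the old and new classes.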
inline void ObjectAccessor::SetClass(ObjectHeader *obj, BaseClass *newClass)
{
    auto *barrierSet = GetBarrierSet();

    if (barrierSet->IsPreBarrierEnabled()) {
        ASSERT(obj->ClassAddr<BaseClass>() != nullptr);
        ObjectHeader *preVal = obj->ClassAddr<BaseClass>()->GetManagedObject();
        barrierSet->PreBarrier(preVal);
    }

    obj->SetClass(newClass);

    auto gcPostBarrierType = barrierSet->GetPostType();
    if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
        ASSERT(newClass->GetManagedObject() != nullptr);
        barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), 0, newClass->GetManagedObject());
    }
}
}  // namespace ark

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_