/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H
#define PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H

#include <securec.h>

#include "libpandabase/mem/mem.h"
#include "runtime/include/class.h"
#include "runtime/include/field.h"
#include "runtime/include/object_accessor.h"
#include "runtime/mem/gc/gc_barrier_set.h"

namespace panda {

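// Reads a reference slot at the given offset inside obj. For statically-typed objects
// the slot holds an ObjectPointerType (possibly compressed) value that is widened to a
// raw ObjectHeader pointer; for dynamic objects (IS_DYN) the slot holds the pointer itself.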
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject(const void *obj, size_t offset)
{
    // We don't have GC with read barriers now
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

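// Stores a reference slot and runs the GC write barriers around the store: the
// pre-barrier (when enabled) observes the previous slot value, then the store is
// performed, and the post-barrier records the update unless the collector's
// post-barrier type is empty.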
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
inline void ObjectAccessor::SetObject(void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

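// Thread-aware overload of GetObject. The thread argument is unused while no GC
// with read barriers exists.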
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject([[maybe_unused]] const ManagedThread *thread, const void *obj,
                                               size_t offset)
{
    // We don't have GC with read barriers now
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

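// Thread-aware overload of SetObject: identical barrier protocol, but the barrier
// set is resolved through the given thread rather than the current one.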
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
inline void ObjectAccessor::SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet(thread);
        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        if (!mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

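// Field-based primitive accessors: dispatch to a volatile or plain access depending
// on the field's declared volatility.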
/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetPrimitive<T, true>(obj, field.GetOffset());
    }
    return GetPrimitive<T, false>(obj, field.GetOffset());
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, const Field &field, T value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetPrimitive<T, true>(obj, field.GetOffset(), value);
    } else {
        SetPrimitive<T, false>(obj, field.GetOffset(), value);
    }
}

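// Field-based reference accessors: the same volatile dispatch as the primitive
// versions, with barrier usage controlled by the template arguments.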
/* static */
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
}

/* static */
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(void *obj, const Field &field, ObjectHeader *value)
{
    ASSERT(IsAddressInObjectsHeapOrNull(value));
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    }
}

/* static */
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
}

/* static */
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(const ManagedThread *thread, void *obj, const Field &field,
                                           ObjectHeader *value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    }
}

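// Raw-offset overloads that take an explicit std::memory_order for atomic accesses.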
/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memoryOrder)
{
    return Get<T>(obj, offset, memoryOrder);
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    Set<T>(obj, offset, value, memoryOrder);
}

/* static */
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, int offset, std::memory_order memoryOrder)
{
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType>(obj, offset, memoryOrder));
    }
    return Get<ObjectHeader *>(obj, offset, memoryOrder);
}

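// Maps a store ordering to the matching load ordering and vice versa
// (acquire <-> release); all other orderings are returned unchanged.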
static inline std::memory_order GetComplementMemoryOrder(std::memory_order memoryOrder)
{
    if (memoryOrder == std::memory_order_acquire) {
        memoryOrder = std::memory_order_release;
    } else if (memoryOrder == std::memory_order_release) {
        memoryOrder = std::memory_order_acquire;
    }
    return memoryOrder;
}

/* static */
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            // If SetFieldObject is called with std::memory_order_release,
            // we need to use the complement memory order std::memory_order_acquire
            // because we read the value.
            ObjectHeader *preVal = GetFieldObject<false, IS_DYN>(obj, offset, GetComplementMemoryOrder(memoryOrder));
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
    }
}

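// Atomic compare-and-swap on a primitive field. Returns {success, witnessed value};
// on failure the second element holds the value actually observed in the slot.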
/* static */
template <typename T>
inline std::pair<bool, T> ObjectAccessor::CompareAndSetFieldPrimitive(void *obj, size_t offset, T oldValue, T newValue,
                                                                      std::memory_order memoryOrder, bool strong)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    if (strong) {
        return {atomicAddr->compare_exchange_strong(oldValue, newValue, memoryOrder), oldValue};
    }
    return {atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder), oldValue};
}

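// Reference CAS with write barriers: the pre-barrier observes the current slot value,
// the exchange runs on the underlying representation, and the post-barrier fires only
// if the exchange succeeded.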
/* static */
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline std::pair<bool, ObjectHeader *> ObjectAccessor::CompareAndSetFieldObject(void *obj, size_t offset,
                                                                                ObjectHeader *oldValue,
                                                                                ObjectHeader *newValue,
                                                                                std::memory_order memoryOrder,
                                                                                bool strong)
{
    bool success = false;
    ObjectHeader *result = nullptr;
    auto getResult = [&]() {
        if (IS_DYN) {
            auto value =
                CompareAndSetFieldPrimitive<ObjectHeader *>(obj, offset, oldValue, newValue, memoryOrder, strong);
            success = value.first;
            result = value.second;
        } else {
            auto value = CompareAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(oldValue),
                                                                        ToObjPtrType(newValue), memoryOrder, strong);
            success = value.first;
            result = reinterpret_cast<ObjectHeader *>(value.second);
        }
    };

    if (NEED_WRITE_BARRIER) {
        // update field with pre barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }

        getResult();
        if (success && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, newValue);
        }
        return {success, result};
    }

    getResult();
    return {success, result};
}

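// Atomically exchanges a primitive field and returns the previous value.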
/* static */
template <typename T>
inline T ObjectAccessor::GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    return atomicAddr->exchange(value, memoryOrder);
}

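// Atomic exchange on a reference field with the usual barrier protocol; note that the
// post-barrier is skipped when the previous value was null.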
/* static */
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                                          std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        // update field with pre barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }
        ObjectHeader *result = IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                                      : reinterpret_cast<ObjectHeader *>(GetAndSetFieldPrimitive<ObjectPointerType>(
                                            obj, offset, ToObjPtrType(value), memoryOrder));
        if (result != nullptr && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }

        return result;
    }

    return IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                  : reinterpret_cast<ObjectHeader *>(
                        GetAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder));
}

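// Atomic fetch-and-add. fetch_add is not available on the floating-point
// std::atomic specializations used here, so that case falls back to a
// compare-exchange loop; additions on boolean (uint8_t) fields are rejected.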
/* static */
template <typename T>
inline T ObjectAccessor::GetAndAddFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                 [[maybe_unused]] T value,
                                                 [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_same_v<T, uint8_t>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do add for boolean";
        UNREACHABLE();
    } else {                                          // NOLINT(readability-misleading-indentation)
        if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
            // Atomic fetch_add is only defined in the atomic specializations for integral and pointer types
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            T oldValue = atomicAddr->load(memoryOrder);
            T newValue;
            do {
                newValue = oldValue + value;
            } while (!atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder));
            return oldValue;
        } else {  // NOLINT(readability-misleading-indentation, readability-else-after-return)
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            return atomicAddr->fetch_add(value, memoryOrder);
        }
    }
}

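// Atomic fetch-and-bitwise operations (or/and/xor below). Bitwise operations are
// meaningless for floating-point types, so those instantiations abort.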
/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseOrFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                       [[maybe_unused]] T value,
                                                       [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise or for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_or(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseAndFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise and for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_and(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseXorFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise xor for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_xor(value, memoryOrder);
    }
}

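// Raw store of a tagged dynamic value that bypasses the GC barriers. The store is
// a relaxed atomic because the GC may scan the slot concurrently.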
/* static */
inline void ObjectAccessor::SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value)
{
    uintptr_t addr = ToUintPtr(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(addr));
    // Atomic with relaxed order reason: concurrent access from GC
    reinterpret_cast<std::atomic<coretypes::TaggedType> *>(addr)->store(value, std::memory_order_relaxed);
}

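// Barrier-aware store of a tagged dynamic value: the pre-barrier runs only if the
// previous value was a heap object, and the post-barrier only if the new value is
// a non-null heap reference.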
/* static */
inline void ObjectAccessor::SetDynValue(const ManagedThread *thread, void *obj, size_t offset,
                                        coretypes::TaggedType value)
{
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    coretypes::TaggedValue tv(value);
    if (tv.IsHeapObject() && tv.GetRawHeapObject() != nullptr) {
        auto gcPostBarrierType = GetPostBarrierType(thread);
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            GetBarrierSet(thread)->PostBarrier(obj, offset, tv.GetRawHeapObject());
        }
    }
}

/* static */
template <typename T>
inline void ObjectAccessor::SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value)
{
    // Need pre-barrier because the previous value may be a reference.
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    // Don't need a post barrier because the value is a primitive.
}

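// Replaces the object's class pointer. The managed mirror objects of the old and
// new classes act as the reference values for the pre- and post-barriers.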
inline void ObjectAccessor::SetClass(ObjectHeader *obj, BaseClass *newClass)
{
    auto *barrierSet = GetBarrierSet();

    if (barrierSet->IsPreBarrierEnabled()) {
        ASSERT(obj->ClassAddr<BaseClass>() != nullptr);
        ObjectHeader *preVal = obj->ClassAddr<BaseClass>()->GetManagedObject();
        barrierSet->PreBarrier(preVal);
    }

    obj->SetClass(newClass);

    auto gcPostBarrierType = barrierSet->GetPostType();
    if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
        ASSERT(newClass->GetManagedObject() != nullptr);
        barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), 0, newClass->GetManagedObject());
    }
}
}  // namespace panda

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H