/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_

#include <securec.h>

#include "libpandabase/mem/mem.h"
#include "runtime/include/class.h"
#include "runtime/include/field.h"
#include "runtime/include/object_accessor.h"
#include "runtime/mem/gc/gc_barrier_set.h"

namespace ark {

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject(const void *obj, size_t offset)
{
    // We don't have GC with read barriers now
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

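// Note: in the static layout a reference field is stored as an
// ObjectPointerType (an integer-sized, possibly compressed object pointer)
// and is cast back to ObjectHeader *, while in dynamic mode the slot holds
// the pointer itself. A minimal usage sketch (the receiver and offset are
// assumed to come from the caller):
//   ObjectHeader *ref = ObjectAccessor::GetObject(rawObj, refOffset);
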
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
// CC-OFFNXT(G.FUD.06) perf critical
inline void ObjectAccessor::SetObject(void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

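// The store above follows the pre/post write-barrier protocol used by
// concurrent-marking collectors: the pre-barrier reports the reference that
// is about to be overwritten so a concurrent marker does not lose it, the
// new value is stored, and the post-barrier records the newly created edge
// unless the configured post-barrier type is empty.
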
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject([[maybe_unused]] const ManagedThread *thread, const void *obj,
                                               size_t offset)
{
    // We don't have GC with read barriers now
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
// CC-OFFNXT(G.FUD.06) perf critical
inline void ObjectAccessor::SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet(thread);
        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        if (!mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetPrimitive<T, true>(obj, field.GetOffset());
    }
    return GetPrimitive<T, false>(obj, field.GetOffset());
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, const Field &field, T value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetPrimitive<T, true>(obj, field.GetOffset(), value);
    } else {
        SetPrimitive<T, false>(obj, field.GetOffset(), value);
    }
}

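// Usage sketch (illustrative; `obj` and `field` are assumed to be a valid
// object and a resolved Field of a matching primitive type):
//   auto v = ObjectAccessor::GetFieldPrimitive<int32_t>(obj, field);
//   ObjectAccessor::SetFieldPrimitive<int32_t>(obj, field, v + 1);
// Volatile fields are routed to the IS_VOLATILE = true (atomic) path.
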
/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(void *obj, const Field &field, ObjectHeader *value)
{
    ASSERT(IsAddressInObjectsHeapOrNull(value));
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    }
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(const ManagedThread *thread, void *obj, const Field &field,
                                           ObjectHeader *value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memoryOrder)
{
    return Get<T>(obj, offset, memoryOrder);
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    Set<T>(obj, offset, value, memoryOrder);
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, int offset, std::memory_order memoryOrder)
{
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType>(obj, offset, memoryOrder));
    }
    return Get<ObjectHeader *>(obj, offset, memoryOrder);
}

static inline std::memory_order GetComplementMemoryOrder(std::memory_order memoryOrder)
{
    if (memoryOrder == std::memory_order_acquire) {
        memoryOrder = std::memory_order_release;
    } else if (memoryOrder == std::memory_order_release) {
        memoryOrder = std::memory_order_acquire;
    }
    return memoryOrder;
}

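// Worked example: a SetFieldObject(obj, offset, value, std::memory_order_release)
// call must first *read* the old value for the pre-barrier. A release order is
// not valid for a load, so the complement order std::memory_order_acquire is
// used for that read; any order other than acquire/release passes through
// unchanged.
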
/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
// CC-OFFNXT(G.FUD.06) perf critical
inline void ObjectAccessor::SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            // If SetFieldObject is called with std::memory_order_release
            // we need to use the complement memory order std::memory_order_acquire
            // because we read the value.
            ObjectHeader *preVal = GetFieldObject<false, IS_DYN>(obj, offset, GetComplementMemoryOrder(memoryOrder));
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
    }
}

/* static */
template <typename T>
inline std::pair<bool, T> ObjectAccessor::CompareAndSetFieldPrimitive(void *obj, size_t offset, T oldValue, T newValue,
                                                                      std::memory_order memoryOrder, bool strong)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    if (strong) {
        return {atomicAddr->compare_exchange_strong(oldValue, newValue, memoryOrder), oldValue};
    }
    return {atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder), oldValue};
}

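// Usage sketch (illustrative): the returned pair carries the success flag and
// the value previously read from the address; on failure it is the actual
// current value, per compare_exchange semantics:
//   auto [ok, seen] = ObjectAccessor::CompareAndSetFieldPrimitive<uint32_t>(
//       obj, offset, expected, desired, std::memory_order_seq_cst, true);
//   if (!ok) { /* `seen` holds the conflicting value */ }
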
/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
// CC-OFFNXT(G.FUD.06) perf critical
inline std::pair<bool, ObjectHeader *> ObjectAccessor::CompareAndSetFieldObject(void *obj, size_t offset,
                                                                                ObjectHeader *oldValue,
                                                                                ObjectHeader *newValue,
                                                                                std::memory_order memoryOrder,
                                                                                bool strong)
{
    bool success = false;
    ObjectHeader *result = nullptr;
    auto getResult = [&]() {
        if (IS_DYN) {
            auto value =
                CompareAndSetFieldPrimitive<ObjectHeader *>(obj, offset, oldValue, newValue, memoryOrder, strong);
            success = value.first;
            result = value.second;
        } else {
            auto value = CompareAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(oldValue),
                                                                        ToObjPtrType(newValue), memoryOrder, strong);
            success = value.first;
            result = reinterpret_cast<ObjectHeader *>(value.second);
        }
    };

    if (NEED_WRITE_BARRIER) {
        // update field with pre barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }

        getResult();
        if (success && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, newValue);
        }
        return {success, result};
    }

    getResult();
    return {success, result};
}

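// Note that the pre-barrier above runs unconditionally before the exchange,
// even if the CAS subsequently fails; reporting a reference that was not
// actually overwritten is conservative but safe. The post-barrier, in
// contrast, is issued only for a successful exchange.
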
/* static */
template <typename T>
inline T ObjectAccessor::GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    return atomicAddr->exchange(value, memoryOrder);
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
// CC-OFFNXT(G.FUD.06) perf critical
inline ObjectHeader *ObjectAccessor::GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                                          std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        // update field with pre barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }
        ObjectHeader *result = IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                                      : reinterpret_cast<ObjectHeader *>(GetAndSetFieldPrimitive<ObjectPointerType>(
                                            obj, offset, ToObjPtrType(value), memoryOrder));
        if (result != nullptr && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }

        return result;
    }

    return IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                  : reinterpret_cast<ObjectHeader *>(
                        GetAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder));
}

/* static */
template <typename T>
// CC-OFFNXT(G.FUD.06) perf critical
inline T ObjectAccessor::GetAndAddFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                 [[maybe_unused]] T value,
                                                 [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_same_v<T, uint8_t>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do add for boolean";
        UNREACHABLE();
    } else {                                          // NOLINT(readability-misleading-indentation)
        if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
            // Atomic fetch_add is only defined in the atomic specializations for integral and pointer
            // types, so emulate it with a CAS loop for floating-point values
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            T oldValue = atomicAddr->load(memoryOrder);
            T newValue;
            do {
                newValue = oldValue + value;
            } while (!atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder));
            return oldValue;
        } else {  // NOLINT(readability-misleading-indentation, readability-else-after-return)
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            return atomicAddr->fetch_add(value, memoryOrder);
        }
    }
}

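// Since C++20, std::atomic<float> / std::atomic<double> provide fetch_add
// directly; the CAS loop above is the portable pre-C++20 equivalent and, like
// fetch_add, returns the value observed before the addition.
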
/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseOrFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                       [[maybe_unused]] T value,
                                                       [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise or for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_or(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseAndFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise and for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_and(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseXorFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise xor for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_xor(value, memoryOrder);
    }
}

/* static */
inline void ObjectAccessor::SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value)
{
    uintptr_t addr = ToUintPtr(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(addr));
    // Atomic with relaxed order reason: concurrent access from GC
    reinterpret_cast<std::atomic<coretypes::TaggedType> *>(addr)->store(value, std::memory_order_relaxed);
}

/* static */
// CC-OFFNXT(G.FUD.06) solid logic
inline void ObjectAccessor::SetDynValue(const ManagedThread *thread, void *obj, size_t offset,
                                        coretypes::TaggedType value)
{
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    coretypes::TaggedValue tv(value);
    if (tv.IsHeapObject() && tv.GetRawHeapObject() != nullptr) {
        auto gcPostBarrierType = GetPostBarrierType(thread);
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            GetBarrierSet(thread)->PostBarrier(obj, offset, tv.GetRawHeapObject());
        }
    }
}

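// For dynamic (tagged) values the barriers are conditional on the payload:
// the pre-barrier fires only when the value being overwritten is a heap
// object, and the post-barrier only when the newly stored tagged value
// references a non-null heap object; pure primitives need no GC bookkeeping.
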
/* static */
template <typename T>
inline void ObjectAccessor::SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value)
{
    // Need pre-barrier because the previous value may be a reference.
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    // Don't need a post-barrier because the value is a primitive.
}

// CC-OFFNXT(G.FUD.06) solid logic
inline void ObjectAccessor::SetClass(ObjectHeader *obj, BaseClass *newClass)
{
    auto *barrierSet = GetBarrierSet();

    if (barrierSet->IsPreBarrierEnabled()) {
        ASSERT(obj->ClassAddr<BaseClass>() != nullptr);
        ObjectHeader *preVal = obj->ClassAddr<BaseClass>()->GetManagedObject();
        barrierSet->PreBarrier(preVal);
    }

    obj->SetClass(newClass);

    auto gcPostBarrierType = barrierSet->GetPostType();
    if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
        ASSERT(newClass->GetManagedObject() != nullptr);
        barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), 0, newClass->GetManagedObject());
    }
}
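
// Note: the barriers above operate on the class's managed mirror object
// (GetManagedObject()) rather than on the BaseClass pointer itself, since the
// GC tracks managed references only; offset 0 is passed presumably because
// the post-barrier only needs the address of the containing object here.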
}  // namespace ark

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_