/**
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_

#include <securec.h>

#include "libpandabase/mem/mem.h"
#include "runtime/include/class.h"
#include "runtime/include/field.h"
#include "runtime/include/object_accessor.h"
#include "runtime/mem/gc/gc_barrier_set.h"

namespace ark {

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject(const void *obj, size_t offset)
{
    // We don't have GC with read barriers now
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

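// Stores a reference field: when NEED_WRITE_BARRIER is set, the GC pre-barrier receives the old
// value and the post-barrier the newly written one.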
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
// CC-OFFNXT(G.FUD.06) perf critical
inline void ObjectAccessor::SetObject(void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_READ_BARRIER /* = true */, bool IS_DYN /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject([[maybe_unused]] const ManagedThread *thread, const void *obj,
                                               size_t offset)
{
    // We don't have GC with read barriers now
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType, IS_VOLATILE>(obj, offset));
    }
    return Get<ObjectHeader *, IS_VOLATILE>(obj, offset);
}

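// Stores the same reference into `count` consecutive slots of `elemSize` bytes starting at
// `dataOffset`. Pre-barriers are emitted per element (when enabled), while a single post-barrier
// covers the whole written range.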
/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
inline void ObjectAccessor::FillObjects(void *objArr, size_t dataOffset, size_t count, size_t elemSize,
                                        ObjectHeader *value)
{
    auto *barrierSet = GetBarrierSet();
    if (NEED_WRITE_BARRIER && barrierSet->IsPreBarrierEnabled()) {
        FillObjsWithPreBarrier<IS_VOLATILE, IS_DYN>(objArr, dataOffset, count, elemSize, value);
    } else {
        FillObjsNoBarrier<IS_VOLATILE, IS_DYN>(objArr, dataOffset, count, elemSize, value);
    }
    if (NEED_WRITE_BARRIER && (!mem::IsEmptyBarrier(barrierSet->GetPostType()))) {
        barrierSet->PostBarrier(objArr, dataOffset, count * elemSize);
    }
}

/* static */
template <bool IS_VOLATILE /* = false */, bool IS_DYN /* = false */>
inline void ObjectAccessor::FillObjsWithPreBarrier(void *objArr, size_t dataOffset, size_t count, size_t elemSize,
                                                   ObjectHeader *value)
{
    auto *barrierSet = GetBarrierSet();
    for (size_t i = 0; i < count; i++) {
        auto offset = dataOffset + elemSize * i;
        barrierSet->PreBarrier(GetObject<IS_VOLATILE, false, IS_DYN>(objArr, offset));
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(objArr, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(objArr, offset, value);
        }
    }
}

/* static */
template <bool IS_VOLATILE /* = false */, bool IS_DYN /* = false */>
inline void ObjectAccessor::FillObjsNoBarrier(void *objArr, size_t dataOffset, size_t count, size_t elemSize,
                                              ObjectHeader *value)
{
    for (size_t i = 0; i < count; i++) {
        auto offset = dataOffset + elemSize * i;
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(objArr, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(objArr, offset, value);
        }
    }
}

/* static */
template <bool IS_VOLATILE /* = false */, bool NEED_WRITE_BARRIER /* = true */, bool IS_DYN /* = false */>
// CC-OFFNXT(G.FUD.06) perf critical
inline void ObjectAccessor::SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet(thread);
        if (barrierSet->IsPreBarrierEnabled()) {
            ObjectHeader *preVal = GetObject<IS_VOLATILE, false, IS_DYN>(obj, offset);
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
        if (!mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType, IS_VOLATILE>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, IS_VOLATILE>(obj, offset, value);
        }
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetPrimitive<T, true>(obj, field.GetOffset());
    }
    return GetPrimitive<T, false>(obj, field.GetOffset());
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, const Field &field, T value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetPrimitive<T, true>(obj, field.GetOffset(), value);
    } else {
        SetPrimitive<T, false>(obj, field.GetOffset(), value);
    }
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(obj, field.GetOffset());
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(void *obj, const Field &field, ObjectHeader *value)
{
    ASSERT(IsAddressInObjectsHeapOrNull(value));
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(obj, field.GetOffset(), value);
    }
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
    }
    return GetObject<false, NEED_READ_BARRIER, IS_DYN>(thread, obj, field.GetOffset());
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
inline void ObjectAccessor::SetFieldObject(const ManagedThread *thread, void *obj, const Field &field,
                                           ObjectHeader *value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    } else {
        SetObject<false, NEED_WRITE_BARRIER, IS_DYN>(thread, obj, field.GetOffset(), value);
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memoryOrder)
{
    return Get<T>(obj, offset, memoryOrder);
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    Set<T>(obj, offset, value, memoryOrder);
}

/* static */
// NEED_READ_BARRIER = true, IS_DYN = false
template <bool NEED_READ_BARRIER, bool IS_DYN>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, int offset, std::memory_order memoryOrder)
{
    if (!IS_DYN) {
        return reinterpret_cast<ObjectHeader *>(Get<ObjectPointerType>(obj, offset, memoryOrder));
    }
    return Get<ObjectHeader *>(obj, offset, memoryOrder);
}

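// Maps acquire <-> release so that a read performed on a store path (e.g. fetching the old value
// for the pre-barrier) uses the memory order complementary to the requested store order; all other
// orders are returned unchanged.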
static inline std::memory_order GetComplementMemoryOrder(std::memory_order memoryOrder)
{
    if (memoryOrder == std::memory_order_acquire) {
        memoryOrder = std::memory_order_release;
    } else if (memoryOrder == std::memory_order_release) {
        memoryOrder = std::memory_order_acquire;
    }
    return memoryOrder;
}

/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
// CC-OFFNXT(G.FUD.06) perf critical
inline void ObjectAccessor::SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        auto *barrierSet = GetBarrierSet();

        if (barrierSet->IsPreBarrierEnabled()) {
            // If SetFieldObject is called with std::memory_order_release,
            // we need to use the complement memory order std::memory_order_acquire
            // because we read the value.
            ObjectHeader *preVal = GetFieldObject<false, IS_DYN>(obj, offset, GetComplementMemoryOrder(memoryOrder));
            barrierSet->PreBarrier(preVal);
        }

        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
        auto gcPostBarrierType = barrierSet->GetPostType();
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }
    } else {
        if (!IS_DYN) {
            Set<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memoryOrder);
        }
    }
}

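// Returns {success, observed value}: compare_exchange writes the value it actually read back into
// `oldValue`, so the second element is the field content at the moment of the attempt.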
/* static */
template <typename T>
inline std::pair<bool, T> ObjectAccessor::CompareAndSetFieldPrimitive(void *obj, size_t offset, T oldValue, T newValue,
                                                                      std::memory_order memoryOrder, bool strong)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    if (strong) {
        return {atomicAddr->compare_exchange_strong(oldValue, newValue, memoryOrder), oldValue};
    }
    return {atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder), oldValue};
}

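// Reference CAS with GC barriers: the pre-barrier is applied to the value currently stored in the
// field, and the post-barrier is emitted only when the exchange succeeded.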
/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
// CC-OFFNXT(G.FUD.06) perf critical
inline std::pair<bool, ObjectHeader *> ObjectAccessor::CompareAndSetFieldObject(void *obj, size_t offset,
                                                                                ObjectHeader *oldValue,
                                                                                ObjectHeader *newValue,
                                                                                std::memory_order memoryOrder,
                                                                                bool strong)
{
    bool success = false;
    ObjectHeader *result = nullptr;
    auto getResult = [&]() {
        if (IS_DYN) {
            auto value =
                CompareAndSetFieldPrimitive<ObjectHeader *>(obj, offset, oldValue, newValue, memoryOrder, strong);
            success = value.first;
            result = value.second;
        } else {
            auto value = CompareAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(oldValue),
                                                                        ToObjPtrType(newValue), memoryOrder, strong);
            success = value.first;
            result = reinterpret_cast<ObjectHeader *>(value.second);
        }
    };

    if (NEED_WRITE_BARRIER) {
        // update field with pre barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }

        getResult();
        if (success && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, newValue);
        }
        return {success, result};
    }

    getResult();
    return {success, result};
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    return atomicAddr->exchange(value, memoryOrder);
}

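// Atomic exchange of a reference field with GC barriers; the previous value is returned, and the
// post-barrier is emitted only when that previous value is non-null.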
/* static */
// NEED_WRITE_BARRIER = true, IS_DYN = false
template <bool NEED_WRITE_BARRIER, bool IS_DYN>
// CC-OFFNXT(G.FUD.06) perf critical
inline ObjectHeader *ObjectAccessor::GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                                          std::memory_order memoryOrder)
{
    if (NEED_WRITE_BARRIER) {
        // update field with pre barrier
        auto *barrierSet = GetBarrierSet();
        if (barrierSet->IsPreBarrierEnabled()) {
            barrierSet->PreBarrier(GetObject<false, false, IS_DYN>(obj, offset));
        }
        ObjectHeader *result = IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                                      : reinterpret_cast<ObjectHeader *>(GetAndSetFieldPrimitive<ObjectPointerType>(
                                            obj, offset, ToObjPtrType(value), memoryOrder));
        if (result != nullptr && !mem::IsEmptyBarrier(barrierSet->GetPostType())) {
            barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), offset, value);
        }

        return result;
    }

    return IS_DYN ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memoryOrder)
                  : reinterpret_cast<ObjectHeader *>(
                        GetAndSetFieldPrimitive<ObjectPointerType>(obj, offset, ToObjPtrType(value), memoryOrder));
}

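// Atomic add on a field. Floating-point types are handled with a compare_exchange loop because
// the atomic fetch_add specializations used here cover only integral and pointer types; addition
// on uint8_t (boolean) is rejected unless USE_UBYTE_ARITHMETIC is set.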
/* static */
template <typename T, bool USE_UBYTE_ARITHMETIC>
// CC-OFFNXT(G.FUD.06) perf critical
inline T ObjectAccessor::GetAndAddFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                 [[maybe_unused]] T value,
                                                 [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_same_v<T, uint8_t> &&
                  !USE_UBYTE_ARITHMETIC) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do add for boolean";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
            // Atomic fetch_add is only defined in the atomic specializations for integral and pointer types
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            T oldValue = atomicAddr->load(memoryOrder);
            T newValue;
            do {
                newValue = oldValue + value;
            } while (!atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder));
            return oldValue;
        } else {  // NOLINT(readability-misleading-indentation, readability-else-after-return)
            uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsAddressInObjectsHeap(rawAddr));
            auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
            // Atomic with parameterized order reason: memory order passed as argument
            return atomicAddr->fetch_add(value, memoryOrder);
        }
    }
}

/* static */
template <typename T>
// CC-OFFNXT(G.FMT.06, G.FUD.06) project code style
T ObjectAccessor::GetAndSubFieldPrimitiveFloat(void *obj, size_t offset, T value, std::memory_order memoryOrder)
{
    // Atomic fetch_sub is only defined in the atomic specializations for integral and pointer types
    uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(rawAddr));
    auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
    // Atomic with parameterized order reason: memory order passed as argument
    T oldValue = atomicAddr->load(memoryOrder);
    T newValue;
    do {
        newValue = oldValue - value;
    } while (!atomicAddr->compare_exchange_weak(oldValue, newValue, memoryOrder));
    return oldValue;
}

/* static */
template <typename T, bool USE_UBYTE_ARITHMETIC>
// CC-OFFNXT(G.FMT.06, G.FUD.06) project code style
inline std::enable_if_t<!std::is_same_v<T, uint8_t> || USE_UBYTE_ARITHMETIC, T> ObjectAccessor::GetAndSubFieldPrimitive(
    // CC-OFFNXT(G.FMT.06, G.FUD.06) project code style
    [[maybe_unused]] void *obj, [[maybe_unused]] size_t offset, [[maybe_unused]] T value,
    // CC-OFFNXT(G.FMT.06, G.FUD.06) project code style
    [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        return GetAndSubFieldPrimitiveFloat<T>(obj, offset, value, memoryOrder);
    } else {  // NOLINT(readability-misleading-indentation, readability-else-after-return)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_sub(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseOrFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                       [[maybe_unused]] T value,
                                                       [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise or for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_or(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseAndFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise and for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_and(value, memoryOrder);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseXorFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memoryOrder)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise xor for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t rawAddr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(rawAddr));
        auto *atomicAddr = reinterpret_cast<std::atomic<T> *>(rawAddr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomicAddr->fetch_xor(value, memoryOrder);
    }
}

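// Relaxed tagged-value store with no GC barriers; callers are responsible for any required
// pre/post barriers (see SetDynValue below).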
/* static */
inline void ObjectAccessor::SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value)
{
    uintptr_t addr = ToUintPtr(obj) + offset;
    ASSERT(IsAddressInObjectsHeap(addr));
    // Atomic with relaxed order reason: concurrent access from GC
    reinterpret_cast<std::atomic<coretypes::TaggedType> *>(addr)->store(value, std::memory_order_relaxed);
}

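// Stores a tagged value into a dynamic object: the pre-barrier is applied when the previous value
// is a heap reference, and the post-barrier when the new value is a non-null heap reference.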
/* static */
// CC-OFFNXT(G.FUD.06) solid logic
inline void ObjectAccessor::SetDynValue(const ManagedThread *thread, void *obj, size_t offset,
                                        coretypes::TaggedType value)
{
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    coretypes::TaggedValue tv(value);
    if (tv.IsHeapObject() && tv.GetRawHeapObject() != nullptr) {
        auto gcPostBarrierType = GetPostBarrierType(thread);
        if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
            GetBarrierSet(thread)->PostBarrier(obj, offset, tv.GetRawHeapObject());
        }
    }
}

/* static */
template <typename T>
inline void ObjectAccessor::SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value)
{
    // Need a pre-barrier because the previous value may be a reference.
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue preVal(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (preVal.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(preVal.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    // Don't need a post-barrier because the value is a primitive.
}

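// Updates the class pointer with the same pre/post barrier protocol, passing the managed objects
// of the old and new classes to the barriers.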
// CC-OFFNXT(G.FUD.06) solid logic
inline void ObjectAccessor::SetClass(ObjectHeader *obj, BaseClass *newClass)
{
    auto *barrierSet = GetBarrierSet();

    if (barrierSet->IsPreBarrierEnabled()) {
        ASSERT(obj->ClassAddr<BaseClass>() != nullptr);
        ObjectHeader *preVal = obj->ClassAddr<BaseClass>()->GetManagedObject();
        barrierSet->PreBarrier(preVal);
    }

    obj->SetClass(newClass);

    auto gcPostBarrierType = barrierSet->GetPostType();
    if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
        ASSERT(newClass->GetManagedObject() != nullptr);
        barrierSet->PostBarrier(ToVoidPtr(ToUintPtr(obj)), 0, newClass->GetManagedObject());
    }
}
}  // namespace ark

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_