/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_

#include <securec.h>

#include "libpandabase/mem/mem.h"
#include "runtime/include/field.h"
#include "runtime/include/object_accessor.h"
#include "runtime/mem/gc/gc_barrier_set.h"

namespace panda {

/* static */
template <bool is_volatile /* = false */, bool need_read_barrier /* = true */, bool is_dyn /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject(const void *obj, size_t offset)
{
    // We don't have GC with read barriers now
    if (!is_dyn) {
        return reinterpret_cast<ObjectHeader *>(Get<object_pointer_type, is_volatile>(obj, offset));
    }
    return Get<ObjectHeader *, is_volatile>(obj, offset);
}

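// Note (editorial sketch): the reference stores below follow the usual GC write-barrier
// protocol, assuming a concurrent-marking collector behind GcBarrierSet:
//   1) pre-barrier on the value currently stored at (obj + offset), so the collector
//      does not lose the old reference while marking is in progress;
//   2) the actual store of the new reference;
//   3) post-barrier on (obj, value), e.g. to record the new reference in a card table
//      or remembered set, depending on the post-barrier type reported by GetPostType().
// Which of these steps is a real no-op depends entirely on the active barrier set.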
/* static */
template <bool is_volatile /* = false */, bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline void ObjectAccessor::SetObject(void *obj, size_t offset, ObjectHeader *value)
{
    if (need_write_barrier) {
        auto *barrier_set = GetBarrierSet();

        if (barrier_set->IsPreBarrierEnabled()) {
            ObjectHeader *pre_val = GetObject<is_volatile, false, is_dyn>(obj, offset);
            barrier_set->PreBarrier(pre_val);
        }

        if (!is_dyn) {
            Set<object_pointer_type, is_volatile>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, is_volatile>(obj, offset, value);
        }
        auto gc_post_barrier_type = barrier_set->GetPostType();
        if (!mem::IsEmptyBarrier(gc_post_barrier_type)) {
            barrier_set->PostBarrier(ToVoidPtr(ToUintPtr(obj)), value);
        }
    } else {
        if (!is_dyn) {
            Set<object_pointer_type, is_volatile>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, is_volatile>(obj, offset, value);
        }
    }
}

/* static */
template <bool is_volatile /* = false */, bool need_read_barrier /* = true */, bool is_dyn /* = false */>
inline ObjectHeader *ObjectAccessor::GetObject([[maybe_unused]] const ManagedThread *thread, const void *obj,
                                               size_t offset)
{
    // We don't have GC with read barriers now
    if (!is_dyn) {
        return reinterpret_cast<ObjectHeader *>(Get<object_pointer_type, is_volatile>(obj, offset));
    }
    return Get<ObjectHeader *, is_volatile>(obj, offset);
}

/* static */
template <bool is_volatile /* = false */, bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline void ObjectAccessor::SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value)
{
    if (need_write_barrier) {
        auto *barrier_set = GetBarrierSet(thread);
        if (barrier_set->IsPreBarrierEnabled()) {
            ObjectHeader *pre_val = GetObject<is_volatile, false, is_dyn>(obj, offset);
            barrier_set->PreBarrier(pre_val);
        }

        if (!is_dyn) {
            Set<object_pointer_type, is_volatile>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, is_volatile>(obj, offset, value);
        }
        if (!mem::IsEmptyBarrier(barrier_set->GetPostType())) {
            barrier_set->PostBarrier(ToVoidPtr(ToUintPtr(obj)), value);
        }
    } else {
        if (!is_dyn) {
            Set<object_pointer_type, is_volatile>(obj, offset, ToObjPtrType(value));
        } else {
            Set<ObjectHeader *, is_volatile>(obj, offset, value);
        }
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetPrimitive<T, true>(obj, field.GetOffset());
    }
    return GetPrimitive<T, false>(obj, field.GetOffset());
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, const Field &field, T value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetPrimitive<T, true>(obj, field.GetOffset(), value);
    } else {
        SetPrimitive<T, false>(obj, field.GetOffset(), value);
    }
}

/* static */
template <bool need_read_barrier /* = true */, bool is_dyn /* = false */>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, need_read_barrier, is_dyn>(obj, field.GetOffset());
    }
    return GetObject<false, need_read_barrier, is_dyn>(obj, field.GetOffset());
}

/* static */
template <bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline void ObjectAccessor::SetFieldObject(void *obj, const Field &field, ObjectHeader *value)
{
#ifdef PANDA_USE_32_BIT_POINTER
    ASSERT(IsInObjectsAddressSpace(ToUintPtr(value)));
#endif
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, need_write_barrier, is_dyn>(obj, field.GetOffset(), value);
    } else {
        SetObject<false, need_write_barrier, is_dyn>(obj, field.GetOffset(), value);
    }
}

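// Note (editorial, illustrative only): a typical call site resolves the Field once and
// then goes through these helpers so that volatility and GC barriers are handled in one
// place. Assuming a hypothetical resolved field `fld` and a live object `holder`:
//
//     ObjectHeader *old_ref = ObjectAccessor::GetFieldObject(holder, fld);
//     ObjectAccessor::SetFieldObject(holder, fld, new_ref);
//
// Both calls dispatch on Field::IsVolatile() internally, so callers never pick the
// volatile/non-volatile flavor themselves.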
/* static */
template <bool need_read_barrier /* = true */, bool is_dyn /* = false */>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field)
{
    if (UNLIKELY(field.IsVolatile())) {
        return GetObject<true, need_read_barrier, is_dyn>(thread, obj, field.GetOffset());
    }
    return GetObject<false, need_read_barrier, is_dyn>(thread, obj, field.GetOffset());
}

/* static */
template <bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline void ObjectAccessor::SetFieldObject(const ManagedThread *thread, void *obj, const Field &field,
                                           ObjectHeader *value)
{
    if (UNLIKELY(field.IsVolatile())) {
        SetObject<true, need_write_barrier, is_dyn>(thread, obj, field.GetOffset(), value);
    } else {
        SetObject<false, need_write_barrier, is_dyn>(thread, obj, field.GetOffset(), value);
    }
}

/* static */
template <class T>
inline T ObjectAccessor::GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memory_order)
{
    return Get<T>(obj, offset, memory_order);
}

/* static */
template <class T>
inline void ObjectAccessor::SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order)
{
    Set<T>(obj, offset, value, memory_order);
}

/* static */
template <bool need_read_barrier /* = true */, bool is_dyn /* = false */>
inline ObjectHeader *ObjectAccessor::GetFieldObject(const void *obj, int offset, std::memory_order memory_order)
{
    if (!is_dyn) {
        return reinterpret_cast<ObjectHeader *>(Get<object_pointer_type>(obj, offset, memory_order));
    }
    return Get<ObjectHeader *>(obj, offset, memory_order);
}

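// Note (editorial): GetComplementMemoryOrder swaps acquire and release so that the
// pre-barrier *load* of the old value can reuse the memory order requested for the
// subsequent *store*. For example, a caller asking for a release store gets an acquire
// load of the previous value; all other orders (relaxed, seq_cst, acq_rel) pass through
// unchanged.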
static inline std::memory_order GetComplementMemoryOrder(std::memory_order memory_order)
{
    if (memory_order == std::memory_order_acquire) {
        memory_order = std::memory_order_release;
    } else if (memory_order == std::memory_order_release) {
        memory_order = std::memory_order_acquire;
    }
    return memory_order;
}

/* static */
template <bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline void ObjectAccessor::SetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                           std::memory_order memory_order)
{
    if (need_write_barrier) {
        auto *barrier_set = GetBarrierSet();

        if (barrier_set->IsPreBarrierEnabled()) {
            // If SetFieldObject is called with std::memory_order_release,
            // we need to use the complement memory order std::memory_order_acquire
            // because we read the old value here.
            ObjectHeader *pre_val =
                GetFieldObject<false, is_dyn>(obj, offset, GetComplementMemoryOrder(memory_order));
            barrier_set->PreBarrier(pre_val);
        }

        if (!is_dyn) {
            Set<object_pointer_type>(obj, offset, ToObjPtrType(value), memory_order);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memory_order);
        }
        auto gc_post_barrier_type = barrier_set->GetPostType();
        if (!mem::IsEmptyBarrier(gc_post_barrier_type)) {
            barrier_set->PostBarrier(ToVoidPtr(ToUintPtr(obj)), value);
        }
    } else {
        if (!is_dyn) {
            Set<object_pointer_type>(obj, offset, ToObjPtrType(value), memory_order);
        } else {
            Set<ObjectHeader *>(obj, offset, value, memory_order);
        }
    }
}

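// Note (editorial): both CompareAndSet* helpers below return {success, witnessed value}.
// On failure, std::atomic::compare_exchange_strong/weak writes the value actually
// observed in memory back into its first argument, and that observed value is what ends
// up in the returned pair, so callers can retry or inspect the conflicting reference.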
/* static */
template <typename T>
inline std::pair<bool, T> ObjectAccessor::CompareAndSetFieldPrimitive(void *obj, size_t offset, T old_value,
                                                                      T new_value, std::memory_order memory_order,
                                                                      bool strong)
{
    uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsInObjectsAddressSpace(raw_addr));
    auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
    if (strong) {
        return {atomic_addr->compare_exchange_strong(old_value, new_value, memory_order), old_value};
    }
    return {atomic_addr->compare_exchange_weak(old_value, new_value, memory_order), old_value};
}

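// Note (editorial, illustrative only): a lock-free update of a reference field could
// look roughly like this (names are hypothetical, not part of this API):
//
//     auto [ok, seen] = ObjectAccessor::CompareAndSetFieldObject(
//         holder, ref_offset, expected_ref, desired_ref, std::memory_order_seq_cst, true /* strong */);
//     if (!ok) {
//         // `seen` holds the reference that was actually stored; retry or bail out.
//     }
//
// The write barriers are applied inside the helper, and the post-barrier is skipped
// when the exchange does not succeed.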
/* static */
template <bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline std::pair<bool, ObjectHeader *> ObjectAccessor::CompareAndSetFieldObject(void *obj, size_t offset,
                                                                                ObjectHeader *old_value,
                                                                                ObjectHeader *new_value,
                                                                                std::memory_order memory_order,
                                                                                bool strong)
{
    bool success = false;
    ObjectHeader *result = nullptr;
    auto get_result = [&]() {
        if (is_dyn) {
            auto value =
                CompareAndSetFieldPrimitive<ObjectHeader *>(obj, offset, old_value, new_value, memory_order, strong);
            success = value.first;
            result = value.second;
        } else {
            auto value = CompareAndSetFieldPrimitive<object_pointer_type>(
                obj, offset, ToObjPtrType(old_value), ToObjPtrType(new_value), memory_order, strong);
            success = value.first;
            result = reinterpret_cast<ObjectHeader *>(value.second);
        }
    };

    if (need_write_barrier) {
        // Update the field with a pre-barrier on the currently stored value
        auto *barrier_set = GetBarrierSet();
        if (barrier_set->IsPreBarrierEnabled()) {
            barrier_set->PreBarrier(GetObject<false, false, is_dyn>(obj, offset));
        }

        get_result();
        if (success && !mem::IsEmptyBarrier(barrier_set->GetPostType())) {
            barrier_set->PostBarrier(ToVoidPtr(ToUintPtr(obj)), new_value);
        }
        return {success, result};
    }

    get_result();
    return {success, result};
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order)
{
    uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
    ASSERT(IsInObjectsAddressSpace(raw_addr));
    auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
    return atomic_addr->exchange(value, memory_order);
}

/* static */
template <bool need_write_barrier /* = true */, bool is_dyn /* = false */>
inline ObjectHeader *ObjectAccessor::GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                                          std::memory_order memory_order)
{
    if (need_write_barrier) {
        // Update the field with a pre-barrier on the currently stored value
        auto *barrier_set = GetBarrierSet();
        if (barrier_set->IsPreBarrierEnabled()) {
            barrier_set->PreBarrier(GetObject<false, false, is_dyn>(obj, offset));
        }
        ObjectHeader *result = is_dyn ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memory_order)
                                      : reinterpret_cast<ObjectHeader *>(GetAndSetFieldPrimitive<object_pointer_type>(
                                            obj, offset, ToObjPtrType(value), memory_order));
        if (result != nullptr && !mem::IsEmptyBarrier(barrier_set->GetPostType())) {
            barrier_set->PostBarrier(ToVoidPtr(ToUintPtr(obj)), value);
        }

        return result;
    }

    return is_dyn ? GetAndSetFieldPrimitive<ObjectHeader *>(obj, offset, value, memory_order)
                  : reinterpret_cast<ObjectHeader *>(
                        GetAndSetFieldPrimitive<object_pointer_type>(obj, offset, ToObjPtrType(value), memory_order));
}

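// Note (editorial): std::atomic<T>::fetch_add is only provided for the integral and
// pointer specializations, so the floating-point branch below emulates it with a load
// followed by a compare_exchange_weak loop. This is the standard read-modify-write
// pattern: recompute `old_value + value` each time the exchange fails
// (compare_exchange_weak refreshes `old_value` with the value it actually saw), and
// return the value that was present before the successful update.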
/* static */
template <typename T>
inline T ObjectAccessor::GetAndAddFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                 [[maybe_unused]] T value,
                                                 [[maybe_unused]] std::memory_order memory_order)
{
    if constexpr (std::is_same_v<T, uint8_t>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do add for boolean";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
            // Atomic fetch_add is only defined in the atomic specializations for integral and pointer types
            uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsInObjectsAddressSpace(raw_addr));
            auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
            // Atomic with parameterized order reason: memory order passed as argument
            T old_value = atomic_addr->load(memory_order);
            T new_value;
            do {
                new_value = old_value + value;
            } while (!atomic_addr->compare_exchange_weak(old_value, new_value, memory_order));
            return old_value;
        } else {  // NOLINT(readability-misleading-indentation, readability-else-after-return)
            uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
            ASSERT(IsInObjectsAddressSpace(raw_addr));
            auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
            // Atomic with parameterized order reason: memory order passed as argument
            return atomic_addr->fetch_add(value, memory_order);
        }
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseOrFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                       [[maybe_unused]] T value,
                                                       [[maybe_unused]] std::memory_order memory_order)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise or for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsInObjectsAddressSpace(raw_addr));
        auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomic_addr->fetch_or(value, memory_order);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseAndFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memory_order)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise and for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsInObjectsAddressSpace(raw_addr));
        auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomic_addr->fetch_and(value, memory_order);
    }
}

/* static */
template <typename T>
inline T ObjectAccessor::GetAndBitwiseXorFieldPrimitive([[maybe_unused]] void *obj, [[maybe_unused]] size_t offset,
                                                        [[maybe_unused]] T value,
                                                        [[maybe_unused]] std::memory_order memory_order)
{
    if constexpr (std::is_floating_point_v<T>) {  // NOLINT(readability-braces-around-statements)
        LOG(FATAL, RUNTIME) << "Could not do bitwise xor for float/double";
        UNREACHABLE();
    } else {  // NOLINT(readability-misleading-indentation)
        uintptr_t raw_addr = reinterpret_cast<uintptr_t>(obj) + offset;
        ASSERT(IsInObjectsAddressSpace(raw_addr));
        auto *atomic_addr = reinterpret_cast<std::atomic<T> *>(raw_addr);
        // Atomic with parameterized order reason: memory order passed as argument
        return atomic_addr->fetch_xor(value, memory_order);
    }
}

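// Note (editorial): the Dyn* helpers below operate on coretypes::TaggedType slots used
// by dynamically typed languages. A tagged slot may hold either a primitive payload or
// an encoded heap reference, which is why SetDynValue re-checks IsHeapObject() on both
// the old and the new value before applying the pre- and post-barriers, while
// SetDynValueWithoutBarrier is just the raw relaxed store.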
/* static */
inline void ObjectAccessor::SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value)
{
    ASSERT(IsInObjectsAddressSpace(ToUintPtr(obj)));
    uintptr_t addr = ToUintPtr(obj) + offset;
    // Atomic with relaxed order reason: concurrent access from GC
    reinterpret_cast<std::atomic<coretypes::TaggedType> *>(addr)->store(value, std::memory_order_relaxed);
}

/* static */
inline void ObjectAccessor::SetDynValue(const ManagedThread *thread, void *obj, size_t offset,
                                        coretypes::TaggedType value)
{
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue pre_val(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (pre_val.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(pre_val.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    coretypes::TaggedValue tv(value);
    if (tv.IsHeapObject() && tv.GetRawHeapObject() != nullptr) {
        auto gc_post_barrier_type = GetPostBarrierType(thread);
        if (!mem::IsEmptyBarrier(gc_post_barrier_type)) {
            GetBarrierSet(thread)->PostBarrier(obj, tv.GetRawHeapObject());
        }
    }
}

/* static */
template <typename T>
inline void ObjectAccessor::SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value)
{
    // Need a pre-barrier because the previous value may be a reference.
    if (UNLIKELY(GetBarrierSet(thread)->IsPreBarrierEnabled())) {
        coretypes::TaggedValue pre_val(GetDynValue<coretypes::TaggedType>(obj, offset));
        if (pre_val.IsHeapObject()) {
            GetBarrierSet(thread)->PreBarrier(pre_val.GetRawHeapObject());
        }
    }
    SetDynValueWithoutBarrier(obj, offset, value);
    // Don't need a post-barrier because the new value is a primitive.
}
}  // namespace panda

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_INL_H_