/**
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_H_

#include <cstddef>

#include "libpandabase/utils/atomic.h"
#include "runtime/include/coretypes/tagged_value.h"
#include "runtime/mem/gc/gc_barrier_set.h"

namespace ark {

class ObjectHeader;
class Field;
class ManagedThread;

class ObjectAccessor {
public:
    template <class T, bool IS_VOLATILE = false>
    static T GetPrimitive(const void *obj, size_t offset)
    {
        return Get<T, IS_VOLATILE>(obj, offset);
    }

    template <class T, bool IS_VOLATILE = false>
    static void SetPrimitive(void *obj, size_t offset, T value)
    {
        Set<T, IS_VOLATILE>(obj, offset, value);
    }

    template <bool IS_VOLATILE = false, bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetObject(const void *obj, size_t offset);

    template <bool IS_VOLATILE = false, bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetObject(void *obj, size_t offset, ObjectHeader *value);

    template <class T>
    static T GetFieldPrimitive(const void *obj, const Field &field);

    template <class T>
    static void SetFieldPrimitive(void *obj, const Field &field, T value);

    template <bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetFieldObject(const void *obj, const Field &field);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetFieldObject(void *obj, const Field &field, ObjectHeader *value);

    // Pass thread parameter to speed up interpreter
    template <bool IS_VOLATILE = false, bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetObject(const ManagedThread *thread, const void *obj, size_t offset);

    template <bool IS_VOLATILE = false, bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value);

    template <bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetFieldObject(const ManagedThread *thread, void *obj, const Field &field, ObjectHeader *value);
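    // Illustrative usage (hypothetical caller: `obj` is assumed to point at a managed
    // object and `offset` at a valid reference field inside it). The object accessors
    // run the GC read/write barriers by default; disabling NEED_WRITE_BARRIER is only
    // safe when the caller issues the barrier itself:
    //
    //     ObjectHeader *oldRef = ObjectAccessor::GetObject(obj, offset);
    //     ObjectAccessor::SetObject(obj, offset, newRef);                // with write barrier
    //     ObjectAccessor::SetObject<false, false>(obj, offset, newRef);  // no write barrier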
    template <class T>
    static T GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memoryOrder);

    template <class T>
    static void SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetFieldObject(const void *obj, int offset, std::memory_order memoryOrder);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memoryOrder);

    template <typename T>
    static std::pair<bool, T> CompareAndSetFieldPrimitive(void *obj, size_t offset, T oldValue, T newValue,
                                                          std::memory_order memoryOrder, bool strong);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static std::pair<bool, ObjectHeader *> CompareAndSetFieldObject(void *obj, size_t offset, ObjectHeader *oldValue,
                                                                    ObjectHeader *newValue,
                                                                    std::memory_order memoryOrder, bool strong);

    template <typename T>
    static T GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                              std::memory_order memoryOrder);

    template <typename T, bool USE_UBYTE_ARITHMETIC = false>
    static T GetAndAddFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T, bool USE_UBYTE_ARITHMETIC = false>
    static std::enable_if_t<!std::is_same_v<T, uint8_t> || USE_UBYTE_ARITHMETIC, T> GetAndSubFieldPrimitive(
        void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T>
    static T GetAndBitwiseOrFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T>
    static T GetAndBitwiseAndFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T>
    static T GetAndBitwiseXorFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);
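    // Illustrative CAS loop over a primitive field (hypothetical caller; `obj` and
    // `offset` are assumed valid, and the returned pair is assumed to be {succeeded,
    // previously stored value}). The weak variant (strong == false) may fail
    // spuriously, so it is retried:
    //
    //     auto old = ObjectAccessor::GetFieldPrimitive<int32_t>(obj, offset, std::memory_order_acquire);
    //     std::pair<bool, int32_t> res;
    //     do {
    //         res = ObjectAccessor::CompareAndSetFieldPrimitive<int32_t>(
    //             obj, offset, old, old * 2, std::memory_order_acq_rel, /* strong */ false);
    //         old = res.second;
    //     } while (!res.first);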
    static inline void SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value);

    static inline void SetDynValue(const ManagedThread *thread, void *obj, size_t offset, coretypes::TaggedType value);

    template <typename T>
    static inline void SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value);

    template <bool IS_VOLATILE = false, bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void FillObjects(void *objArr, size_t dataOffset, size_t count, size_t elemSize, ObjectHeader *value);

    template <class T>
    static inline T GetDynValue(const void *obj, size_t offset)
    {
        uintptr_t addr = ToUintPtr(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(addr));
        // Atomic with relaxed order reason: concurrent access from GC
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_relaxed);
    }

    static void SetClass(ObjectHeader *obj, BaseClass *newClass);

    static bool IsHeapObject(ObjectPointerType v)
    {
        return reinterpret_cast<ObjectHeader *>(v) != nullptr;
    }

    static ObjectHeader *DecodeNotNull(ObjectPointerType v)
    {
        auto *p = reinterpret_cast<ObjectHeader *>(v);
        ASSERT(p != nullptr);
        return p;
    }

    template <typename P>
    static P Load(P *p)
    {
        return *p;
    }

    template <typename P>
    static P LoadAtomic(P *p)
    {
        return AtomicLoad(p, std::memory_order_relaxed);
    }

    static void Store(ObjectPointerType *ref, ObjectHeader *val)
    {
        *ref = EncodeObjectPointerType(val);
    }

    // NOTE(ipetrov): Hack for 128 bit ObjectHeader
#if !defined(ARK_HYBRID)
    static bool IsHeapObject(coretypes::TaggedType v)
    {
        return coretypes::TaggedValue(v).IsHeapObject();
    }

    static ObjectHeader *DecodeNotNull(coretypes::TaggedType v)
    {
        return coretypes::TaggedValue(v).GetHeapObject();
    }

    static void Store(coretypes::TaggedType *ref, ObjectHeader *val)
    {
        *ref = EncodeTaggedType(val);
    }
#endif
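    // Illustrative use of the load/decode/store helpers above (hypothetical GC
    // visitor; `slot` is assumed to point at a reference slot of a scanned object):
    //
    //     ObjectPointerType raw = ObjectAccessor::LoadAtomic(slot);
    //     if (ObjectAccessor::IsHeapObject(raw)) {
    //         ObjectHeader *ref = ObjectAccessor::DecodeNotNull(raw);
    //         // ... process `ref`, then write back a possibly moved pointer:
    //         ObjectAccessor::Store(slot, ref);
    //     }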
private:
    template <class T, bool IS_VOLATILE>
    static T Get(const void *obj, size_t offset)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        if (IS_VOLATILE) {
            // Atomic with seq_cst order reason: required for volatile
            return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_seq_cst);
        }
        // Atomic with relaxed order reason: to be compatible with other vms
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_relaxed);
    }

    template <bool IS_VOLATILE = false, bool IS_DYN = false>
    static void FillObjsWithPreBarrier(void *objArr, size_t dataOffset, size_t count, size_t elemSize,
                                       ObjectHeader *value);

    template <bool IS_VOLATILE = false, bool IS_DYN = false>
    static void FillObjsNoBarrier(void *objArr, size_t dataOffset, size_t count, size_t elemSize, ObjectHeader *value);

    template <typename T>
    static T GetAndSubFieldPrimitiveFloat(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <class T, bool IS_VOLATILE>
    static void Set(void *obj, size_t offset, T value)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        if (IS_VOLATILE) {
            // Atomic with seq_cst order reason: required for volatile
            return reinterpret_cast<std::atomic<T> *>(addr)->store(value, std::memory_order_seq_cst);
        }
        // Atomic with relaxed order reason: to be compatible with other vms
        return reinterpret_cast<std::atomic<T> *>(addr)->store(value, std::memory_order_relaxed);
    }

    template <class T>
    static T Get(const void *obj, size_t offset, std::memory_order memoryOrder)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        // Atomic with parameterized order reason: memory order passed as argument
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(memoryOrder);
    }

    template <class T>
    static void Set(void *obj, size_t offset, T value, std::memory_order memoryOrder)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        // Atomic with parameterized order reason: memory order passed as argument
        return reinterpret_cast<std::atomic<T> *>(addr)->store(value, memoryOrder);
    }

    static ObjectPointerType EncodeObjectPointerType(ObjectHeader *obj)
    {
        return static_cast<ObjectPointerType>(ToUintPtr(obj));
    }

    static coretypes::TaggedType EncodeTaggedType(ObjectHeader *obj)
    {
        return coretypes::TaggedValue::Cast(obj);
    }

    PANDA_PUBLIC_API static mem::GCBarrierSet *GetBarrierSet();

    PANDA_PUBLIC_API static mem::GCBarrierSet *GetBarrierSet(const ManagedThread *thread);

    static mem::BarrierType GetPreBarrierType(const ManagedThread *thread);

    PANDA_PUBLIC_API static mem::BarrierType GetPostBarrierType(const ManagedThread *thread);
};

}  // namespace ark

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_H_