/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_H_

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <utility>

#include "libpandabase/utils/atomic.h"
#include "runtime/include/coretypes/tagged_value.h"
#include "runtime/mem/gc/gc_barrier_set.h"

namespace ark {

class ObjectHeader;
class BaseClass;
class Field;
class ManagedThread;

class ObjectAccessor {
public:
    template <class T, bool IS_VOLATILE = false>
    static T GetPrimitive(const void *obj, size_t offset)
    {
        return Get<T, IS_VOLATILE>(obj, offset);
    }

    template <class T, bool IS_VOLATILE = false>
    static void SetPrimitive(void *obj, size_t offset, T value)
    {
        Set<T, IS_VOLATILE>(obj, offset, value);
    }

    template <bool IS_VOLATILE = false, bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetObject(const void *obj, size_t offset);

    template <bool IS_VOLATILE = false, bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetObject(void *obj, size_t offset, ObjectHeader *value);

    template <class T>
    static T GetFieldPrimitive(const void *obj, const Field &field);

    template <class T>
    static void SetFieldPrimitive(void *obj, const Field &field, T value);

    template <bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetFieldObject(const void *obj, const Field &field);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetFieldObject(void *obj, const Field &field, ObjectHeader *value);

    // Overloads that take the thread parameter explicitly, to speed up the interpreter
    template <bool IS_VOLATILE = false, bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetObject(const ManagedThread *thread, const void *obj, size_t offset);

    template <bool IS_VOLATILE = false, bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value);

    template <bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetFieldObject(const ManagedThread *thread, void *obj, const Field &field, ObjectHeader *value);

    template <class T>
    static T GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memoryOrder);

    template <class T>
    static void SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <bool NEED_READ_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetFieldObject(const void *obj, int offset, std::memory_order memoryOrder);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static void SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memoryOrder);
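
    // Usage sketch (illustrative only, not part of this header): reading and
    // writing a primitive field with an explicit memory order. `objPtr` and
    // `counterOffset` are hypothetical stand-ins for a managed object pointer
    // and a field's byte offset:
    //
    //     auto v = ObjectAccessor::GetFieldPrimitive<int32_t>(objPtr, counterOffset,
    //                                                         std::memory_order_acquire);
    //     ObjectAccessor::SetFieldPrimitive<int32_t>(objPtr, counterOffset, v + 1,
    //                                                std::memory_order_release);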

    template <typename T>
    static std::pair<bool, T> CompareAndSetFieldPrimitive(void *obj, size_t offset, T oldValue, T newValue,
                                                          std::memory_order memoryOrder, bool strong);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static std::pair<bool, ObjectHeader *> CompareAndSetFieldObject(void *obj, size_t offset, ObjectHeader *oldValue,
                                                                    ObjectHeader *newValue,
                                                                    std::memory_order memoryOrder, bool strong);

    template <typename T>
    static T GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <bool NEED_WRITE_BARRIER = true, bool IS_DYN = false>
    static ObjectHeader *GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                              std::memory_order memoryOrder);

    template <typename T>
    static T GetAndAddFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T>
    static T GetAndBitwiseOrFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T>
    static T GetAndBitwiseAndFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    template <typename T>
    static T GetAndBitwiseXorFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memoryOrder);

    static inline void SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value);

    static inline void SetDynValue(const ManagedThread *thread, void *obj, size_t offset, coretypes::TaggedType value);

    template <typename T>
    static inline void SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value);

    template <class T>
    static inline T GetDynValue(const void *obj, size_t offset)
    {
        uintptr_t addr = ToUintPtr(obj) + offset;
        ASSERT(IsAddressInObjectsHeap(addr));
        // Atomic with relaxed order reason: concurrent access from GC
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_relaxed);
    }

    static void SetClass(ObjectHeader *obj, BaseClass *newClass);

    static bool IsHeapObject(ObjectPointerType v)
    {
        return reinterpret_cast<ObjectHeader *>(v) != nullptr;
    }

    static bool IsHeapObject(coretypes::TaggedType v)
    {
        return coretypes::TaggedValue(v).IsHeapObject();
    }

    static ObjectHeader *DecodeNotNull(ObjectPointerType v)
    {
        auto *p = reinterpret_cast<ObjectHeader *>(v);
        ASSERT(p != nullptr);
        return p;
    }

    static ObjectHeader *DecodeNotNull(coretypes::TaggedType v)
    {
        return coretypes::TaggedValue(v).GetHeapObject();
    }

    template <typename P>
    static P Load(P *p)
    {
        return *p;
    }

    template <typename P>
    static P LoadAtomic(P *p)
    {
        return AtomicLoad(p, std::memory_order_relaxed);
    }

    static void Store(ObjectPointerType *ref, ObjectHeader *val)
    {
        *ref = EncodeObjectPointerType(val);
    }

    static void Store(coretypes::TaggedType *ref, ObjectHeader *val)
    {
        *ref = EncodeTaggedType(val);
    }
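
    // Usage sketch (illustrative only): a weak compare-and-set retry loop that
    // increments a primitive field lock-free. This assumes the returned pair is
    // (success flag, observed value); `objPtr` and `counterOffset` are
    // hypothetical stand-ins:
    //
    //     int32_t old = ObjectAccessor::GetFieldPrimitive<int32_t>(objPtr, counterOffset,
    //                                                              std::memory_order_relaxed);
    //     while (true) {
    //         auto [ok, seen] = ObjectAccessor::CompareAndSetFieldPrimitive<int32_t>(
    //             objPtr, counterOffset, old, old + 1, std::memory_order_seq_cst, /* strong */ false);
    //         if (ok) {
    //             break;
    //         }
    //         old = seen;  // retry with the freshly observed value
    //     }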

private:
    template <class T, bool IS_VOLATILE>
    static T Get(const void *obj, size_t offset)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        if (IS_VOLATILE) {
            // Atomic with seq_cst order reason: required for volatile semantics
            return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_seq_cst);
        }
        // Atomic with relaxed order reason: to be compatible with other VMs
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_relaxed);
    }

    template <class T, bool IS_VOLATILE>
    static void Set(void *obj, size_t offset, T value)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        if (IS_VOLATILE) {
            // Atomic with seq_cst order reason: required for volatile semantics
            return reinterpret_cast<std::atomic<T> *>(addr)->store(value, std::memory_order_seq_cst);
        }
        // Atomic with relaxed order reason: to be compatible with other VMs
        return reinterpret_cast<std::atomic<T> *>(addr)->store(value, std::memory_order_relaxed);
    }

    template <class T>
    static T Get(const void *obj, size_t offset, std::memory_order memoryOrder)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        // Atomic with parameterized order reason: memory order passed as argument
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(memoryOrder);
    }

    template <class T>
    static void Set(void *obj, size_t offset, T value, std::memory_order memoryOrder)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsAddressInObjectsHeap(addr));
        // Atomic with parameterized order reason: memory order passed as argument
        return reinterpret_cast<std::atomic<T> *>(addr)->store(value, memoryOrder);
    }

    static ObjectPointerType EncodeObjectPointerType(ObjectHeader *obj)
    {
        return static_cast<ObjectPointerType>(ToUintPtr(obj));
    }

    static coretypes::TaggedType EncodeTaggedType(ObjectHeader *obj)
    {
        return coretypes::TaggedValue::Cast(obj);
    }

    PANDA_PUBLIC_API static mem::GCBarrierSet *GetBarrierSet();

    PANDA_PUBLIC_API static mem::GCBarrierSet *GetBarrierSet(const ManagedThread *thread);

    static mem::BarrierType GetPreBarrierType(const ManagedThread *thread);

    PANDA_PUBLIC_API static mem::BarrierType GetPostBarrierType(const ManagedThread *thread);
};

}  // namespace ark

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_H_
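
// Usage sketch (illustrative only): the IS_VOLATILE template flag maps a field
// access onto a seq_cst atomic, so a volatile write in one thread pairs with a
// volatile read in another without extra fences. `objPtr` and `offs` are
// hypothetical stand-ins:
//
//     // writer thread
//     ObjectAccessor::SetPrimitive<int64_t, /* IS_VOLATILE */ true>(objPtr, offs, 42);
//     // reader thread
//     auto v = ObjectAccessor::GetPrimitive<int64_t, /* IS_VOLATILE */ true>(objPtr, offs);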