/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_RUNTIME_OBJECT_ACCESSOR_H_
#define PANDA_RUNTIME_OBJECT_ACCESSOR_H_

#include <atomic>
#include <cstddef>
#include <utility>

#include "runtime/mem/gc/gc_barrier_set.h"

namespace panda {

class ObjectHeader;
class Field;
class ManagedThread;

class ObjectAccessor {
public:
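    // Accessors for primitive (non-reference) values at a raw byte offset within an object.
    // is_volatile = true performs the access with seq_cst ordering, otherwise relaxed (see Get/Set below).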
    template <class T, bool is_volatile = false>
    static T GetPrimitive(const void *obj, size_t offset)
    {
        return Get<T, is_volatile>(obj, offset);
    }

    template <class T, bool is_volatile = false>
    static void SetPrimitive(void *obj, size_t offset, T value)
    {
        Set<T, is_volatile>(obj, offset, value);
    }

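    // Accessors for object references at a raw byte offset. need_read_barrier / need_write_barrier
    // control whether the corresponding GC barriers are applied; is_dyn presumably selects the
    // dynamic (tagged-value) object layout.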
    template <bool is_volatile = false, bool need_read_barrier = true, bool is_dyn = false>
    static ObjectHeader *GetObject(const void *obj, size_t offset);

    template <bool is_volatile = false, bool need_write_barrier = true, bool is_dyn = false>
    static void SetObject(void *obj, size_t offset, ObjectHeader *value);

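    // Field-based accessors: the offset (and, presumably, volatility) is taken from the Field metadata.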
    template <class T>
    static T GetFieldPrimitive(const void *obj, const Field &field);

    template <class T>
    static void SetFieldPrimitive(void *obj, const Field &field, T value);

    template <bool need_read_barrier = true, bool is_dyn = false>
    static ObjectHeader *GetFieldObject(const void *obj, const Field &field);

    template <bool need_write_barrier = true, bool is_dyn = false>
    static void SetFieldObject(void *obj, const Field &field, ObjectHeader *value);

    // Pass thread parameter to speed up interpreter
    template <bool is_volatile = false, bool need_read_barrier = true, bool is_dyn = false>
    static ObjectHeader *GetObject(const ManagedThread *thread, const void *obj, size_t offset);

    template <bool is_volatile = false, bool need_write_barrier = true, bool is_dyn = false>
    static void SetObject(const ManagedThread *thread, void *obj, size_t offset, ObjectHeader *value);

    template <bool need_read_barrier = true, bool is_dyn = false>
    static ObjectHeader *GetFieldObject(const ManagedThread *thread, const void *obj, const Field &field);

    template <bool need_write_barrier = true, bool is_dyn = false>
    static void SetFieldObject(const ManagedThread *thread, void *obj, const Field &field, ObjectHeader *value);

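    // Accessors that perform the access with an explicitly supplied std::memory_order.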
    template <class T>
    static T GetFieldPrimitive(const void *obj, size_t offset, std::memory_order memory_order);

    template <class T>
    static void SetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order);

    template <bool need_read_barrier = true, bool is_dyn = false>
    static ObjectHeader *GetFieldObject(const void *obj, int offset, std::memory_order memory_order);

    template <bool need_write_barrier = true, bool is_dyn = false>
    static void SetFieldObject(void *obj, size_t offset, ObjectHeader *value, std::memory_order memory_order);

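    // Atomic compare-and-set: replaces the value at offset if it equals old_value and reports success;
    // 'strong' presumably selects the compare_exchange_strong variant (no spurious failures).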
    template <typename T>
    static std::pair<bool, T> CompareAndSetFieldPrimitive(void *obj, size_t offset, T old_value, T new_value,
                                                          std::memory_order memory_order, bool strong);

    template <bool need_write_barrier = true, bool is_dyn = false>
    static std::pair<bool, ObjectHeader *> CompareAndSetFieldObject(void *obj, size_t offset, ObjectHeader *old_value,
                                                                    ObjectHeader *new_value,
                                                                    std::memory_order memory_order, bool strong);

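    // Atomic exchange: stores value at offset and returns the previous value.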
    template <typename T>
    static T GetAndSetFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order);

    template <bool need_write_barrier = true, bool is_dyn = false>
    static ObjectHeader *GetAndSetFieldObject(void *obj, size_t offset, ObjectHeader *value,
                                              std::memory_order memory_order);

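    // Atomic fetch-and-op (add / bitwise or / and / xor): applies the operation with value and
    // returns the previous value.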
    template <typename T>
    static T GetAndAddFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order);

    template <typename T>
    static T GetAndBitwiseOrFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order);

    template <typename T>
    static T GetAndBitwiseAndFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order);

    template <typename T>
    static T GetAndBitwiseXorFieldPrimitive(void *obj, size_t offset, T value, std::memory_order memory_order);

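    // Accessors for dynamic (tagged) values; the WithoutBarrier variant skips the GC write barrier.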
    static inline void SetDynValueWithoutBarrier(void *obj, size_t offset, coretypes::TaggedType value);

    static inline void SetDynValue(const ManagedThread *thread, void *obj, size_t offset, coretypes::TaggedType value);

    template <typename T>
    static inline void SetDynPrimitive(const ManagedThread *thread, void *obj, size_t offset, T value);

    template <class T>
    static inline T GetDynValue(const void *obj, size_t offset)
    {
        uintptr_t addr = ToUintPtr(obj) + offset;
        ASSERT(IsInObjectsAddressSpace(addr));
        // Atomic with relaxed order reason: concurrent access from GC
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_relaxed);
    }

private:
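    // Low-level load/store helpers: "volatile" accesses map to seq_cst atomics, plain accesses to relaxed atomics.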
    template <class T, bool is_volatile>
    static T Get(const void *obj, size_t offset)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsInObjectsAddressSpace(ToUintPtr(addr)));
        if (is_volatile) {
            // Atomic with seq_cst order reason: required for volatile
            return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_seq_cst);
        }
        // Atomic with relaxed order reason: to be compatible with other vms
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(std::memory_order_relaxed);
    }

    template <class T, bool is_volatile>
    static void Set(void *obj, size_t offset, T value)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsInObjectsAddressSpace(ToUintPtr(addr)));
        if (is_volatile) {
            // Atomic with seq_cst order reason: required for volatile
            return reinterpret_cast<std::atomic<T> *>(addr)->store(value, std::memory_order_seq_cst);
        }
        // Atomic with relaxed order reason: to be compatible with other vms
        return reinterpret_cast<std::atomic<T> *>(addr)->store(value, std::memory_order_relaxed);
    }

    template <class T>
    static T Get(const void *obj, size_t offset, std::memory_order memory_order)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsInObjectsAddressSpace(ToUintPtr(addr)));
        // Atomic with parameterized order reason: memory order passed as argument
        return reinterpret_cast<const std::atomic<T> *>(addr)->load(memory_order);
    }

    template <class T>
    static void Set(void *obj, size_t offset, T value, std::memory_order memory_order)
    {
        auto *addr = reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(obj) + offset);
        ASSERT(IsInObjectsAddressSpace(ToUintPtr(addr)));
        // Atomic with parameterized order reason: memory order passed as argument
        return reinterpret_cast<std::atomic<T> *>(addr)->store(value, memory_order);
    }

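    // Helpers to obtain the GC barrier set and barrier types, either implicitly or for an explicitly passed thread.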
    static mem::GCBarrierSet *GetBarrierSet();

    static mem::GCBarrierSet *GetBarrierSet(const ManagedThread *thread);

    static mem::BarrierType GetPreBarrierType(const ManagedThread *thread);

    static mem::BarrierType GetPostBarrierType(const ManagedThread *thread);
};

}  // namespace panda

#endif  // PANDA_RUNTIME_OBJECT_ACCESSOR_H_