/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/object_header.h"

#include <ctime>

#include "libpandabase/mem/mem.h"
#include "runtime/include/class.h"
#include "runtime/include/coretypes/array.h"
#include "runtime/include/coretypes/class.h"
#include "runtime/include/hclass.h"
#include "runtime/include/runtime.h"
#include "runtime/include/thread.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/free_object.h"
#include "runtime/mem/vm_handle.h"
#include "runtime/monitor_pool.h"
#include "runtime/handle_base-inl.h"

namespace ark {

namespace object_header_traits {

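// Seed for object hash code generation. It is offset by the VM startup time so
// that generated hash codes differ between runs.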
// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
std::atomic<uint32_t> g_hashSeed = std::atomic<uint32_t>(LINEAR_SEED + std::time(nullptr));

}  // namespace object_header_traits

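// Allocates a new instance of the given class. Classes that the heap manager
// reports via IsObjectFinalized are forced into non-movable space, as are
// explicitly requested non-movable allocations; everything else goes through the
// regular (movable) allocation path.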
/* static */
ObjectHeader *ObjectHeader::CreateObject(ManagedThread *thread, ark::BaseClass *klass, bool nonMovable)
{
    ASSERT(klass != nullptr);
#ifndef NDEBUG
    if (!klass->IsDynamicClass()) {
        auto cls = static_cast<ark::Class *>(klass);
        ASSERT(cls->IsInstantiable());
        ASSERT(!cls->IsArrayClass());
        ASSERT(!cls->IsStringClass());
    }
#endif

    size_t size = klass->GetObjectSize();
    ASSERT(size != 0);
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    ObjectHeader *obj {nullptr};
    if (UNLIKELY(heapManager->IsObjectFinalized(klass))) {
        nonMovable = true;
    }
    if (LIKELY(!nonMovable)) {
        obj = heapManager->AllocateObject(klass, size);
    } else {
        obj = heapManager->AllocateNonMovableObject(klass, size);
    }
    return obj;
}

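// Convenience overload that allocates on behalf of the current managed thread.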
ObjectHeader *ObjectHeader::CreateObject(ark::BaseClass *klass, bool nonMovable)
{
    return CreateObject(ManagedThread::GetCurrent(), klass, nonMovable);
}

/* static */
ObjectHeader *ObjectHeader::Create(ManagedThread *thread, BaseClass *klass)
{
    return CreateObject(thread, klass, false);
}

ObjectHeader *ObjectHeader::Create(BaseClass *klass)
{
    return CreateObject(klass, false);
}

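// Allocates an object that the GC is not allowed to relocate. Illustrative call
// site (hypothetical; assumes `klass` was resolved elsewhere):
//   ObjectHeader *movable = ObjectHeader::Create(klass);
//   ObjectHeader *pinned = ObjectHeader::CreateNonMovable(klass);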
/* static */
ObjectHeader *ObjectHeader::CreateNonMovable(BaseClass *klass)
{
    return CreateObject(klass, true);
}

uint32_t ObjectHeader::GetHashCodeFromMonitor(Monitor *monitorP)
{
    return monitorP->GetHashCode();
}

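// Single-threaded variant: no other thread can race on the mark word, so the
// hash can be installed with a plain (non-atomic) store.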
uint32_t ObjectHeader::GetHashCodeMTSingle()
{
    auto mark = GetMark();

    switch (mark.GetState()) {
        case MarkWord::STATE_UNLOCKED: {
            mark = mark.DecodeFromHash(GenerateHashCode());
            ASSERT(mark.GetState() == MarkWord::STATE_HASHED);
            SetMark(mark);
            return mark.GetHash();
        }
        case MarkWord::STATE_HASHED:
            return mark.GetHash();
        default:
            LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
            return 0;
    }
}

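// Multi-threaded variant: the mark word may be contended, so every state is
// handled with atomics and the loop retries until a hash can be returned:
//   - UNLOCKED:     try to install a freshly generated hash via CAS;
//   - LIGHT_LOCKED: inflate the thin lock to a monitor, then retry;
//   - HEAVY_LOCKED: obtain the hash from the monitor;
//   - HASHED:       return the already installed hash.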
uint32_t ObjectHeader::GetHashCodeMTMulti()
{
    ObjectHeader *currentObj = this;
    while (true) {
        auto mark = currentObj->AtomicGetMark();
        auto *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handleObj(thread, currentObj);

        switch (mark.GetState()) {
            case MarkWord::STATE_UNLOCKED: {
                auto hashMark = mark.DecodeFromHash(GenerateHashCode());
                ASSERT(hashMark.GetState() == MarkWord::STATE_HASHED);
                currentObj->AtomicSetMark(mark, hashMark);
                break;
            }
            case MarkWord::STATE_LIGHT_LOCKED: {
                os::thread::ThreadId ownerThreadId = mark.GetThreadId();
                if (ownerThreadId == thread->GetInternalId()) {
                    Monitor::Inflate(this, thread);
                } else {
                    Monitor::InflateThinLock(thread, handleObj);
                    currentObj = handleObj.GetPtr();
                }
                break;
            }
            case MarkWord::STATE_HEAVY_LOCKED: {
                auto monitorId = mark.GetMonitorId();
                auto monitorP = MTManagedThread::GetCurrent()->GetMonitorPool()->LookupMonitor(monitorId);
                if (monitorP != nullptr) {
                    return GetHashCodeFromMonitor(monitorP);
                }
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): no monitor on heavy locked state";
                break;
            }
            case MarkWord::STATE_HASHED: {
                return mark.GetHash();
            }
            default: {
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
            }
        }
    }
}

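// Clones an arbitrary object. Class objects are the managed mirrors of runtime
// classes and must not be cloned, so that case is rejected with a fatal error.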
ObjectHeader *ObjectHeader::Clone(ObjectHeader *src)
{
    LOG_IF(src->ClassAddr<Class>()->GetManagedObject() == src, FATAL, RUNTIME) << "Can't clone a class";
    return ObjectHeader::ShallowCopy(src);
}

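// Allocates the destination for a copy in the same space as the source:
// a non-movable source is copied into non-movable space, anything else into
// the regular heap.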
static ObjectHeader *AllocateObjectAndGetDst(ObjectHeader *src, Class *objectClass, size_t objSize,
                                             mem::HeapManager *heapManager)
{
    ObjectHeader *dst = nullptr;
    if (PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(src) == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT) {
        dst = heapManager->AllocateNonMovableObject(objectClass, objSize);
    } else {
        dst = heapManager->AllocateObject(objectClass, objSize);
    }
    return dst;
}

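// Copies the payload of `src` (everything after the object header) into a freshly
// allocated object of the same class. The copy proceeds in word-sized, then
// pointer-sized, then byte-sized chunks, each with relaxed atomic accesses, and
// finishes with a GC post-barrier when the object may contain references.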
ObjectHeader *ObjectHeader::ShallowCopy(ObjectHeader *src)
{
    /*
        NOTE(d.trubenkov):
        use barriers for possible copied reference fields
    */
    auto objectClass = src->ClassAddr<Class>();
    std::size_t objSize = src->ObjectSize();

    // AllocateObject can trigger GC, so use a handle for src.
    auto *thread = ManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<ObjectHeader> srcHandle(thread, src);

    // Determine whether the object is non-movable
    ObjectHeader *dst = AllocateObjectAndGetDst(src, objectClass, objSize, heapManager);
    if (dst == nullptr) {
        return nullptr;
    }
    ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(srcHandle.GetPtr()) ==
           PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(dst));

    Span<uint8_t> srcSp(reinterpret_cast<uint8_t *>(srcHandle.GetPtr()), objSize);
    Span<uint8_t> dstSp(reinterpret_cast<uint8_t *>(dst), objSize);
    constexpr const std::size_t WORD_SIZE = sizeof(uintptr_t);
    std::size_t bytesToCopy = objSize - ObjectHeader::ObjectHeaderSize();
    std::size_t wordsToCopy = bytesToCopy / WORD_SIZE;
    std::size_t wordsToCopyEnd = ObjectHeader::ObjectHeaderSize() + WORD_SIZE * wordsToCopy;
    std::size_t objectPointersToCopy = (bytesToCopy - WORD_SIZE * wordsToCopy) / OBJECT_POINTER_SIZE;
    std::size_t objectPointersToCopyEnd = wordsToCopyEnd + objectPointersToCopy * OBJECT_POINTER_SIZE;
    // copy words
    for (std::size_t i = ObjectHeader::ObjectHeaderSize(); i < wordsToCopyEnd; i += WORD_SIZE) {
        // Atomic with relaxed order reason: data race with srcHandle with no synchronization or ordering constraints
        // imposed on other reads or writes
        reinterpret_cast<std::atomic<uintptr_t> *>(&dstSp[i])->store(
            reinterpret_cast<std::atomic<uintptr_t> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }
    // copy remaining memory by object pointer
    for (std::size_t i = wordsToCopyEnd; i < objectPointersToCopyEnd; i += OBJECT_POINTER_SIZE) {
        reinterpret_cast<std::atomic<ark::ObjectPointerType> *>(&dstSp[i])->store(
            // Atomic with relaxed order reason: data race with srcHandle with no synchronization or ordering
            // constraints imposed on other reads or writes
            reinterpret_cast<std::atomic<ark::ObjectPointerType> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }
    // copy remaining memory by bytes
    for (std::size_t i = objectPointersToCopyEnd; i < objSize; i++) {
        // Atomic with relaxed order reason: data race with srcHandle with no synchronization or ordering constraints
        // imposed on other reads or writes
        reinterpret_cast<std::atomic<uint8_t> *>(&dstSp[i])->store(
            reinterpret_cast<std::atomic<uint8_t> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }

    // Call barriers here.
    auto *barrierSet = thread->GetBarrierSet();
    // We don't need a pre-barrier here because we don't change any references inside the source object.
    // Post barrier: skip it only for arrays of primitives, which cannot contain references.
    if (!mem::IsEmptyBarrier(barrierSet->GetPostType())) {
        if (!objectClass->IsArrayClass() || !objectClass->GetComponentType()->IsPrimitive()) {
            barrierSet->PostBarrier(dst, 0, objSize);
        }
    }
    return dst;
}

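// Returns the actual size of this object in bytes, dispatching on whether its
// class is dynamic or static.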
size_t ObjectHeader::ObjectSize() const
{
    auto *baseKlass = ClassAddr<BaseClass>();
    if (baseKlass->IsDynamicClass()) {
        return ObjectSizeDyn(baseKlass);
    }
    return ObjectSizeStatic(baseKlass);
}

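// Size of a dynamic-class object: arrays and strings have variable length, free
// (dead) heap chunks carry their own size, and everything else uses the fixed
// size recorded in the class.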
size_t ObjectHeader::ObjectSizeDyn(BaseClass *baseKlass) const
{
    // if we do it concurrently, the real klass may be changed,
    // but we are ok with that
    auto *klass = static_cast<HClass *>(baseKlass);

    if (klass->IsArray()) {
        return static_cast<const coretypes::Array *>(this)->ObjectSize(TaggedValue::TaggedTypeSize());
    }
    if (klass->IsString()) {
        LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(klass->GetSourceLang());
        return ctx.GetStringSize(this);
    }
    if (klass->IsFreeObject()) {
        return static_cast<const mem::FreeObject *>(this)->GetSize();
    }
    return baseKlass->GetObjectSize();
}

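// Size of a static-class object: arrays and strings are variable-length, a class
// object's size is derived from the runtime Class it mirrors, and all other
// objects use the fixed size recorded in the class.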
size_t ObjectHeader::ObjectSizeStatic(BaseClass *baseKlass) const
{
    ASSERT(baseKlass == ClassAddr<BaseClass>());
    auto *klass = static_cast<Class *>(baseKlass);

    if (klass->IsArrayClass()) {
        return static_cast<const coretypes::Array *>(this)->ObjectSize(klass->GetComponentSize());
    }

    if (klass->IsStringClass()) {
        return static_cast<const coretypes::String *>(this)->ObjectSize();
    }

    if (klass->IsClassClass()) {
        auto cls = ark::Class::FromClassObject(const_cast<ObjectHeader *>(this));
        if (cls != nullptr) {
            return ark::Class::GetClassObjectSizeFromClass(cls, klass->GetSourceLang());
        }
    }
    return baseKlass->GetObjectSize();
}

}  // namespace ark