/**
 * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/object_header.h"

#include <ctime>

#include "libpandabase/mem/mem.h"
#include "runtime/include/class.h"
#include "runtime/include/coretypes/array.h"
#include "runtime/include/coretypes/class.h"
#include "runtime/include/hclass.h"
#include "runtime/include/runtime.h"
#include "runtime/include/thread.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/free_object.h"
#include "runtime/mem/vm_handle.h"
#include "runtime/monitor_pool.h"
#include "runtime/handle_base-inl.h"

namespace ark {

namespace object_header_traits {

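// Process-wide seed for object hash codes. Mixing the current time into
// LINEAR_SEED makes hash sequences differ between runs.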
// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
std::atomic<uint32_t> g_hashSeed = std::atomic<uint32_t>(LINEAR_SEED + std::time(nullptr));

} // namespace object_header_traits

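// Allocates storage for an instance of |klass|. Objects of finalizable classes
// are always allocated as non-movable, regardless of the caller's request.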
/* static */
ObjectHeader *ObjectHeader::CreateObject(ManagedThread *thread, ark::BaseClass *klass, bool nonMovable)
{
    ASSERT(thread != nullptr);
    ASSERT(klass != nullptr);
#ifndef NDEBUG
    if (!klass->IsDynamicClass()) {
        auto cls = static_cast<ark::Class *>(klass);
        ASSERT(cls->IsInstantiable());
        ASSERT(!cls->IsArrayClass());
        ASSERT(!cls->IsStringClass());
    }
#endif

    size_t size = klass->GetObjectSize();
    ASSERT(size != 0);
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    ObjectHeader *obj {nullptr};
    if (UNLIKELY(heapManager->IsObjectFinalized(klass))) {
        nonMovable = true;
    }
    if (LIKELY(!nonMovable)) {
        obj = heapManager->AllocateObject(klass, size);
    } else {
        obj = heapManager->AllocateNonMovableObject(klass, size);
    }
    return obj;
}

ObjectHeader *ObjectHeader::CreateObject(ark::BaseClass *klass, bool nonMovable)
{
    return CreateObject(ManagedThread::GetCurrent(), klass, nonMovable);
}

/* static */
ObjectHeader *ObjectHeader::Create(ManagedThread *thread, BaseClass *klass)
{
    return CreateObject(thread, klass, false);
}

ObjectHeader *ObjectHeader::Create(BaseClass *klass)
{
    return CreateObject(klass, false);
}

/* static */
ObjectHeader *ObjectHeader::CreateNonMovable(BaseClass *klass)
{
    return CreateObject(klass, true);
}
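
// Example (hypothetical call site, not part of this file):
//
//     ObjectHeader *obj = ObjectHeader::Create(thread, klass);
//     if (obj == nullptr) {
//         // allocation failed (cf. the nullptr check in ShallowCopy below)
//     }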

uint32_t ObjectHeader::GetHashCodeFromMonitor(Monitor *monitorP)
{
    return monitorP->GetHashCode();
}

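// Single-threaded hash path: no other thread can touch the mark word, so the
// hash can be installed with a plain (non-atomic) store.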
uint32_t ObjectHeader::GetHashCodeMTSingle()
{
    auto mark = GetMark();

    switch (mark.GetState()) {
        case MarkWord::STATE_UNLOCKED: {
            mark = mark.DecodeFromHash(GenerateHashCode());
            ASSERT(mark.GetState() == MarkWord::STATE_HASHED);
            SetMark(mark);
            return mark.GetHash();
        }
        case MarkWord::STATE_HASHED:
            return mark.GetHash();
        default:
            LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
            return 0;
    }
}

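// Multi-threaded hash path: the mark word can be raced by other threads, so it
// is read and updated atomically, retrying in a loop. Thin locks are inflated
// to monitors first, since a heavyweight monitor stores the hash alongside the
// lock state.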
uint32_t ObjectHeader::GetHashCodeMTMulti()
{
    ObjectHeader *currentObj = this;
    while (true) {
        auto mark = currentObj->AtomicGetMark();
        auto *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handleObj(thread, currentObj);

        switch (mark.GetState()) {
            case MarkWord::STATE_UNLOCKED: {
                auto hashMark = mark.DecodeFromHash(GenerateHashCode());
                ASSERT(hashMark.GetState() == MarkWord::STATE_HASHED);
                currentObj->AtomicSetMark(mark, hashMark);
                break;
            }
            case MarkWord::STATE_LIGHT_LOCKED: {
                os::thread::ThreadId ownerThreadId = mark.GetThreadId();
                if (ownerThreadId == thread->GetInternalId()) {
                    Monitor::Inflate(this, thread);
                } else {
                    Monitor::InflateThinLock(thread, handleObj);
                    currentObj = handleObj.GetPtr();
                }
                break;
            }
            case MarkWord::STATE_HEAVY_LOCKED: {
                auto monitorId = mark.GetMonitorId();
                auto monitorP = thread->GetMonitorPool()->LookupMonitor(monitorId);
                if (monitorP != nullptr) {
                    return GetHashCodeFromMonitor(monitorP);
                }
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): no monitor on heavy locked state";
                break;
            }
            case MarkWord::STATE_HASHED: {
                return mark.GetHash();
            }
            default: {
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
            }
        }
    }
}

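// Clones |src| via ShallowCopy. Cloning the managed mirror of a class is
// forbidden and aborts the runtime.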
ObjectHeader *ObjectHeader::Clone(ObjectHeader *src)
{
    LOG_IF(src->ClassAddr<Class>()->GetManagedObject() == src, FATAL, RUNTIME) << "Can't clone a class";
    return ObjectHeader::ShallowCopy(src);
}

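// Allocates the destination for a copy of |src| in the same kind of space as
// the source: non-movable sources get a non-movable copy, movable sources a
// regular one.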
static ObjectHeader *AllocateObjectAndGetDst(ObjectHeader *src, Class *objectClass, size_t objSize,
                                             mem::HeapManager *heapManager)
{
    ObjectHeader *dst = nullptr;
    if (PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(src) == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT) {
        dst = heapManager->AllocateNonMovableObject(objectClass, objSize);
    } else {
        dst = heapManager->AllocateObject(objectClass, objSize);
    }
    return dst;
}

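// Copies the payload of |src| (everything after the object header) into a
// freshly allocated object of the same class and space. The header itself is
// not copied, so the copy starts with a fresh mark word.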
ObjectHeader *ObjectHeader::ShallowCopy(ObjectHeader *src)
{
    /*
       NOTE(d.trubenkov):
       use barriers for possible copied reference fields
    */
    auto objectClass = src->ClassAddr<Class>();
    std::size_t objSize = src->ObjectSize();

    // AllocateObject can trigger GC, so access src through a handle.
    auto *thread = ManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<ObjectHeader> srcHandle(thread, src);

    // Allocate the destination in the same space (movable or non-movable) as the source
    ObjectHeader *dst = AllocateObjectAndGetDst(src, objectClass, objSize, heapManager);
    if (dst == nullptr) {
        return nullptr;
    }
    ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(srcHandle.GetPtr()) ==
           PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(dst));

    Span<uint8_t> srcSp(reinterpret_cast<uint8_t *>(srcHandle.GetPtr()), objSize);
    Span<uint8_t> dstSp(reinterpret_cast<uint8_t *>(dst), objSize);
    constexpr const std::size_t WORD_SIZE = sizeof(uintptr_t);
    std::size_t bytesToCopy = objSize - ObjectHeader::ObjectHeaderSize();
    std::size_t wordsToCopy = bytesToCopy / WORD_SIZE;
    std::size_t wordsToCopyEnd = ObjectHeader::ObjectHeaderSize() + WORD_SIZE * wordsToCopy;
    std::size_t objectPointersToCopy = (bytesToCopy - WORD_SIZE * wordsToCopy) / OBJECT_POINTER_SIZE;
    std::size_t objectPointersToCopyEnd = wordsToCopyEnd + objectPointersToCopy * OBJECT_POINTER_SIZE;
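    // Worked example (assuming an 8-byte header, 8-byte words and 4-byte object
    // pointers): objSize = 44 gives bytesToCopy = 36, wordsToCopy = 4
    // (wordsToCopyEnd = 40), objectPointersToCopy = 1
    // (objectPointersToCopyEnd = 44), and no byte-wise tail to copy.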
    // copy words
    for (std::size_t i = ObjectHeader::ObjectHeaderSize(); i < wordsToCopyEnd; i += WORD_SIZE) {
        // Atomic with relaxed order reason: data race with src_handle with no synchronization or ordering constraints
        // imposed on other reads or writes
        reinterpret_cast<std::atomic<uintptr_t> *>(&dstSp[i])->store(
            reinterpret_cast<std::atomic<uintptr_t> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }
    // copy remaining memory by object pointer
    for (std::size_t i = wordsToCopyEnd; i < objectPointersToCopyEnd; i += OBJECT_POINTER_SIZE) {
        reinterpret_cast<std::atomic<ark::ObjectPointerType> *>(&dstSp[i])->store(
            // Atomic with relaxed order reason: data race with src_handle with no synchronization or ordering
            // constraints imposed on other reads or writes
            reinterpret_cast<std::atomic<ark::ObjectPointerType> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }
    // copy remaining memory by bytes
    for (std::size_t i = objectPointersToCopyEnd; i < objSize; i++) {
        // Atomic with relaxed order reason: data race with src_handle with no synchronization or ordering constraints
        // imposed on other reads or writes
        reinterpret_cast<std::atomic<uint8_t> *>(&dstSp[i])->store(
            reinterpret_cast<std::atomic<uint8_t> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }

    // Call barriers here.
    auto *barrierSet = thread->GetBarrierSet();
    // No pre-barrier is needed: the copy does not overwrite reference fields of an already-visible object
    // Post barrier
    if (!mem::IsEmptyBarrier(barrierSet->GetPostType())) {
        if (!objectClass->IsArrayClass() || !objectClass->GetComponentType()->IsPrimitive()) {
            barrierSet->PostBarrier(dst, 0, objSize);
        }
    }
    return dst;
}

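// Object size depends on the object's shape: arrays and strings carry their own
// length, free objects record their size, and class mirrors are sized from the
// Class they represent.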
size_t ObjectHeader::ObjectSize() const
{
    auto *baseKlass = ClassAddr<BaseClass>();
    if (baseKlass->IsDynamicClass()) {
        return ObjectSizeDyn(baseKlass);
    }
    return ObjectSizeStatic(baseKlass);
}

size_t ObjectHeader::ObjectSizeDyn(BaseClass *baseKlass) const
{
    // Under concurrent execution the real klass may change while we read it;
    // a stale value is acceptable here.
    auto *klass = static_cast<HClass *>(baseKlass);

    if (klass->IsArray()) {
        return static_cast<const coretypes::Array *>(this)->ObjectSize(TaggedValue::TaggedTypeSize());
    }
    if (klass->IsString()) {
        LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(klass->GetSourceLang());
        return ctx.GetStringSize(this);
    }
    if (klass->IsFreeObject()) {
        return static_cast<const mem::FreeObject *>(this)->GetSize();
    }
    return baseKlass->GetObjectSize();
}

size_t ObjectHeader::ObjectSizeStatic(BaseClass *baseKlass) const
{
    ASSERT(baseKlass == ClassAddr<BaseClass>());
    auto *klass = static_cast<Class *>(baseKlass);

    if (klass->IsArrayClass()) {
        return static_cast<const coretypes::Array *>(this)->ObjectSize(klass->GetComponentSize());
    }

    if (klass->IsStringClass()) {
        return static_cast<const coretypes::String *>(this)->ObjectSize();
    }

    if (klass->IsClassClass()) {
        auto cls = ark::Class::FromClassObject(const_cast<ObjectHeader *>(this));
        if (cls != nullptr) {
            return ark::Class::GetClassObjectSizeFromClass(cls, klass->GetSourceLang());
        }
    }
    return baseKlass->GetObjectSize();
}

} // namespace ark