1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "runtime/include/object_header.h"
17
18 #include "libpandabase/mem/mem.h"
19 #include "runtime/include/class.h"
20 #include "runtime/include/coretypes/array.h"
21 #include "runtime/include/coretypes/class.h"
22 #include "runtime/include/hclass.h"
23 #include "runtime/include/runtime.h"
24 #include "runtime/include/thread.h"
25 #include "runtime/include/panda_vm.h"
26 #include "runtime/mem/free_object.h"
27 #include "runtime/mem/vm_handle.h"
28 #include "runtime/monitor_pool.h"
29 #include "runtime/handle_base-inl.h"
30
31 namespace panda {
32
namespace object_header_traits {

// Process-wide seed used when generating object identity hash codes.
// Seeded once at startup from LINEAR_SEED plus the current wall-clock time,
// so hash sequences differ from run to run. Atomic because hash generation
// may happen concurrently from multiple managed threads.
// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
std::atomic<uint32_t> g_hashSeed = std::atomic<uint32_t>(LINEAR_SEED + std::time(nullptr));

}  // namespace object_header_traits
39
40 /* static */
CreateObject(ManagedThread * thread,panda::BaseClass * klass,bool nonMovable)41 ObjectHeader *ObjectHeader::CreateObject(ManagedThread *thread, panda::BaseClass *klass, bool nonMovable)
42 {
43 ASSERT(klass != nullptr);
44 #ifndef NDEBUG
45 if (!klass->IsDynamicClass()) {
46 auto cls = static_cast<panda::Class *>(klass);
47 ASSERT(cls->IsInstantiable());
48 ASSERT(!cls->IsArrayClass());
49 ASSERT(!cls->IsStringClass());
50 }
51 #endif
52
53 size_t size = klass->GetObjectSize();
54 ASSERT(size != 0);
55 mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
56 ObjectHeader *obj {nullptr};
57 if (UNLIKELY(heapManager->IsObjectFinalized(klass))) {
58 nonMovable = true;
59 }
60 if (LIKELY(!nonMovable)) {
61 obj = heapManager->AllocateObject(klass, size);
62 } else {
63 obj = heapManager->AllocateNonMovableObject(klass, size);
64 }
65 return obj;
66 }
67
CreateObject(panda::BaseClass * klass,bool nonMovable)68 ObjectHeader *ObjectHeader::CreateObject(panda::BaseClass *klass, bool nonMovable)
69 {
70 return CreateObject(ManagedThread::GetCurrent(), klass, nonMovable);
71 }
72
73 /* static */
Create(ManagedThread * thread,BaseClass * klass)74 ObjectHeader *ObjectHeader::Create(ManagedThread *thread, BaseClass *klass)
75 {
76 return CreateObject(thread, klass, false);
77 }
78
Create(BaseClass * klass)79 ObjectHeader *ObjectHeader::Create(BaseClass *klass)
80 {
81 return CreateObject(klass, false);
82 }
83
84 /* static */
CreateNonMovable(BaseClass * klass)85 ObjectHeader *ObjectHeader::CreateNonMovable(BaseClass *klass)
86 {
87 return CreateObject(klass, true);
88 }
89
GetHashCodeFromMonitor(Monitor * monitorP)90 uint32_t ObjectHeader::GetHashCodeFromMonitor(Monitor *monitorP)
91 {
92 return monitorP->GetHashCode();
93 }
94
GetHashCodeMTSingle()95 uint32_t ObjectHeader::GetHashCodeMTSingle()
96 {
97 auto mark = GetMark();
98
99 switch (mark.GetState()) {
100 case MarkWord::STATE_UNLOCKED: {
101 mark = mark.DecodeFromHash(GenerateHashCode());
102 ASSERT(mark.GetState() == MarkWord::STATE_HASHED);
103 SetMark(mark);
104 return mark.GetHash();
105 }
106 case MarkWord::STATE_HASHED:
107 return mark.GetHash();
108 default:
109 LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
110 return 0;
111 }
112 }
113
uint32_t ObjectHeader::GetHashCodeMTMulti()
{
    // Multi-threaded variant: the mark word is shared state, so every
    // transition goes through atomic operations and the routine loops until
    // it observes a state from which the hash can be returned.
    ObjectHeader *currentObj = this;
    while (true) {
        auto mark = currentObj->AtomicGetMark();
        auto *thread = MTManagedThread::GetCurrent();
        ASSERT(thread != nullptr);
        // Inflation below may block and trigger GC; keep the object alive
        // (and relocatable) through a handle rather than a raw pointer.
        [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
        VMHandle<ObjectHeader> handleObj(thread, currentObj);

        switch (mark.GetState()) {
            case MarkWord::STATE_UNLOCKED: {
                // Attempt to install a freshly generated hash into the mark
                // word. The result of AtomicSetMark is ignored: on failure the
                // loop simply re-reads the mark and handles the new state.
                auto hashMark = mark.DecodeFromHash(GenerateHashCode());
                ASSERT(hashMark.GetState() == MarkWord::STATE_HASHED);
                currentObj->AtomicSetMark(mark, hashMark);
                break;
            }
            case MarkWord::STATE_LIGHT_LOCKED: {
                // A light-locked object is first inflated to a heavy monitor;
                // the retry then takes the STATE_HEAVY_LOCKED path.
                os::thread::ThreadId ownerThreadId = mark.GetThreadId();
                if (ownerThreadId == thread->GetInternalId()) {
                    // We own the thin lock: inflate directly.
                    Monitor::Inflate(this, thread);
                } else {
                    // Another thread owns it: InflateThinLock may block/GC,
                    // so the object pointer is refreshed from the handle.
                    Monitor::InflateThinLock(thread, handleObj);
                    currentObj = handleObj.GetPtr();
                }
                break;
            }
            case MarkWord::STATE_HEAVY_LOCKED: {
                auto monitorId = mark.GetMonitorId();
                auto monitorP = MTManagedThread::GetCurrent()->GetMonitorPool()->LookupMonitor(monitorId);
                if (monitorP != nullptr) {
                    // Delegate: the hash is served by the inflated monitor.
                    return GetHashCodeFromMonitor(monitorP);
                }
                // A heavy-locked mark word must reference a live monitor.
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): no monitor on heavy locked state";
                break;
            }
            case MarkWord::STATE_HASHED: {
                return mark.GetHash();
            }
            default: {
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
            }
        }
    }
}
159
Clone(ObjectHeader * src)160 ObjectHeader *ObjectHeader::Clone(ObjectHeader *src)
161 {
162 LOG_IF(src->ClassAddr<Class>()->GetManagedObject() == src, FATAL, RUNTIME) << "Can't clone a class";
163 return ObjectHeader::ShallowCopy(src);
164 }
165
ObjectHeader *ObjectHeader::ShallowCopy(ObjectHeader *src)
{
    /*
     * Shallow (field-by-field, no reference following) copy of `src` into a
     * freshly allocated object of the same class and space. The payload is
     * copied with relaxed atomic loads/stores; the object header itself is
     * not copied (the new header comes from the allocator).
     *
     * NOTE(d.trubenkov): use barriers for possible copied reference fields.
     */
    auto objectClass = src->ClassAddr<Class>();
    std::size_t objSize = src->ObjectSize();

    // AllocateObject can trigger gc, use handle for src.
    auto *thread = ManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    mem::HeapManager *heapManager = thread->GetVM()->GetHeapManager();
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<ObjectHeader> srcHandle(thread, src);

    ObjectHeader *dst = nullptr;
    // Determine whether object is non-movable: the copy must live in the
    // same space type as the source.
    if (PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(src) == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT) {
        dst = heapManager->AllocateNonMovableObject(objectClass, objSize);
    } else {
        dst = heapManager->AllocateObject(objectClass, objSize);
    }
    if (dst == nullptr) {
        // Allocation failure is propagated to the caller.
        return nullptr;
    }
    ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(srcHandle.GetPtr()) ==
           PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(dst));

    // From here on read the source through srcHandle: the allocation above
    // may have moved `src`.
    Span<uint8_t> srcSp(reinterpret_cast<uint8_t *>(srcHandle.GetPtr()), objSize);
    Span<uint8_t> dstSp(reinterpret_cast<uint8_t *>(dst), objSize);
    constexpr const std::size_t WORD_SIZE = sizeof(uintptr_t);
    // Copy only the payload after the header, in three passes of decreasing
    // granularity: machine words, then object-pointer-sized chunks, then
    // single bytes for any unaligned tail.
    std::size_t bytesToCopy = objSize - ObjectHeader::ObjectHeaderSize();
    std::size_t wordsToCopy = bytesToCopy / WORD_SIZE;
    std::size_t wordsToCopyEnd = ObjectHeader::ObjectHeaderSize() + WORD_SIZE * wordsToCopy;
    std::size_t objectPointersToCopy = (bytesToCopy - WORD_SIZE * wordsToCopy) / OBJECT_POINTER_SIZE;
    std::size_t objectPointersToCopyEnd = wordsToCopyEnd + objectPointersToCopy * OBJECT_POINTER_SIZE;
    // copy words
    for (std::size_t i = ObjectHeader::ObjectHeaderSize(); i < wordsToCopyEnd; i += WORD_SIZE) {
        // Atomic with relaxed order reason: data race with src_handle with no synchronization or ordering constraints
        // imposed on other reads or writes
        reinterpret_cast<std::atomic<uintptr_t> *>(&dstSp[i])->store(
            reinterpret_cast<std::atomic<uintptr_t> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }
    // copy remaining memory by object pointer
    for (std::size_t i = wordsToCopyEnd; i < objectPointersToCopyEnd; i += OBJECT_POINTER_SIZE) {
        reinterpret_cast<std::atomic<panda::ObjectPointerType> *>(&dstSp[i])->store(
            // Atomic with relaxed order reason: data race with src_handle with no synchronization or ordering
            // constraints imposed on other reads or writes
            reinterpret_cast<std::atomic<panda::ObjectPointerType> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }
    // copy remaining memory by bytes
    for (std::size_t i = objectPointersToCopyEnd; i < objSize; i++) {
        // Atomic with relaxed order reason: data race with src_handle with no synchronization or ordering constraints
        // imposed on other reads or writes
        reinterpret_cast<std::atomic<uint8_t> *>(&dstSp[i])->store(
            reinterpret_cast<std::atomic<uint8_t> *>(&srcSp[i])->load(std::memory_order_relaxed),
            std::memory_order_relaxed);
    }

    // Call barriers here.
    auto *barrierSet = thread->GetBarrierSet();
    // We don't need pre barrier here because we don't change any links inside main object
    // Post barrier
    auto gcPostBarrierType = barrierSet->GetPostType();
    if (!mem::IsEmptyBarrier(gcPostBarrierType)) {
        // Skip the barrier only for arrays of primitives — they hold no
        // references. Everything else may contain copied reference fields.
        if (!objectClass->IsArrayClass() || !objectClass->GetComponentType()->IsPrimitive()) {
            barrierSet->PostBarrier(dst, 0, objSize);
        }
    }
    return dst;
}
240
ObjectSize() const241 size_t ObjectHeader::ObjectSize() const
242 {
243 auto *baseKlass = ClassAddr<BaseClass>();
244 if (baseKlass->IsDynamicClass()) {
245 return ObjectSizeDyn(baseKlass);
246 }
247 return ObjectSizeStatic(baseKlass);
248 }
249
ObjectSizeDyn(BaseClass * baseKlass) const250 size_t ObjectHeader::ObjectSizeDyn(BaseClass *baseKlass) const
251 {
252 // if we do it concurrently, the real klass may be changed,
253 // but we are ok with that
254 auto *klass = static_cast<HClass *>(baseKlass);
255
256 if (klass->IsArray()) {
257 return static_cast<const coretypes::Array *>(this)->ObjectSize(TaggedValue::TaggedTypeSize());
258 }
259 if (klass->IsString()) {
260 LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(klass->GetSourceLang());
261 return ctx.GetStringSize(this);
262 }
263 if (klass->IsFreeObject()) {
264 return static_cast<const mem::FreeObject *>(this)->GetSize();
265 }
266 return baseKlass->GetObjectSize();
267 }
268
ObjectSizeStatic(BaseClass * baseKlass) const269 size_t ObjectHeader::ObjectSizeStatic(BaseClass *baseKlass) const
270 {
271 ASSERT(baseKlass == ClassAddr<BaseClass>());
272 auto *klass = static_cast<Class *>(baseKlass);
273
274 if (klass->IsArrayClass()) {
275 return static_cast<const coretypes::Array *>(this)->ObjectSize(klass->GetComponentSize());
276 }
277
278 if (klass->IsStringClass()) {
279 return static_cast<const coretypes::String *>(this)->ObjectSize();
280 }
281
282 if (klass->IsClassClass()) {
283 auto cls = panda::Class::FromClassObject(const_cast<ObjectHeader *>(this));
284 if (cls != nullptr) {
285 return panda::Class::GetClassObjectSizeFromClass(cls, klass->GetSourceLang());
286 }
287 }
288 return baseKlass->GetObjectSize();
289 }
290
291 } // namespace panda
292