1 /*
2 * Copyright (c) 2021 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "runtime/include/object_header.h"
17
18 #include "runtime/include/coretypes/array.h"
19 #include "runtime/include/coretypes/class.h"
20 #include "runtime/include/runtime.h"
21 #include "runtime/include/thread.h"
22 #include "runtime/include/panda_vm.h"
23 #include "runtime/mem/vm_handle.h"
24 #include "runtime/monitor_pool.h"
25 #include "runtime/handle_base-inl.h"
26
27 namespace panda {
28
29 /* static */
CreateObject(panda::BaseClass * klass,bool non_movable)30 ObjectHeader *ObjectHeader::CreateObject(panda::BaseClass *klass, bool non_movable)
31 {
32 #ifndef NDEBUG
33 if (!klass->IsDynamicClass()) {
34 auto cls = static_cast<panda::Class *>(klass);
35 ASSERT(cls->IsInstantiable());
36 ASSERT(!cls->IsArrayClass());
37 ASSERT(!cls->IsStringClass());
38 }
39 #endif
40
41 size_t size = klass->GetObjectSize();
42 ASSERT(size != 0);
43 mem::HeapManager *heap_manager = Thread::GetCurrent()->GetVM()->GetHeapManager();
44 ObjectHeader *obj {nullptr};
45 if (LIKELY(!non_movable)) {
46 obj = heap_manager->AllocateObject(klass, size);
47 } else {
48 obj = heap_manager->AllocateNonMovableObject(klass, size);
49 }
50 return obj;
51 }
52
53 /* static */
Create(BaseClass * klass)54 ObjectHeader *ObjectHeader::Create(BaseClass *klass)
55 {
56 return CreateObject(klass, false);
57 }
58
59 /* static */
CreateNonMovable(BaseClass * klass)60 ObjectHeader *ObjectHeader::CreateNonMovable(BaseClass *klass)
61 {
62 return CreateObject(klass, true);
63 }
64
// Atomically replaces the object's mark word with new_mark_word, but only if it
// still equals old_mark_word. Returns true on success, false otherwise.
// NOTE: this uses compare_exchange_weak, which may fail spuriously even when the
// current value equals old_mark_word — callers must retry in a loop (as
// GetHashCode() below does).
bool ObjectHeader::AtomicSetMark(MarkWord old_mark_word, MarkWord new_mark_word)
{
    // This is the way to operate with casting MarkWordSize <-> MarkWord and atomics
    auto ptr = reinterpret_cast<MarkWord *>(&markWord_);
    auto atomic_ptr = reinterpret_cast<std::atomic<MarkWord> *>(ptr);
    return atomic_ptr->compare_exchange_weak(old_mark_word, new_mark_word);
}
72
// Returns the hash code cached in an inflated (heavy-locked) object's monitor,
// lazily generating and publishing it on first request.
// Uses double-checked locking: the first unsynchronized read avoids taking the
// monitor when the hash is already set; the second read under MonitorEnter
// ensures only one thread ever stores a generated hash.
// Assumes a hash value of 0 means "not yet computed" — TODO confirm that
// GenerateHashCode() never returns 0.
uint32_t ObjectHeader::GetHashCodeFromMonitor(Monitor *monitor_p)
{
    if (monitor_p->GetHashCode() == 0) {
        Monitor::MonitorEnter(this);
        // We check it again in case someone changed it before we aquire monitor
        if (monitor_p->GetHashCode() == 0) {
            monitor_p->SetHashCode(GenerateHashCode());
        }
        Monitor::MonitorExit(this);
    }

    return monitor_p->GetHashCode();
}
86
// Computes (or retrieves) the object's identity hash code, retrying until the
// mark word reaches a state from which the hash can be read:
//  - UNLOCKED: try to CAS a freshly generated hash into the mark word, then
//    loop again to re-read it (the CAS may fail or be weak-spurious).
//  - LIGHT_LOCKED: the mark word has no room for a hash alongside the thin
//    lock, so inflate to a full monitor (which stores the hash) and retry.
//  - HEAVY_LOCKED: the hash lives in the monitor; delegate to
//    GetHashCodeFromMonitor().
//  - HASHED: the hash is already encoded in the mark word; return it.
uint32_t ObjectHeader::GetHashCode()
{
    while (true) {
        auto mark = this->AtomicGetMark();
        switch (mark.GetState()) {
            case MarkWord::STATE_UNLOCKED: {
                auto hash_mark = mark.DecodeFromHash(GenerateHashCode());
                ASSERT(hash_mark.GetState() == MarkWord::STATE_HASHED);
                // Result intentionally ignored: on failure (real or spurious)
                // we simply loop and re-examine the new mark word state.
                this->AtomicSetMark(mark, hash_mark);
                break;
            }
            case MarkWord::STATE_LIGHT_LOCKED: {
                // Futexes and has support for locking with non-current thread.
                auto thread = MTManagedThread::GetCurrent();
                // Try to inflate and if it fails, wait a bit before trying again.
                if (!Monitor::Inflate(this, thread)) {
                    static constexpr uint64_t SLEEP_MS = 10;
                    MTManagedThread::Sleep(SLEEP_MS);
                }
                break;
            }
            case MarkWord::STATE_HEAVY_LOCKED: {
                auto monitor_id = mark.GetMonitorId();
                auto monitor_p = MonitorPool::LookupMonitor(Thread::GetCurrent()->GetVM(), monitor_id);
                if (monitor_p != nullptr) {
                    return GetHashCodeFromMonitor(monitor_p);
                }
                // A heavy-locked mark word must always reference a live monitor.
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): no monitor on heavy locked state";
                break;
            }
            case MarkWord::STATE_HASHED: {
                return mark.GetHash();
            }
            default: {
                LOG(FATAL, RUNTIME) << "Error on GetHashCode(): invalid state";
            }
        }
    }
}
126
Clone(ObjectHeader * src)127 ObjectHeader *ObjectHeader::Clone(ObjectHeader *src)
128 {
129 LOG_IF(src->ClassAddr<Class>()->GetManagedObject() == src, FATAL, RUNTIME) << "Can't clone a class";
130 return ObjectHeader::ShallowCopy(src);
131 }
132
ShallowCopy(ObjectHeader * src)133 ObjectHeader *ObjectHeader::ShallowCopy(ObjectHeader *src)
134 {
135 auto object_class = src->ClassAddr<Class>();
136 std::size_t obj_size = src->ObjectSize();
137
138 // AllocateObject can trigger gc, use handle for src.
139 auto thread = ManagedThread::GetCurrent();
140 mem::HeapManager *heap_manager = thread->GetVM()->GetHeapManager();
141 [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
142 VMHandle<ObjectHeader> src_handle(thread, src);
143
144 ObjectHeader *dst = nullptr;
145 // Determine whether object is non-movable
146 if (PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(src) == SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT) {
147 dst = heap_manager->AllocateNonMovableObject(object_class, obj_size);
148 } else {
149 dst = heap_manager->AllocateObject(object_class, obj_size);
150 }
151 if (dst == nullptr) {
152 return nullptr;
153 }
154 ASSERT(PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(src) ==
155 PoolManager::GetMmapMemPool()->GetSpaceTypeForAddr(dst));
156
157 Span<uint8_t> src_sp(reinterpret_cast<uint8_t *>(src_handle.GetPtr()), obj_size);
158 Span<uint8_t> dst_sp(reinterpret_cast<uint8_t *>(dst), obj_size);
159 constexpr const std::size_t WORD_SIZE = sizeof(uintptr_t);
160 std::size_t bytes_to_copy = obj_size - ObjectHeader::ObjectHeaderSize();
161 std::size_t words_to_copy = bytes_to_copy / WORD_SIZE;
162 std::size_t remaining_offset = ObjectHeader::ObjectHeaderSize() + WORD_SIZE * words_to_copy;
163 // copy words
164 for (std::size_t i = ObjectHeader::ObjectHeaderSize(); i < remaining_offset; i += WORD_SIZE) {
165 reinterpret_cast<std::atomic<uintptr_t> *>(&dst_sp[i])
166 ->store(reinterpret_cast<std::atomic<uintptr_t> *>(&src_sp[i])->load(std::memory_order_relaxed),
167 std::memory_order_relaxed);
168 }
169 // copy remaining bytes
170 for (std::size_t i = remaining_offset; i < obj_size; i++) {
171 reinterpret_cast<std::atomic<uint8_t> *>(&dst_sp[i])
172 ->store(reinterpret_cast<std::atomic<uint8_t> *>(&src_sp[i])->load(std::memory_order_relaxed),
173 std::memory_order_relaxed);
174 }
175
176 // Call barriers here.
177 auto *barrier_set = thread->GetVM()->GetGC()->GetBarrierSet();
178 // We don't need pre barrier here because we don't change any links inside main object
179 // Post barrier
180 auto gc_post_barrier_type = barrier_set->GetPostType();
181 if (!mem::IsEmptyBarrier(gc_post_barrier_type)) {
182 if (object_class->IsArrayClass()) {
183 if (object_class->IsObjectArrayClass()) {
184 barrier_set->PostBarrierArrayWrite(dst, obj_size);
185 }
186 } else {
187 barrier_set->PostBarrierEveryObjectFieldWrite(dst, obj_size);
188 }
189 }
190 return dst;
191 }
192
ObjectSize() const193 size_t ObjectHeader::ObjectSize() const
194 {
195 auto *klass = ClassAddr<Class>();
196
197 if (klass->IsArrayClass()) {
198 return static_cast<const coretypes::Array *>(this)->ObjectSize();
199 }
200
201 if (klass->IsStringClass()) {
202 return static_cast<const coretypes::String *>(this)->ObjectSize();
203 }
204
205 if (klass->IsClassClass()) {
206 auto cls = panda::Class::FromClassObject(const_cast<ObjectHeader *>(this));
207 if (cls != nullptr) {
208 return panda::Class::GetClassObjectSizeFromClass(cls);
209 }
210 }
211
212 return klass->GetObjectSize();
213 }
214
215 } // namespace panda
216