/*
 * Copyright 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_INL_H_

#include "gc/space/bump_pointer_space.h"
#include "mark_compact.h"
#include "mirror/object-inl.h"

namespace art {
namespace gc {
namespace collector {

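// Record 'obj' in class_after_obj_hash_map_ when its class lies at a higher
// address in the moving space, or when visiting its references requires
// walking the super-class hierarchy. For each such class the map keeps the
// lowest-address object seen so far; for classes needing the super-class walk,
// super_class_after_class_hash_map_ additionally records the highest-address
// super class in the moving space, if that differs from the class itself.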
inline void MarkCompact::UpdateClassAfterObjectMap(mirror::Object* obj) {
  mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
  // Track a class if it requires walking super-classes for visiting references,
  // or if it is in the moving space at a higher address than its objects.
  if (UNLIKELY(
          (std::less<mirror::Object*>{}(obj, klass) && bump_pointer_space_->HasAddress(klass)) ||
          (klass->GetReferenceInstanceOffsets<kVerifyNone>() == mirror::Class::kClassWalkSuper &&
           walk_super_class_cache_ != klass))) {
    // Since this function is also invoked during the compaction pause, it is
    // preferable to store such a super class separately rather than updating the
    // key, as the latter would require traversing the hierarchy for every object
    // of 'klass'.
    auto ret1 = class_after_obj_hash_map_.try_emplace(ObjReference::FromMirrorPtr(klass),
                                                      ObjReference::FromMirrorPtr(obj));
    if (ret1.second) {
      if (klass->GetReferenceInstanceOffsets<kVerifyNone>() == mirror::Class::kClassWalkSuper) {
        // In this case we need to traverse the super-class hierarchy to find the
        // super class at the highest address.
        mirror::Class* highest_klass = bump_pointer_space_->HasAddress(klass) ? klass : nullptr;
        for (ObjPtr<mirror::Class> k = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
             k != nullptr;
             k = k->GetSuperClass<kVerifyNone, kWithoutReadBarrier>()) {
          // TODO: Can we break once we encounter a super class outside the moving space?
          if (bump_pointer_space_->HasAddress(k.Ptr())) {
            highest_klass = std::max(highest_klass, k.Ptr(), std::less<mirror::Class*>());
          }
        }
        if (highest_klass != nullptr && highest_klass != klass) {
          auto ret2 = super_class_after_class_hash_map_.try_emplace(
              ObjReference::FromMirrorPtr(klass), ObjReference::FromMirrorPtr(highest_klass));
          DCHECK(ret2.second);
        } else {
          walk_super_class_cache_ = klass;
        }
      }
    } else if (std::less<mirror::Object*>{}(obj, ret1.first->second.AsMirrorPtr())) {
      ret1.first->second = ObjReference::FromMirrorPtr(obj);
    }
  }
}

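// Set one bit per kAlignment bytes in the live-words bitmap for the range
// [begin, begin + size), and return the bit index corresponding to 'begin'.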
template <size_t kAlignment>
inline uintptr_t MarkCompact::LiveWordsBitmap<kAlignment>::SetLiveWords(uintptr_t begin,
                                                                        size_t size) {
  const uintptr_t begin_bit_idx = MemRangeBitmap::BitIndexFromAddr(begin);
  DCHECK(!Bitmap::TestBit(begin_bit_idx));
  // Range to set bits: [begin, end]
  uintptr_t end = begin + size - kAlignment;
  const uintptr_t end_bit_idx = MemRangeBitmap::BitIndexFromAddr(end);
  uintptr_t* begin_bm_address = Bitmap::Begin() + Bitmap::BitIndexToWordIndex(begin_bit_idx);
  uintptr_t* end_bm_address = Bitmap::Begin() + Bitmap::BitIndexToWordIndex(end_bit_idx);
  ptrdiff_t diff = end_bm_address - begin_bm_address;
  uintptr_t mask = Bitmap::BitIndexToMask(begin_bit_idx);
  // Bits that need to be set in the first word, if it's not also the last word.
  mask = ~(mask - 1);
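  // For example, assuming 64-bit bitmap words, if 'begin' corresponds to bit 3
  // of its word, then BitIndexToMask() yields 0b1000 and ~(0b1000 - 1) keeps
  // bits 3..63 set, i.e. everything from 'begin' to the end of the first word.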
  if (diff > 0) {
    *begin_bm_address |= mask;
    mask = ~0;
    // Even though memset can handle the (diff == 1) case, we should avoid the
    // overhead of a function call for this highly likely case (most objects are
    // small).
    if (diff > 1) {
      // Set all intermediate bits to 1.
      std::memset(static_cast<void*>(begin_bm_address + 1), 0xff, (diff - 1) * sizeof(uintptr_t));
    }
  }
  uintptr_t end_mask = Bitmap::BitIndexToMask(end_bit_idx);
  *end_bm_address |= mask & (end_mask | (end_mask - 1));
  return begin_bit_idx;
}

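// Visit contiguous runs ("strides") of set bits, starting at begin_bit_idx and
// going up to (but not including) the address 'end'. For each stride the
// visitor receives the stride's starting bit index, its length in
// kAlignment-sized words, and whether it is the last stride. At most
// 'bytes' / kAlignment live words are visited in total, and the last stride's
// reported length is capped to the remaining budget.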
template <size_t kAlignment> template <typename Visitor>
inline void MarkCompact::LiveWordsBitmap<kAlignment>::VisitLiveStrides(uintptr_t begin_bit_idx,
                                                                       uint8_t* end,
                                                                       const size_t bytes,
                                                                       Visitor&& visitor) const {
  // Range to visit [begin_bit_idx, end_bit_idx]
  DCHECK(IsAligned<kAlignment>(end));
  end -= kAlignment;
  const uintptr_t end_bit_idx = MemRangeBitmap::BitIndexFromAddr(reinterpret_cast<uintptr_t>(end));
  DCHECK_LE(begin_bit_idx, end_bit_idx);
  uintptr_t begin_word_idx = Bitmap::BitIndexToWordIndex(begin_bit_idx);
  const uintptr_t end_word_idx = Bitmap::BitIndexToWordIndex(end_bit_idx);
  DCHECK(Bitmap::TestBit(begin_bit_idx));
  size_t stride_size = 0;
  size_t idx_in_word = 0;
  size_t num_heap_words = bytes / kAlignment;
  uintptr_t live_stride_start_idx;
  uintptr_t word = Bitmap::Begin()[begin_word_idx];

  // Setup the first word.
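  // Clear the bits below begin_bit_idx so only bits at or after 'begin' remain,
  // then round begin_bit_idx down to a bitmap-word boundary so that
  // begin_bit_idx + idx_in_word yields absolute bit indices below.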
  word &= ~(Bitmap::BitIndexToMask(begin_bit_idx) - 1);
  begin_bit_idx = RoundDown(begin_bit_idx, Bitmap::kBitsPerBitmapWord);

  do {
    if (UNLIKELY(begin_word_idx == end_word_idx)) {
      uintptr_t mask = Bitmap::BitIndexToMask(end_bit_idx);
      word &= mask | (mask - 1);
    }
    if (~word == 0) {
      // All bits in the word are marked.
      if (stride_size == 0) {
        live_stride_start_idx = begin_bit_idx;
      }
      stride_size += Bitmap::kBitsPerBitmapWord;
      if (num_heap_words <= stride_size) {
        break;
      }
    } else {
      while (word != 0) {
        // discard 0s
        size_t shift = CTZ(word);
        idx_in_word += shift;
        word >>= shift;
        if (stride_size > 0) {
          if (shift > 0) {
            if (num_heap_words <= stride_size) {
              break;
            }
            visitor(live_stride_start_idx, stride_size, /*is_last*/ false);
            num_heap_words -= stride_size;
            live_stride_start_idx = begin_bit_idx + idx_in_word;
            stride_size = 0;
          }
        } else {
          live_stride_start_idx = begin_bit_idx + idx_in_word;
        }
        // consume 1s
        shift = CTZ(~word);
        DCHECK_NE(shift, 0u);
        word >>= shift;
        idx_in_word += shift;
        stride_size += shift;
      }
      // If the whole word == 0, or its higher bits are 0s, then we exit the
      // above loop without completely consuming the word, so call the visitor
      // if needed.
      if (idx_in_word < Bitmap::kBitsPerBitmapWord && stride_size > 0) {
        if (num_heap_words <= stride_size) {
          break;
        }
        visitor(live_stride_start_idx, stride_size, /*is_last*/ false);
        num_heap_words -= stride_size;
        stride_size = 0;
      }
      idx_in_word = 0;
    }
    begin_bit_idx += Bitmap::kBitsPerBitmapWord;
    begin_word_idx++;
    if (UNLIKELY(begin_word_idx > end_word_idx)) {
      num_heap_words = std::min(stride_size, num_heap_words);
      break;
    }
    word = Bitmap::Begin()[begin_word_idx];
  } while (true);

  if (stride_size > 0) {
    visitor(live_stride_start_idx, num_heap_words, /*is_last*/ true);
  }
}

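// Return the offset, in kAlignment-sized words from the start of the chunk, of
// the (n+1)-th set bit in the given chunk, where a chunk spans
// kBitmapWordsPerVectorWord bitmap words. 'n' is zero-based and must be less
// than the number of set bits in the chunk.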
template <size_t kAlignment>
inline
uint32_t MarkCompact::LiveWordsBitmap<kAlignment>::FindNthLiveWordOffset(size_t chunk_idx,
                                                                         uint32_t n) const {
  DCHECK_LT(n, kBitsPerVectorWord);
  const size_t index = chunk_idx * kBitmapWordsPerVectorWord;
  for (uint32_t i = 0; i < kBitmapWordsPerVectorWord; i++) {
    uintptr_t word = Bitmap::Begin()[index + i];
    if (~word == 0) {
      if (n < Bitmap::kBitsPerBitmapWord) {
        return i * Bitmap::kBitsPerBitmapWord + n;
      }
      n -= Bitmap::kBitsPerBitmapWord;
    } else {
      uint32_t j = 0;
      while (word != 0) {
        // count contiguous 0s
        uint32_t shift = CTZ(word);
        word >>= shift;
        j += shift;
        // count contiguous 1s
        shift = CTZ(~word);
        DCHECK_NE(shift, 0u);
        if (shift > n) {
          return i * Bitmap::kBitsPerBitmapWord + j + n;
        }
        n -= shift;
        word >>= shift;
        j += shift;
      }
    }
  }
  UNREACHABLE();
}

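// Update the reference at 'offset' within 'obj' to point to the target's
// post-compaction address. In debug builds, verify that a reference into the
// moving space which is below the black-allocations boundary is marked in the
// moving-space bitmap.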
inline void MarkCompact::UpdateRef(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* old_ref = obj->GetFieldObject<
      mirror::Object, kVerifyNone, kWithoutReadBarrier, /*kIsVolatile*/false>(offset);
  if (kIsDebugBuild) {
    if (live_words_bitmap_->HasAddress(old_ref)
        && reinterpret_cast<uint8_t*>(old_ref) < black_allocations_begin_
        && !moving_space_bitmap_->Test(old_ref)) {
      mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
      std::ostringstream oss;
      heap_->DumpSpaces(oss);
      MemMap::DumpMaps(oss, /* terse= */ true);
      LOG(FATAL) << "Not marked in the bitmap ref=" << old_ref
                 << " from_ref=" << from_ref
                 << " offset=" << offset
                 << " obj=" << obj
                 << " obj-validity=" << IsValidObject(obj)
                 << " from-space=" << static_cast<void*>(from_space_begin_)
                 << " bitmap= " << moving_space_bitmap_->DumpMemAround(old_ref)
                 << " from_ref "
                 << heap_->GetVerification()->DumpRAMAroundAddress(
                        reinterpret_cast<uintptr_t>(from_ref), 128)
                 << " obj "
                 << heap_->GetVerification()->DumpRAMAroundAddress(
                        reinterpret_cast<uintptr_t>(obj), 128)
                 << " old_ref " << heap_->GetVerification()->DumpRAMAroundAddress(
                                       reinterpret_cast<uintptr_t>(old_ref), 128)
                 << " maps\n" << oss.str();
    }
  }
  mirror::Object* new_ref = PostCompactAddress(old_ref);
  if (new_ref != old_ref) {
    obj->SetFieldObjectWithoutWriteBarrier<
        /*kTransactionActive*/false, /*kCheckTransaction*/false, kVerifyNone, /*kIsVolatile*/false>(
            offset,
            new_ref);
  }
}

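// Debug-only check that a root slot is updated at most once during compaction;
// slots residing on the current thread's stack are exempt from the
// bookkeeping. Returns true if the caller should update the root. In debug
// builds, roots pointing outside the moving space are skipped (returns false).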
inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
                                                mirror::Object* old_ref,
                                                const RootInfo& info) {
  // ASAN promotes stack frames to the heap in order to detect
  // stack-use-after-return issues. So skip this double-root update detection
  // on ASAN as well.
  if (kIsDebugBuild && !kMemoryToolIsAvailable) {
    void* stack_low_addr = stack_low_addr_;
    void* stack_high_addr = stack_high_addr_;
    if (!live_words_bitmap_->HasAddress(old_ref)) {
      return false;
    }
    Thread* self = Thread::Current();
    if (UNLIKELY(stack_low_addr == nullptr)) {
      stack_low_addr = self->GetStackEnd();
      stack_high_addr = reinterpret_cast<char*>(stack_low_addr) + self->GetStackSize();
    }
    if (root < stack_low_addr || root > stack_high_addr) {
      MutexLock mu(self, lock_);
      auto ret = updated_roots_->insert(root);
      DCHECK(ret.second) << "root=" << root << " old_ref=" << old_ref
                         << " stack_low_addr=" << stack_low_addr
                         << " stack_high_addr=" << stack_high_addr;
    }
    DCHECK(reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_ ||
           live_words_bitmap_->Test(old_ref))
        << "ref=" << old_ref << " <" << mirror::Object::PrettyTypeOf(old_ref) << "> RootInfo ["
        << info << "]";
  }
  return true;
}

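// Update a GC root, given either as a compressed reference or as a raw
// pointer, to the post-compaction address of the object it refers to.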
inline void MarkCompact::UpdateRoot(mirror::CompressedReference<mirror::Object>* root,
                                    const RootInfo& info) {
  DCHECK(!root->IsNull());
  mirror::Object* old_ref = root->AsMirrorPtr();
  if (VerifyRootSingleUpdate(root, old_ref, info)) {
    mirror::Object* new_ref = PostCompactAddress(old_ref);
    if (old_ref != new_ref) {
      root->Assign(new_ref);
    }
  }
}

inline void MarkCompact::UpdateRoot(mirror::Object** root, const RootInfo& info) {
  mirror::Object* old_ref = *root;
  if (VerifyRootSingleUpdate(root, old_ref, info)) {
    mirror::Object* new_ref = PostCompactAddress(old_ref);
    if (old_ref != new_ref) {
      *root = new_ref;
    }
  }
}

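// Count the set bits (live words) preceding 'bit_idx' within its
// vector-word-aligned group of bitmap words. The bit at 'bit_idx' itself is
// expected to be set.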
template <size_t kAlignment>
inline size_t MarkCompact::LiveWordsBitmap<kAlignment>::CountLiveWordsUpto(size_t bit_idx) const {
  const size_t word_offset = Bitmap::BitIndexToWordIndex(bit_idx);
  uintptr_t word;
  size_t ret = 0;
  // This is needed only if we decide to make chunks 128-bit but still
  // choose to use 64-bit word for bitmap. Ideally we should use 128-bit
  // SIMD instructions to compute popcount.
  if (kBitmapWordsPerVectorWord > 1) {
    for (size_t i = RoundDown(word_offset, kBitmapWordsPerVectorWord); i < word_offset; i++) {
      word = Bitmap::Begin()[i];
      ret += POPCOUNT(word);
    }
  }
  word = Bitmap::Begin()[word_offset];
  const uintptr_t mask = Bitmap::BitIndexToMask(bit_idx);
  DCHECK_NE(word & mask, 0u)
      << " word_offset:" << word_offset
      << " bit_idx:" << bit_idx
      << " bit_idx_in_word:" << (bit_idx % Bitmap::kBitsPerBitmapWord)
      << std::hex << " word: 0x" << word
      << " mask: 0x" << mask << std::dec;
  ret += POPCOUNT(word & (mask - 1));
  return ret;
}

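// Objects at or above black_allocations_begin_ ("black allocations") are not
// compacted individually; the entire region slides by a fixed
// black_objs_slide_diff_ bytes.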
inline mirror::Object* MarkCompact::PostCompactBlackObjAddr(mirror::Object* old_ref) const {
  return reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(old_ref)
                                           - black_objs_slide_diff_);
}

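// Compute the post-compaction address of a (non-black) object as: space begin
// + chunk_info_vec_[chunk index] + live bytes preceding the object within its
// kOffsetChunkSize-sized chunk, the latter derived from the live-words bitmap.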
inline mirror::Object* MarkCompact::PostCompactOldObjAddr(mirror::Object* old_ref) const {
  const uintptr_t begin = live_words_bitmap_->Begin();
  const uintptr_t addr_offset = reinterpret_cast<uintptr_t>(old_ref) - begin;
  const size_t vec_idx = addr_offset / kOffsetChunkSize;
  const size_t live_bytes_in_bitmap_word =
      live_words_bitmap_->CountLiveWordsUpto(addr_offset / kAlignment) * kAlignment;
  return reinterpret_cast<mirror::Object*>(begin
                                           + chunk_info_vec_[vec_idx]
                                           + live_bytes_in_bitmap_word);
}

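// Translate a reference known to be in the moving space to its post-compaction
// address, distinguishing black allocations (at or above
// black_allocations_begin_) from older objects.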
inline mirror::Object* MarkCompact::PostCompactAddressUnchecked(mirror::Object* old_ref) const {
  if (reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_) {
    return PostCompactBlackObjAddr(old_ref);
  }
  if (kIsDebugBuild) {
    mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
    DCHECK(live_words_bitmap_->Test(old_ref))
        << "ref=" << old_ref;
    if (!moving_space_bitmap_->Test(old_ref)) {
      std::ostringstream oss;
      Runtime::Current()->GetHeap()->DumpSpaces(oss);
      MemMap::DumpMaps(oss, /* terse= */ true);
      LOG(FATAL) << "ref=" << old_ref
                 << " from_ref=" << from_ref
                 << " from-space=" << static_cast<void*>(from_space_begin_)
                 << " bitmap= " << moving_space_bitmap_->DumpMemAround(old_ref)
                 << heap_->GetVerification()->DumpRAMAroundAddress(
                        reinterpret_cast<uintptr_t>(from_ref), 128)
                 << " maps\n" << oss.str();
    }
  }
  return PostCompactOldObjAddr(old_ref);
}

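// Translate 'old_ref' to its post-compaction address if it lies within the
// moving space; references outside the moving space are returned unchanged.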
inline mirror::Object* MarkCompact::PostCompactAddress(mirror::Object* old_ref) const {
  // TODO: To further speed up the check, maybe we should consider caching heap
  // start/end in this object.
  if (LIKELY(live_words_bitmap_->HasAddress(old_ref))) {
    return PostCompactAddressUnchecked(old_ref);
  }
  return old_ref;
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_INL_H_