/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "base/mutex-inl.h"
#include "mirror/object-inl.h"
#include "region_space.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

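// Thread-safe allocation entry point: rounds the request up to kAlignment and delegates to
// AllocNonvirtual<false> (i.e. a regular mutator allocation, not an evacuation allocation).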
inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
                                          size_t num_bytes,
                                          /* out */ size_t* bytes_allocated,
                                          /* out */ size_t* usable_size,
                                          /* out */ size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

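// Variant used while the world is stopped: asserts that the mutator lock is exclusively held,
// then performs a regular Alloc.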
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self,
                                                      size_t num_bytes,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

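// Core allocation path. Requests of at most kRegionSize bytes first try a lock-free bump
// allocation in the current (or evacuation) region, then retry and allocate a fresh region
// under region_lock_. Larger requests are forwarded to AllocLarge.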
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes,
                                                    /* out */ size_t* bytes_allocated,
                                                    /* out */ size_t* usable_size,
                                                    /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated
    // current_region_ or evac_region_. TODO: fix race.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    Region* r = AllocateRegion(kForEvac);
    if (LIKELY(r != nullptr)) {
      obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      CHECK(obj != nullptr);
      // Do our allocation before setting the region; this makes sure no threads race ahead
      // and fill in the region before we allocate the object. b/63153464
      if (kForEvac) {
        evac_region_ = r;
      } else {
        current_region_ = r;
      }
      return obj;
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

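// Lock-free bump-pointer allocation within a single region, using a weak CAS on the region's
// top pointer. Returns null if the region does not have num_bytes of space left.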
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes,
                                                  /* out */ size_t* bytes_allocated,
                                                  /* out */ size_t* usable_size,
                                                  /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.load(std::memory_order_relaxed);
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
  objects_allocated_.fetch_add(1, std::memory_order_relaxed);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

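// Sums the allocated byte counts of all non-free regions matching kRegionType, holding
// region_lock_ for the duration of the iteration.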
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return bytes;
}

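// Same traversal as GetBytesAllocatedInternal, but sums object counts instead of byte counts.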
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return objects;
}

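// Visits marked objects in unevacuated from-space regions. Adjacent unevac regions are merged
// into contiguous blocks so that the bitmap is walked once per block rather than once per region.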
template <typename Visitor>
inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
                                             Visitor&& visitor) {
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
  // Instead of region-wise scan, find contiguous blocks of un-evac regions and then
  // visit them. Everything before visit_block_begin has been processed, while
  // [visit_block_begin, visit_block_end) still needs to be visited.
  uint8_t* visit_block_begin = nullptr;
  uint8_t* visit_block_end = nullptr;
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      // visit_block_begin set to nullptr means a new visit block needs to be started.
      if (visit_block_begin == nullptr) {
        visit_block_begin = r->Begin();
      }
      visit_block_end = r->End();
    } else if (visit_block_begin != nullptr) {
      // Visit the block range as r is not adjacent to current visit block.
      bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
                               reinterpret_cast<uintptr_t>(visit_block_end),
                               visitor);
      visit_block_begin = nullptr;
    }
  }
  // Visit the last block, if not processed yet.
  if (visit_block_begin != nullptr) {
    bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
                             reinterpret_cast<uintptr_t>(visit_block_end),
                             visitor);
  }
}

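// Visits objects in all regions, or only in to-space regions when kToSpaceOnly is true. A large
// region is visited as a single object and its tail regions are skipped. Callers must have
// suspended the threads (see the lock-order comment below).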
template<bool kToSpaceOnly, typename Visitor>
inline void RegionSpace::WalkInternal(Visitor&& visitor) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // We may visit a large object with live_bytes = 0 here. However, it is
      // safe as it cannot contain dangling pointers because corresponding regions
      // (and regions corresponding to dead referents) cannot be allocated for new
      // allocations without first clearing regions' live_bytes and state.
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      visitor(obj);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      WalkNonLargeRegion(visitor, r);
    }
  }
}

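// Visits the objects of a single non-large region, either through the live bitmap or by walking
// consecutive objects from Begin() to Top(); see the comment below for when each is used.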
template<typename Visitor>
inline void RegionSpace::WalkNonLargeRegion(Visitor&& visitor, const Region* r) {
  DCHECK(!r->IsLarge() && !r->IsLargeTail());
  // For newly allocated and evacuated regions, live bytes will be -1.
  uint8_t* pos = r->Begin();
  uint8_t* top = r->Top();
  // We need the region space bitmap to iterate over a region's objects
  // if
  // - its live bytes count is invalid (i.e. -1); or
  // - its live bytes count is lower than the allocated bytes count.
  //
  // In both of the previous cases, we do not have the guarantee that
  // all allocated objects are "alive" (i.e. valid), so we depend on
  // the region space bitmap to identify which ones to visit.
  //
  // On the other hand, when all allocated bytes are known to be alive,
  // we know that they form a range of consecutive objects (modulo
  // object alignment constraints) that can be visited iteratively: we
  // can compute the next object's location by using the current
  // object's address and size (and object alignment constraints).
  const bool need_bitmap =
      r->LiveBytes() != static_cast<size_t>(-1) &&
      r->LiveBytes() != static_cast<size_t>(top - pos);
  if (need_bitmap) {
    GetLiveBitmap()->VisitMarkedRange(
        reinterpret_cast<uintptr_t>(pos),
        reinterpret_cast<uintptr_t>(top),
        visitor);
  } else {
    while (pos < top) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
      if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
        visitor(obj);
        pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
      } else {
        break;
      }
    }
  }
}

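// Convenience wrappers around WalkInternal: Walk visits every region, WalkToSpace only
// to-space regions.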
template <typename Visitor>
inline void RegionSpace::Walk(Visitor&& visitor) {
  WalkInternal</* kToSpaceOnly= */ false>(visitor);
}

template <typename Visitor>
inline void RegionSpace::WalkToSpace(Visitor&& visitor) {
  WalkInternal</* kToSpaceOnly= */ true>(visitor);
}

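// Returns the address just past `obj`, rounded up to kAlignment, i.e. where the next object in a
// bump-allocated region would start.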
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

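// Allocates a large object spanning one or more whole regions. With cyclic region allocation,
// the search starts at cyclic_alloc_region_index_ and wraps around to the beginning of the
// space; non-evacuation allocations also retain enough free regions for a full evacuation.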
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
                                               /* out */ size_t* bytes_allocated,
                                               /* out */ size_t* usable_size,
                                               /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs_in_large_region = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs_in_large_region, 0U);
  DCHECK_LT((num_regs_in_large_region - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs_in_large_region * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs_in_large_region) * 2 > num_regions_) {
      return nullptr;
    }
  }

  mirror::Object* region = nullptr;
  // Find a large enough set of contiguous free regions.
  if (kCyclicRegionAllocation) {
    size_t next_region = -1;
    // Try to find a range of free regions within [cyclic_alloc_region_index_, num_regions_).
    region = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
                                         num_regions_,
                                         num_regs_in_large_region,
                                         bytes_allocated,
                                         usable_size,
                                         bytes_tl_bulk_allocated,
                                         &next_region);

    if (region == nullptr) {
      DCHECK_EQ(next_region, static_cast<size_t>(-1));
      // If the previous attempt failed, try to find a range of free regions within
      // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
      region = AllocLargeInRange<kForEvac>(
          0,
          std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
          num_regs_in_large_region,
          bytes_allocated,
          usable_size,
          bytes_tl_bulk_allocated,
          &next_region);
    }

    if (region != nullptr) {
      DCHECK_LT(0u, next_region);
      DCHECK_LE(next_region, num_regions_);
      // Move the cyclic allocation region marker to the region
      // following the large region that was just allocated.
      cyclic_alloc_region_index_ = next_region % num_regions_;
    }
  } else {
    // Try to find a range of free regions within [0, num_regions_).
    region = AllocLargeInRange<kForEvac>(0,
                                         num_regions_,
                                         num_regs_in_large_region,
                                         bytes_allocated,
                                         usable_size,
                                         bytes_tl_bulk_allocated);
  }
  if (kForEvac && region != nullptr) {
    TraceHeapSize();
  }
  return region;
}

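// Searches [begin, end) for num_regs_in_large_region contiguous free regions. On success, marks
// the first one as a large region and the rest as large tails, fills in the out-parameters, and
// returns the start of the large object; returns null if no such range exists.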
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
                                                      size_t end,
                                                      size_t num_regs_in_large_region,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated,
                                                      /* out */ size_t* next_region) {
  DCHECK_LE(0u, begin);
  DCHECK_LT(begin, end);
  DCHECK_LE(end, num_regions_);
  size_t left = begin;
  while (left + num_regs_in_large_region - 1 < end) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs_in_large_region)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs_in_large_region) {
      if (regions_[right].IsFree()) {
        ++right;
        // Ensure `right` is not going beyond the past-the-end index of the region space.
        DCHECK_LE(right, num_regions_);
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // `right` points one region past the last free region.
      DCHECK_EQ(left + num_regs_in_large_region, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      if (kForEvac) {
        ++num_evac_regions_;
      } else {
        ++num_non_free_regions_;
      }
      size_t allocated = num_regs_in_large_region * kRegionSize;
      // We make 'top' all usable bytes, as the caller of this
      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
      first_reg->SetTop(first_reg->Begin() + allocated);
      if (!kForEvac) {
        // Evac doesn't count as newly allocated.
        first_reg->SetNewlyAllocated();
      }
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        if (kForEvac) {
          ++num_evac_regions_;
        } else {
          ++num_non_free_regions_;
        }
        if (!kForEvac) {
          // Evac doesn't count as newly allocated.
          regions_[p].SetNewlyAllocated();
        }
      }
      *bytes_allocated = allocated;
      if (usable_size != nullptr) {
        *usable_size = allocated;
      }
      *bytes_tl_bulk_allocated = allocated;
      mirror::Object* large_region = reinterpret_cast<mirror::Object*>(first_reg->Begin());
      DCHECK(large_region != nullptr);
      if (next_region != nullptr) {
        // Return the index of the region next to the allocated large region via `next_region`.
        *next_region = right;
      }
      return large_region;
    } else {
      // `right` points to the non-free region. Start with the one after it.
      left = right + 1;
    }
  }
  return nullptr;
}

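// Frees a large object by clearing its head region and all of its tail regions, and updates the
// evacuation / non-free region counts accordingly.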
template<bool kForEvac>
inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages=*/true);
    if (kForEvac) {
      --num_evac_regions_;
    } else {
      --num_non_free_regions_;
    }
  }
  if (kIsDebugBuild && end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

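// Bytes allocated in this region: the entire large allocation (Top() - begin_) for a large head
// region, zero for a large tail, and Top() - begin_ (or the owning thread's TLAB end minus
// begin_ for a TLAB-backed region) for an ordinary allocated region.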
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetTlabEnd() - begin_;
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}

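// Objects allocated in this region: one for a large (head) region, zero for a large tail, and
// the region's object counter otherwise.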
inline size_t RegionSpace::Region::ObjectsAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
    return 1;
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    return objects_allocated_;
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_