1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <deque>
17
18 #include "bump_pointer_space-inl.h"
19 #include "bump_pointer_space.h"
20 #include "base/dumpable.h"
21 #include "base/logging.h"
22 #include "gc/accounting/read_barrier_table.h"
23 #include "mirror/class-inl.h"
24 #include "mirror/object-inl.h"
25 #include "thread_list.h"
26
27 namespace art {
28 namespace gc {
29 namespace space {
30
31 // If a region has live objects whose total size is less than this percentage
32 // of the region size, evacuate the region.
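// For example, if kRegionSize were 256 KiB (an illustrative value, not taken
// from this file), a region with fewer than 192 KiB of live bytes (75% of
// 256 KiB) would be selected for evacuation, while a denser region would stay
// in place as an unevacuated from-space region.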
33 static constexpr uint kEvacuateLivePercentThreshold = 75U;
34
35 // Whether we protect the unused and cleared regions.
36 static constexpr bool kProtectClearedRegions = kIsDebugBuild;
37
38 // Whether we poison memory areas occupied by dead objects in unevacuated regions.
39 static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
40
41 // Special 32-bit value used to poison memory areas occupied by dead
42 // objects in unevacuated regions. Dereferencing this value is expected
43 // to trigger a memory protection fault, as it is unlikely that it
44 // points to a valid, non-protected memory area.
45 static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D; // "BADDROID"
46
47 // Whether we check a region's live bytes count against the region bitmap.
48 static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
49
50 MemMap RegionSpace::CreateMemMap(const std::string& name,
51 size_t capacity,
52 uint8_t* requested_begin) {
53 CHECK_ALIGNED(capacity, kRegionSize);
54 std::string error_msg;
55   // Ask for an additional kRegionSize of capacity so that we can align the map by kRegionSize
56   // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
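  // Illustration (hypothetical addresses): if MapAnonymous returned a base of
  // 0x70001000 and kRegionSize were 256 KiB, AlignBy(kRegionSize) below would
  // move the usable start up to 0x70040000 and trim the end accordingly, so the
  // final map still spans exactly `capacity` bytes with kRegionSize-aligned
  // boundaries.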
57 MemMap mem_map;
58 while (true) {
59 mem_map = MemMap::MapAnonymous(name.c_str(),
60 requested_begin,
61 capacity + kRegionSize,
62 PROT_READ | PROT_WRITE,
63 /*low_4gb=*/ true,
64 /*reuse=*/ false,
65 /*reservation=*/ nullptr,
66 &error_msg);
67 if (mem_map.IsValid() || requested_begin == nullptr) {
68 break;
69 }
70 // Retry with no specified request begin.
71 requested_begin = nullptr;
72 }
73 if (!mem_map.IsValid()) {
74 LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
75 << PrettySize(capacity) << " with message " << error_msg;
76 PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
77 MemMap::DumpMaps(LOG_STREAM(ERROR));
78 return MemMap::Invalid();
79 }
80 CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
81 CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
82 CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
83 if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
84     // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
85     // kRegionSize at the end.
86 mem_map.SetSize(capacity);
87 } else {
88     // Got an unaligned map. Align both ends.
89 mem_map.AlignBy(kRegionSize);
90 }
91 CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
92 CHECK_ALIGNED(mem_map.End(), kRegionSize);
93 CHECK_EQ(mem_map.Size(), capacity);
94 return mem_map;
95 }
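// Usage sketch (hypothetical caller, not part of this file): the map is
// created first and then handed to Create(), roughly:
//
//   MemMap map = RegionSpace::CreateMemMap("region space", capacity, /*requested_begin=*/ nullptr);
//   RegionSpace* space = RegionSpace::Create("region space", std::move(map), /*use_generational_cc=*/ true);
//
// Create() simply forwards to the RegionSpace constructor below, which carves
// the map into `capacity / kRegionSize` regions.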
96
97 RegionSpace* RegionSpace::Create(
98 const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
99 return new RegionSpace(name, std::move(mem_map), use_generational_cc);
100 }
101
102 RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
103 : ContinuousMemMapAllocSpace(name,
104 std::move(mem_map),
105 mem_map.Begin(),
106 mem_map.End(),
107 mem_map.End(),
108 kGcRetentionPolicyAlwaysCollect),
109 region_lock_("Region lock", kRegionSpaceRegionLock),
110 use_generational_cc_(use_generational_cc),
111 time_(1U),
112 num_regions_(mem_map_.Size() / kRegionSize),
113 madvise_time_(0U),
114 num_non_free_regions_(0U),
115 num_evac_regions_(0U),
116 max_peak_num_non_free_regions_(0U),
117 non_free_region_index_limit_(0U),
118 current_region_(&full_region_),
119 evac_region_(nullptr),
120 cyclic_alloc_region_index_(0U) {
121 CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
122 CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
123 DCHECK_GT(num_regions_, 0U);
124 regions_.reset(new Region[num_regions_]);
125 uint8_t* region_addr = mem_map_.Begin();
126 for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
127 regions_[i].Init(i, region_addr, region_addr + kRegionSize);
128 }
129 mark_bitmap_ =
130 accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity());
131 if (kIsDebugBuild) {
132 CHECK_EQ(regions_[0].Begin(), Begin());
133 for (size_t i = 0; i < num_regions_; ++i) {
134 CHECK(regions_[i].IsFree());
135 CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
136 if (i + 1 < num_regions_) {
137 CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
138 }
139 }
140 CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
141 }
142 DCHECK(!full_region_.IsFree());
143 DCHECK(full_region_.IsAllocated());
144 size_t ignored;
145 DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
146 // Protect the whole region space from the start.
147 Protect();
148 }
149
150 size_t RegionSpace::FromSpaceSize() {
151 uint64_t num_regions = 0;
152 MutexLock mu(Thread::Current(), region_lock_);
153 for (size_t i = 0; i < num_regions_; ++i) {
154     Region* r = &regions_[i];
155 if (r->IsInFromSpace()) {
156 ++num_regions;
157 }
158 }
159 return num_regions * kRegionSize;
160 }
161
162 size_t RegionSpace::UnevacFromSpaceSize() {
163 uint64_t num_regions = 0;
164 MutexLock mu(Thread::Current(), region_lock_);
165 for (size_t i = 0; i < num_regions_; ++i) {
166     Region* r = &regions_[i];
167 if (r->IsInUnevacFromSpace()) {
168 ++num_regions;
169 }
170 }
171 return num_regions * kRegionSize;
172 }
173
174 size_t RegionSpace::ToSpaceSize() {
175 uint64_t num_regions = 0;
176 MutexLock mu(Thread::Current(), region_lock_);
177 for (size_t i = 0; i < num_regions_; ++i) {
178     Region* r = &regions_[i];
179 if (r->IsInToSpace()) {
180 ++num_regions;
181 }
182 }
183 return num_regions * kRegionSize;
184 }
185
186 void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
187 // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
188 DCHECK(GetUseGenerationalCC() || clear_live_bytes);
189 DCHECK(!IsFree() && IsInToSpace());
190 type_ = RegionType::kRegionTypeUnevacFromSpace;
191 if (IsNewlyAllocated()) {
192 // A newly allocated region set as unevac from-space must be
193 // a large or large tail region.
194 DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
195 // Always clear the live bytes of a newly allocated (large or
196 // large tail) region.
197 clear_live_bytes = true;
198 // Clear the "newly allocated" status here, as we do not want the
199 // GC to see it when encountering (and processing) references in the
200 // from-space.
201 //
202 // Invariant: There should be no newly-allocated region in the
203 // from-space (when the from-space exists, which is between the calls
204 // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
205 is_newly_allocated_ = false;
206 }
207 if (clear_live_bytes) {
208 // Reset the live bytes, as we have made a non-evacuation
209 // decision (possibly based on the percentage of live bytes).
210 live_bytes_ = 0;
211 }
212 }
213
214 bool RegionSpace::Region::GetUseGenerationalCC() {
215 // We are retrieving the info from Heap, instead of the cached version in
216 // RegionSpace, because accessing the Heap from a Region object is easier
217 // than accessing the RegionSpace.
218 return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
219 }
220
221 inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
222 // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
223 DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
224 DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
225 // The region should be evacuated if:
226 // - the evacuation is forced (!large && `evac_mode == kEvacModeForceAll`); or
227 // - the region was allocated after the start of the previous GC (newly allocated region); or
228 // - !large and the live ratio is below threshold (`kEvacuateLivePercentThreshold`).
229 if (IsLarge()) {
230 // It makes no sense to evacuate in the large case, since the region only contains zero or
231     // one object. If the region is completely empty, we'll reclaim it anyhow. If its single object
232 // is live, we would just be moving around region-aligned memory.
233 return false;
234 }
235 if (UNLIKELY(evac_mode == kEvacModeForceAll)) {
236 return true;
237 }
238 DCHECK(IsAllocated());
239 if (is_newly_allocated_) {
240 // Invariant: newly allocated regions have an undefined live bytes count.
241 DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
242 // We always evacuate newly-allocated non-large regions as we
243 // believe they contain many dead objects (a very simple form of
244 // the generational hypothesis, even before the Sticky-Bit CC
245 // approach).
246 //
247 // TODO: Verify that assertion by collecting statistics on the
248 // number/proportion of live objects in newly allocated regions
249 // in RegionSpace::ClearFromSpace.
250 //
251 // Note that a side effect of evacuating a newly-allocated
252 // non-large region is that the "newly allocated" status will
253 // later be removed, as its live objects will be copied to an
254 // evacuation region, which won't be marked as "newly
255 // allocated" (see RegionSpace::AllocateRegion).
256 return true;
257 } else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
258 bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
259 if (is_live_percent_valid) {
260 DCHECK(IsInToSpace());
261 DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
262 DCHECK_LE(live_bytes_, BytesAllocated());
263 const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
264 DCHECK_LE(live_bytes_, bytes_allocated);
265       // Side note: live_percent == 0 does not necessarily mean
266       // there are no live objects, due to rounding (there may be a
267       // few).
268 return live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated;
269 }
270 }
271 return false;
272 }
273
274 void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
275 // This method is only used when Generational CC collection is enabled.
276 DCHECK(use_generational_cc_);
277
278   // This code uses logic similar to that used in RegionSpace::FreeLarge
279 // to traverse the regions supporting `obj`.
280 // TODO: Refactor.
281 DCHECK(IsLargeObject(obj));
282 DCHECK_ALIGNED(obj, kRegionSize);
283 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
284 DCHECK_GT(obj_size, space::RegionSpace::kRegionSize);
285 // Size of the memory area allocated for `obj`.
286 size_t obj_alloc_size = RoundUp(obj_size, space::RegionSpace::kRegionSize);
287 uint8_t* begin_addr = reinterpret_cast<uint8_t*>(obj);
288 uint8_t* end_addr = begin_addr + obj_alloc_size;
289 DCHECK_ALIGNED(end_addr, kRegionSize);
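  // Example (hypothetical sizes): if kRegionSize were 256 KiB and `obj` were
  // 600 KiB, obj_alloc_size would round up to 768 KiB, so the loop below would
  // visit three regions: one large region followed by two large tail regions.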
290
291 // Zero the live bytes of the large region and large tail regions containing the object.
292 MutexLock mu(Thread::Current(), region_lock_);
293 for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
294 Region* region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
295 if (addr == begin_addr) {
296 DCHECK(region->IsLarge());
297 } else {
298 DCHECK(region->IsLargeTail());
299 }
300 region->ZeroLiveBytes();
301 }
302 if (kIsDebugBuild && end_addr < Limit()) {
303 // If we aren't at the end of the space, check that the next region is not a large tail.
304 Region* following_region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
305 DCHECK(!following_region->IsLargeTail());
306 }
307 }
308
309 // Determine which regions to evacuate and mark them as
310 // from-space. Mark the rest as unevacuated from-space.
311 void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
312 EvacMode evac_mode,
313 bool clear_live_bytes) {
314 // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
315 DCHECK(use_generational_cc_ || clear_live_bytes);
316 ++time_;
317 if (kUseTableLookupReadBarrier) {
318 DCHECK(rb_table->IsAllCleared());
319 rb_table->SetAll();
320 }
321 MutexLock mu(Thread::Current(), region_lock_);
322 // We cannot use the partially utilized TLABs across a GC. Therefore, revoke
323 // them during the thread-flip.
324 partial_tlabs_.clear();
325
326 // Counter for the number of expected large tail regions following a large region.
327 size_t num_expected_large_tails = 0U;
328 // Flag to store whether the previously seen large region has been evacuated.
329 // This is used to apply the same evacuation policy to related large tail regions.
330 bool prev_large_evacuated = false;
331 VerifyNonFreeRegionLimit();
332 const size_t iter_limit = kUseTableLookupReadBarrier
333 ? num_regions_
334 : std::min(num_regions_, non_free_region_index_limit_);
335 for (size_t i = 0; i < iter_limit; ++i) {
336     Region* r = &regions_[i];
337 RegionState state = r->State();
338 RegionType type = r->Type();
339 if (!r->IsFree()) {
340 DCHECK(r->IsInToSpace());
341 if (LIKELY(num_expected_large_tails == 0U)) {
342 DCHECK((state == RegionState::kRegionStateAllocated ||
343 state == RegionState::kRegionStateLarge) &&
344 type == RegionType::kRegionTypeToSpace);
345 bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
346 bool is_newly_allocated = r->IsNewlyAllocated();
347 if (should_evacuate) {
348 r->SetAsFromSpace();
349 DCHECK(r->IsInFromSpace());
350 } else {
351 r->SetAsUnevacFromSpace(clear_live_bytes);
352 DCHECK(r->IsInUnevacFromSpace());
353 }
354 if (UNLIKELY(state == RegionState::kRegionStateLarge &&
355 type == RegionType::kRegionTypeToSpace)) {
356 prev_large_evacuated = should_evacuate;
357 // In 2-phase full heap GC, this function is called after marking is
358 // done. So, it is possible that some newly allocated large object is
359           // marked but its live_bytes is still -1. We need to clear the
360           // mark-bit; otherwise live_bytes will not be updated in
361           // ConcurrentCopying::ProcessMarkStackRef(), which would break the
362           // logic.
363 if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
364 GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
365 }
366 num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
367 DCHECK_GT(num_expected_large_tails, 0U);
368 }
369 } else {
370 DCHECK(state == RegionState::kRegionStateLargeTail &&
371 type == RegionType::kRegionTypeToSpace);
372 if (prev_large_evacuated) {
373 r->SetAsFromSpace();
374 DCHECK(r->IsInFromSpace());
375 } else {
376 r->SetAsUnevacFromSpace(clear_live_bytes);
377 DCHECK(r->IsInUnevacFromSpace());
378 }
379 --num_expected_large_tails;
380 }
381 } else {
382 DCHECK_EQ(num_expected_large_tails, 0U);
383 if (kUseTableLookupReadBarrier) {
384 // Clear the rb table for to-space regions.
385 rb_table->Clear(r->Begin(), r->End());
386 }
387 }
388 // Invariant: There should be no newly-allocated region in the from-space.
389 DCHECK(!r->is_newly_allocated_);
390 }
391 DCHECK_EQ(num_expected_large_tails, 0U);
392 current_region_ = &full_region_;
393 evac_region_ = &full_region_;
394 }
395
396 static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
397 ZeroAndReleasePages(begin, end - begin);
398 if (kProtectClearedRegions) {
399 CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
400 }
401 }
402
403 void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
404 /* out */ uint64_t* cleared_objects,
405 const bool clear_bitmap) {
406 DCHECK(cleared_bytes != nullptr);
407 DCHECK(cleared_objects != nullptr);
408 *cleared_bytes = 0;
409 *cleared_objects = 0;
410 size_t new_non_free_region_index_limit = 0;
411 // We should avoid calling madvise syscalls while holding region_lock_.
412   // Therefore, we split the work of this function into two loops. The first
413 // loop gathers memory ranges that must be madvised. Then we release the lock
414 // and perform madvise on the gathered memory ranges. Finally, we reacquire
415 // the lock and loop over the regions to clear the from-space regions and make
416   // them available for allocation.
417 std::deque<std::pair<uint8_t*, uint8_t*>> madvise_list;
418 // Gather memory ranges that need to be madvised.
419 {
420 MutexLock mu(Thread::Current(), region_lock_);
421 // Lambda expression `expand_madvise_range` adds a region to the "clear block".
422 //
423 // As we iterate over from-space regions, we maintain a "clear block", composed of
424 // adjacent to-be-cleared regions and whose bounds are `clear_block_begin` and
425 // `clear_block_end`. When processing a new region which is not adjacent to
426 // the clear block (discontinuity in cleared regions), the clear block
427 // is added to madvise_list and the clear block is reset (to the most recent
428 // to-be-cleared region).
429 //
430 // This is done in order to combine zeroing and releasing pages to reduce how
431 // often madvise is called. This helps reduce contention on the mmap semaphore
432 // (see b/62194020).
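    // Example (hypothetical layout): if regions 3, 4 and 5 are adjacent
    // from-space regions and region 8 is another from-space region, the lambda
    // below adds two entries to madvise_list: [Begin(3), End(5)) and
    // [Begin(8), End(8)), so three contiguous regions are released with a
    // single madvise call instead of three.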
433 uint8_t* clear_block_begin = nullptr;
434 uint8_t* clear_block_end = nullptr;
435 auto expand_madvise_range = [&madvise_list, &clear_block_begin, &clear_block_end] (Region* r) {
436 if (clear_block_end != r->Begin()) {
437 if (clear_block_begin != nullptr) {
438 DCHECK(clear_block_end != nullptr);
439 madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
440 }
441 clear_block_begin = r->Begin();
442 }
443 clear_block_end = r->End();
444 };
445 for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
446       Region* r = &regions_[i];
447       // The following check goes through objects in the region, so it must be
448       // performed before madvising the region; it therefore cannot be executed
449       // in the second loop below.
450 if (kCheckLiveBytesAgainstRegionBitmap) {
451 CheckLiveBytesAgainstRegionBitmap(r);
452 }
453 if (r->IsInFromSpace()) {
454 expand_madvise_range(r);
455 } else if (r->IsInUnevacFromSpace()) {
456 // We must skip tails of live large objects.
457 if (r->LiveBytes() == 0 && !r->IsLargeTail()) {
458 // Special case for 0 live bytes, this means all of the objects in the region are
459           // dead and we can clear it. This is important for large objects since we must
460 // not visit dead ones in RegionSpace::Walk because they may contain dangling
461 // references to invalid objects. It is also better to clear these regions now
462 // instead of at the end of the next GC to save RAM. If we don't clear the regions
463 // here, they will be cleared in next GC by the normal live percent evacuation logic.
464 expand_madvise_range(r);
465 // Also release RAM for large tails.
466 while (i + 1 < num_regions_ && regions_[i + 1].IsLargeTail()) {
467             expand_madvise_range(&regions_[i + 1]);
468 i++;
469 }
470 }
471 }
472 }
473 // There is a small probability that we may reach here with
474 // clear_block_{begin, end} = nullptr. If all the regions allocated since
475 // last GC have been for large objects and all of them survive till this GC
476 // cycle, then there will be no regions in from-space.
477 if (LIKELY(clear_block_begin != nullptr)) {
478 DCHECK(clear_block_end != nullptr);
479 madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
480 }
481 }
482
483 // Madvise the memory ranges.
484 uint64_t start_time = NanoTime();
485 for (const auto &iter : madvise_list) {
486 ZeroAndProtectRegion(iter.first, iter.second);
487 }
488 madvise_time_ += NanoTime() - start_time;
489
490 for (const auto &iter : madvise_list) {
491 if (clear_bitmap) {
492 GetLiveBitmap()->ClearRange(
493 reinterpret_cast<mirror::Object*>(iter.first),
494 reinterpret_cast<mirror::Object*>(iter.second));
495 }
496 }
497 madvise_list.clear();
498
499 // Iterate over regions again and actually make the from space regions
500 // available for allocation.
501 MutexLock mu(Thread::Current(), region_lock_);
502 VerifyNonFreeRegionLimit();
503
504 // Update max of peak non free region count before reclaiming evacuated regions.
505 max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
506 num_non_free_regions_);
507
508 for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
509     Region* r = &regions_[i];
510 if (r->IsInFromSpace()) {
511 DCHECK(!r->IsTlab());
512 *cleared_bytes += r->BytesAllocated();
513 *cleared_objects += r->ObjectsAllocated();
514 --num_non_free_regions_;
515 r->Clear(/*zero_and_release_pages=*/false);
516 } else if (r->IsInUnevacFromSpace()) {
517 if (r->LiveBytes() == 0) {
518 DCHECK(!r->IsLargeTail());
519 *cleared_bytes += r->BytesAllocated();
520 *cleared_objects += r->ObjectsAllocated();
521 r->Clear(/*zero_and_release_pages=*/false);
522 size_t free_regions = 1;
523 // Also release RAM for large tails.
524 while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
525 regions_[i + free_regions].Clear(/*zero_and_release_pages=*/false);
526 ++free_regions;
527 }
528 num_non_free_regions_ -= free_regions;
529         // When clear_bitmap is true, this clearing of the bitmap is taken care of
530         // in clear_region().
531 if (!clear_bitmap) {
532 GetLiveBitmap()->ClearRange(
533 reinterpret_cast<mirror::Object*>(r->Begin()),
534 reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
535 }
536 continue;
537 }
538 r->SetUnevacFromSpaceAsToSpace();
539 if (r->AllAllocatedBytesAreLive()) {
540 // Try to optimize the number of ClearRange calls by checking whether the next regions
541 // can also be cleared.
542 size_t regions_to_clear_bitmap = 1;
543 while (i + regions_to_clear_bitmap < num_regions_) {
544           Region* const cur = &regions_[i + regions_to_clear_bitmap];
545 if (!cur->AllAllocatedBytesAreLive()) {
546 DCHECK(!cur->IsLargeTail());
547 break;
548 }
549 CHECK(cur->IsInUnevacFromSpace());
550 cur->SetUnevacFromSpaceAsToSpace();
551 ++regions_to_clear_bitmap;
552 }
553
554         // Optimization (for full CC only): If *all* the allocated bytes in a
555         // region are live, then the live-bit information for these objects is
556         // superfluous:
557         // - We can determine that these objects are all live by using
558         // Region::AllAllocatedBytesAreLive (which just checks whether
559         // `LiveBytes() == static_cast<size_t>(Top() - Begin())`).
560 // - We can visit the objects in this region using
561 // RegionSpace::GetNextObject, i.e. without resorting to the
562 // live bits (see RegionSpace::WalkInternal).
563 // Therefore, we can clear the bits for these objects in the
564 // (live) region space bitmap (and release the corresponding pages).
565 //
566 // This optimization is incompatible with Generational CC, because:
567 // - minor (young-generation) collections need to know which objects
568         // were marked during the previous GC cycle, meaning all mark bitmaps
569 // (this includes the region space bitmap) need to be preserved
570 // between a (minor or major) collection N and a following minor
571 // collection N+1;
572 // - at this stage (in the current GC cycle), we cannot determine
573         // whether the next collection will be a minor or a major one.
574 // This means that we need to be conservative and always preserve the
575 // region space bitmap when using Generational CC.
576 // Note that major collections do not require the previous mark bitmaps
577         // to be preserved, and as a matter of fact they do clear the region space
578 // bitmap. But they cannot do so before we know the next GC cycle will
579 // be a major one, so this operation happens at the beginning of such a
580 // major collection, before marking starts.
581 if (!use_generational_cc_) {
582 GetLiveBitmap()->ClearRange(
583 reinterpret_cast<mirror::Object*>(r->Begin()),
584 reinterpret_cast<mirror::Object*>(r->Begin()
585 + regions_to_clear_bitmap * kRegionSize));
586 }
587 // Skip over extra regions for which we cleared the bitmaps: we shall not clear them,
588 // as they are unevac regions that are live.
589 // Subtract one for the for-loop.
590 i += regions_to_clear_bitmap - 1;
591 } else {
592 // TODO: Explain why we do not poison dead objects in region
593 // `r` when it has an undefined live bytes count (i.e. when
594 // `r->LiveBytes() == static_cast<size_t>(-1)`) with
595 // Generational CC.
596 if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
597 // Only some allocated bytes are live in this unevac region.
598 // This should only happen for an allocated non-large region.
599 DCHECK(r->IsAllocated()) << r->State();
600 if (kPoisonDeadObjectsInUnevacuatedRegions) {
601 PoisonDeadObjectsInUnevacuatedRegion(r);
602 }
603 }
604 }
605 }
606 // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
607     Region* last_checked_region = &regions_[i];
608 if (!last_checked_region->IsFree()) {
609 new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
610 last_checked_region->Idx() + 1);
611 }
612 }
613 // Update non_free_region_index_limit_.
614 SetNonFreeRegionLimit(new_non_free_region_index_limit);
615 evac_region_ = nullptr;
616 num_non_free_regions_ += num_evac_regions_;
617 num_evac_regions_ = 0;
618 }
619
620 void RegionSpace::CheckLiveBytesAgainstRegionBitmap(Region* r) {
621 if (r->LiveBytes() == static_cast<size_t>(-1)) {
622 // Live bytes count is undefined for `r`; nothing to check here.
623 return;
624 }
625
626 // Functor walking the region space bitmap for the range corresponding
627 // to region `r` and calculating the sum of live bytes.
628 size_t live_bytes_recount = 0u;
629 auto recount_live_bytes =
630 [&r, &live_bytes_recount](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
631 DCHECK_ALIGNED(obj, kAlignment);
632 if (r->IsLarge()) {
633 // If `r` is a large region, then it contains at most one
634 // object, which must start at the beginning of the
635 // region. The live byte count in that case is equal to the
636           // allocated bytes (large region + large tail regions).
637 DCHECK_EQ(reinterpret_cast<uint8_t*>(obj), r->Begin());
638 DCHECK_EQ(live_bytes_recount, 0u);
639 live_bytes_recount = r->Top() - r->Begin();
640 } else {
641 DCHECK(r->IsAllocated())
642 << "r->State()=" << r->State() << " r->LiveBytes()=" << r->LiveBytes();
643 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
644 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
645 live_bytes_recount += alloc_size;
646 }
647 };
648 // Visit live objects in `r` and recount the live bytes.
649 GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
650 reinterpret_cast<uintptr_t>(r->Top()),
651 recount_live_bytes);
652 // Check that this recount matches the region's current live bytes count.
653 DCHECK_EQ(live_bytes_recount, r->LiveBytes());
654 }
655
656 // Poison the memory area in range [`begin`, `end`) with value `kPoisonDeadObject`.
657 static void PoisonUnevacuatedRange(uint8_t* begin, uint8_t* end) {
658 static constexpr size_t kPoisonDeadObjectSize = sizeof(kPoisonDeadObject);
659 static_assert(IsPowerOfTwo(kPoisonDeadObjectSize) &&
660 IsPowerOfTwo(RegionSpace::kAlignment) &&
661 (kPoisonDeadObjectSize < RegionSpace::kAlignment),
662 "RegionSpace::kAlignment should be a multiple of kPoisonDeadObjectSize"
663 " and both should be powers of 2");
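  // Because dead-object gaps begin and end at kAlignment boundaries and, per
  // the assertion above, kAlignment is a power-of-two multiple of the 4-byte
  // poison word, the fill below always writes whole kPoisonDeadObject words and
  // covers each gap exactly.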
664 DCHECK_ALIGNED(begin, kPoisonDeadObjectSize);
665 DCHECK_ALIGNED(end, kPoisonDeadObjectSize);
666 uint32_t* begin_addr = reinterpret_cast<uint32_t*>(begin);
667 uint32_t* end_addr = reinterpret_cast<uint32_t*>(end);
668 std::fill(begin_addr, end_addr, kPoisonDeadObject);
669 }
670
671 void RegionSpace::PoisonDeadObjectsInUnevacuatedRegion(Region* r) {
672 // The live byte count of `r` should be different from -1, as this
673 // region should neither be a newly allocated region nor an
674 // evacuated region.
675 DCHECK_NE(r->LiveBytes(), static_cast<size_t>(-1))
676 << "Unexpected live bytes count of -1 in " << Dumpable<Region>(*r);
677
678 // Past-the-end address of the previously visited (live) object (or
679 // the beginning of the region, if `maybe_poison` has not run yet).
680 uint8_t* prev_obj_end = reinterpret_cast<uint8_t*>(r->Begin());
681
682 // Functor poisoning the space between `obj` and the previously
683   // visited (live) object (or the beginning of the region), if any.
684 auto maybe_poison = [&prev_obj_end](mirror::Object* obj) REQUIRES(Locks::mutator_lock_) {
685 DCHECK_ALIGNED(obj, kAlignment);
686 uint8_t* cur_obj_begin = reinterpret_cast<uint8_t*>(obj);
687 if (cur_obj_begin != prev_obj_end) {
688 // There is a gap (dead object(s)) between the previously
689 // visited (live) object (or the beginning of the region) and
690 // `obj`; poison that space.
691 PoisonUnevacuatedRange(prev_obj_end, cur_obj_begin);
692 }
693 prev_obj_end = reinterpret_cast<uint8_t*>(GetNextObject(obj));
694 };
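  // Example (hypothetical layout): for a region containing
  //   [live A][dead gap 1][live B][dead gap 2 up to Top()],
  // the bitmap walk below invokes `maybe_poison` for A and B, which poisons
  // gap 1, and the final check after the walk poisons gap 2.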
695
696 // Visit live objects in `r` and poison gaps (dead objects) between them.
697 GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
698 reinterpret_cast<uintptr_t>(r->Top()),
699 maybe_poison);
700 // Poison memory between the last live object and the end of the region, if any.
701 if (prev_obj_end < r->Top()) {
702 PoisonUnevacuatedRange(prev_obj_end, r->Top());
703 }
704 }
705
706 bool RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
707 size_t failed_alloc_bytes) {
708 size_t max_contiguous_allocation = 0;
709 MutexLock mu(Thread::Current(), region_lock_);
710
711 if (current_region_->End() - current_region_->Top() > 0) {
712 max_contiguous_allocation = current_region_->End() - current_region_->Top();
713 }
714
715 size_t max_contiguous_free_regions = 0;
716 size_t num_contiguous_free_regions = 0;
717 bool prev_free_region = false;
718 for (size_t i = 0; i < num_regions_; ++i) {
719     Region* r = &regions_[i];
720 if (r->IsFree()) {
721 if (!prev_free_region) {
722 CHECK_EQ(num_contiguous_free_regions, 0U);
723 prev_free_region = true;
724 }
725 ++num_contiguous_free_regions;
726 } else if (prev_free_region) {
727 CHECK_NE(num_contiguous_free_regions, 0U);
728 max_contiguous_free_regions = std::max(max_contiguous_free_regions,
729 num_contiguous_free_regions);
730 num_contiguous_free_regions = 0U;
731 prev_free_region = false;
732 }
733 }
734 max_contiguous_allocation = std::max(max_contiguous_allocation,
735 max_contiguous_free_regions * kRegionSize);
736
737 // Calculate how many regions are available for allocations as we have to ensure
738 // that enough regions are left for evacuation.
739 size_t regions_free_for_alloc = num_regions_ / 2 - num_non_free_regions_;
740
741 max_contiguous_allocation = std::min(max_contiguous_allocation,
742 regions_free_for_alloc * kRegionSize);
743 if (failed_alloc_bytes > max_contiguous_allocation) {
744 os << "; failed due to fragmentation (largest possible contiguous allocation "
745 << max_contiguous_allocation << " bytes). Number of "
746 << PrettySize(kRegionSize)
747        << " sized free regions is: " << regions_free_for_alloc;
748 return true;
749 }
750 // Caller's job to print failed_alloc_bytes.
751 return false;
752 }
753
754 void RegionSpace::Clear() {
755 MutexLock mu(Thread::Current(), region_lock_);
756 for (size_t i = 0; i < num_regions_; ++i) {
757 Region* r = ®ions_[i];
758 if (!r->IsFree()) {
759 --num_non_free_regions_;
760 }
761 r->Clear(/*zero_and_release_pages=*/true);
762 }
763 SetNonFreeRegionLimit(0);
764 DCHECK_EQ(num_non_free_regions_, 0u);
765 current_region_ = &full_region_;
766 evac_region_ = &full_region_;
767 }
768
769 void RegionSpace::Protect() {
770 if (kProtectClearedRegions) {
771 CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_NONE);
772 }
773 }
774
775 void RegionSpace::Unprotect() {
776 if (kProtectClearedRegions) {
777 CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_READ | PROT_WRITE);
778 }
779 }
780
781 void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
782 MutexLock mu(Thread::Current(), region_lock_);
783 CHECK_LE(new_capacity, NonGrowthLimitCapacity());
784 size_t new_num_regions = new_capacity / kRegionSize;
785 if (non_free_region_index_limit_ > new_num_regions) {
786 LOG(WARNING) << "Couldn't clamp region space as there are regions in use beyond growth limit.";
787 return;
788 }
789 num_regions_ = new_num_regions;
790 if (kCyclicRegionAllocation && cyclic_alloc_region_index_ >= num_regions_) {
791 cyclic_alloc_region_index_ = 0u;
792 }
793 SetLimit(Begin() + new_capacity);
794 if (Size() > new_capacity) {
795 SetEnd(Limit());
796 }
797 GetMarkBitmap()->SetHeapSize(new_capacity);
798 GetMemMap()->SetSize(new_capacity);
799 }
800
801 void RegionSpace::Dump(std::ostream& os) const {
802 os << GetName() << " "
803 << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
804 }
805
806 void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
807 CHECK(HasAddress(obj));
808 MutexLock mu(Thread::Current(), region_lock_);
809 RefToRegionUnlocked(obj)->Dump(os);
810 }
811
812 void RegionSpace::DumpRegions(std::ostream& os) {
813 MutexLock mu(Thread::Current(), region_lock_);
814 for (size_t i = 0; i < num_regions_; ++i) {
815 regions_[i].Dump(os);
816 }
817 }
818
819 void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
820 MutexLock mu(Thread::Current(), region_lock_);
821 for (size_t i = 0; i < num_regions_; ++i) {
822     Region* reg = &regions_[i];
823 if (!reg->IsFree()) {
824 reg->Dump(os);
825 }
826 }
827 }
828
829 void RegionSpace::RecordAlloc(mirror::Object* ref) {
830 CHECK(ref != nullptr);
831 Region* r = RefToRegion(ref);
832 r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
833 }
834
835 bool RegionSpace::AllocNewTlab(Thread* self,
836 const size_t tlab_size,
837 size_t* bytes_tl_bulk_allocated) {
838 MutexLock mu(self, region_lock_);
839 RevokeThreadLocalBuffersLocked(self, /*reuse=*/ gc::Heap::kUsePartialTlabs);
840 Region* r = nullptr;
841 uint8_t* pos = nullptr;
842 *bytes_tl_bulk_allocated = tlab_size;
843 // First attempt to get a partially used TLAB, if available.
844 if (tlab_size < kRegionSize) {
845 // Fetch the largest partial TLAB. The multimap is ordered in decreasing
846 // size.
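    // Example (hypothetical contents): if partial_tlabs_ held {48 KiB -> rA,
    // 16 KiB -> rB} and tlab_size were 32 KiB, the first (largest) entry would
    // be chosen, the TLAB carved out of the last 48 KiB of rA, and the entry
    // removed from the map; a request larger than 48 KiB would instead fall
    // through to AllocateRegion below.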
847 auto largest_partial_tlab = partial_tlabs_.begin();
848 if (largest_partial_tlab != partial_tlabs_.end() && largest_partial_tlab->first >= tlab_size) {
849 r = largest_partial_tlab->second;
850 pos = r->End() - largest_partial_tlab->first;
851 partial_tlabs_.erase(largest_partial_tlab);
852 DCHECK_GT(r->End(), pos);
853 DCHECK_LE(r->Begin(), pos);
854 DCHECK_GE(r->Top(), pos);
855 *bytes_tl_bulk_allocated -= r->Top() - pos;
856 }
857 }
858 if (r == nullptr) {
859 // Fallback to allocating an entire region as TLAB.
860 r = AllocateRegion(/*for_evac=*/ false);
861 }
862 if (r != nullptr) {
863 uint8_t* start = pos != nullptr ? pos : r->Begin();
864 DCHECK_ALIGNED(start, kObjectAlignment);
865 r->is_a_tlab_ = true;
866 r->thread_ = self;
867 r->SetTop(r->End());
868 self->SetTlab(start, start + tlab_size, r->End());
869 return true;
870 }
871 return false;
872 }
873
874 size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
875 MutexLock mu(Thread::Current(), region_lock_);
876 RevokeThreadLocalBuffersLocked(thread, /*reuse=*/ gc::Heap::kUsePartialTlabs);
877 return 0U;
878 }
879
880 size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread, const bool reuse) {
881 MutexLock mu(Thread::Current(), region_lock_);
882 RevokeThreadLocalBuffersLocked(thread, reuse);
883 return 0U;
884 }
885
886 void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread, bool reuse) {
887 uint8_t* tlab_start = thread->GetTlabStart();
888 DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
889 if (tlab_start != nullptr) {
890 Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
891 r->is_a_tlab_ = false;
892 r->thread_ = nullptr;
893 DCHECK(r->IsAllocated());
894 DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
895 r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
896 thread->GetTlabEnd() - r->Begin());
897 DCHECK_GE(r->End(), thread->GetTlabPos());
898 DCHECK_LE(r->Begin(), thread->GetTlabPos());
899 size_t remaining_bytes = r->End() - thread->GetTlabPos();
900 if (reuse && remaining_bytes >= gc::Heap::kPartialTlabSize) {
901 partial_tlabs_.insert(std::make_pair(remaining_bytes, r));
902 }
903 }
904 thread->ResetTlab();
905 }
906
907 size_t RegionSpace::RevokeAllThreadLocalBuffers() {
908 Thread* self = Thread::Current();
909 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
910 MutexLock mu2(self, *Locks::thread_list_lock_);
911 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
912 for (Thread* thread : thread_list) {
913 RevokeThreadLocalBuffers(thread);
914 }
915 return 0U;
916 }
917
918 void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
919 if (kIsDebugBuild) {
920 DCHECK(!thread->HasTlab());
921 }
922 }
923
924 void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
925 if (kIsDebugBuild) {
926 Thread* self = Thread::Current();
927 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
928 MutexLock mu2(self, *Locks::thread_list_lock_);
929 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
930 for (Thread* thread : thread_list) {
931 AssertThreadLocalBuffersAreRevoked(thread);
932 }
933 }
934 }
935
936 void RegionSpace::Region::Dump(std::ostream& os) const {
937 os << "Region[" << idx_ << "]="
938 << reinterpret_cast<void*>(begin_)
939 << "-" << reinterpret_cast<void*>(Top())
940 << "-" << reinterpret_cast<void*>(end_)
941 << " state=" << state_
942 << " type=" << type_
943 << " objects_allocated=" << objects_allocated_
944 << " alloc_time=" << alloc_time_
945 << " live_bytes=" << live_bytes_;
946
947 if (live_bytes_ != static_cast<size_t>(-1)) {
948 os << " ratio over allocated bytes="
949 << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
950 uint64_t longest_consecutive_free_bytes = GetLongestConsecutiveFreeBytes();
951 os << " longest_consecutive_free_bytes=" << longest_consecutive_free_bytes
952 << " (" << PrettySize(longest_consecutive_free_bytes) << ")";
953 }
954
955 os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
956 << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
957 << " thread=" << thread_ << '\n';
958 }
959
960 uint64_t RegionSpace::Region::GetLongestConsecutiveFreeBytes() const {
961 if (IsFree()) {
962 return kRegionSize;
963 }
964 if (IsLarge() || IsLargeTail()) {
965 return 0u;
966 }
967 uintptr_t max_gap = 0u;
968 uintptr_t prev_object_end = reinterpret_cast<uintptr_t>(Begin());
969 // Iterate through all live objects and find the largest free gap.
970 auto visitor = [&max_gap, &prev_object_end](mirror::Object* obj)
971 REQUIRES_SHARED(Locks::mutator_lock_) {
972 uintptr_t current = reinterpret_cast<uintptr_t>(obj);
973 uintptr_t diff = current - prev_object_end;
974 max_gap = std::max(diff, max_gap);
975 uintptr_t object_end = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
976 prev_object_end = RoundUp(object_end, kAlignment);
977 };
978 space::RegionSpace* region_space = art::Runtime::Current()->GetHeap()->GetRegionSpace();
979 region_space->WalkNonLargeRegion(visitor, this);
980 return static_cast<uint64_t>(max_gap);
981 }
982
983
984 size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
985 size_t num_bytes = obj->SizeOf();
986 if (usable_size != nullptr) {
987 if (LIKELY(num_bytes <= kRegionSize)) {
988 DCHECK(RefToRegion(obj)->IsAllocated());
989 *usable_size = RoundUp(num_bytes, kAlignment);
990 } else {
991 DCHECK(RefToRegion(obj)->IsLarge());
992 *usable_size = RoundUp(num_bytes, kRegionSize);
993 }
994 }
995 return num_bytes;
996 }
997
998 void RegionSpace::Region::Clear(bool zero_and_release_pages) {
999 top_.store(begin_, std::memory_order_relaxed);
1000 state_ = RegionState::kRegionStateFree;
1001 type_ = RegionType::kRegionTypeNone;
1002 objects_allocated_.store(0, std::memory_order_relaxed);
1003 alloc_time_ = 0;
1004 live_bytes_ = static_cast<size_t>(-1);
1005 if (zero_and_release_pages) {
1006 ZeroAndProtectRegion(begin_, end_);
1007 }
1008 is_newly_allocated_ = false;
1009 is_a_tlab_ = false;
1010 thread_ = nullptr;
1011 }
1012
1013 void RegionSpace::TraceHeapSize() {
1014 Heap* heap = Runtime::Current()->GetHeap();
1015 heap->TraceHeapSize(heap->GetBytesAllocated() + EvacBytes());
1016 }
1017
1018 RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
1019 if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
1020 return nullptr;
1021 }
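  // The check above reserves at least half of the regions for evacuation:
  // e.g. (hypothetically) with num_regions_ == 256, a non-evacuation
  // allocation is refused once 128 regions are already non-free, so a copying
  // pass always has free target regions to evacuate into.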
1022 for (size_t i = 0; i < num_regions_; ++i) {
1023 // When using the cyclic region allocation strategy, try to
1024 // allocate a region starting from the last cyclic allocated
1025 // region marker. Otherwise, try to allocate a region starting
1026 // from the beginning of the region space.
1027 size_t region_index = kCyclicRegionAllocation
1028 ? ((cyclic_alloc_region_index_ + i) % num_regions_)
1029 : i;
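    // Example (hypothetical values): with num_regions_ == 8 and
    // cyclic_alloc_region_index_ == 6, the scan order is 6, 7, 0, 1, ..., 5,
    // which spreads allocations across the space instead of always reusing the
    // lowest-indexed free regions.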
1030     Region* r = &regions_[region_index];
1031 if (r->IsFree()) {
1032 r->Unfree(this, time_);
1033 if (use_generational_cc_) {
1034 // TODO: Add an explanation for this assertion.
1035 DCHECK_IMPLIES(for_evac, !r->is_newly_allocated_);
1036 }
1037 if (for_evac) {
1038 ++num_evac_regions_;
1039 TraceHeapSize();
1040 // Evac doesn't count as newly allocated.
1041 } else {
1042 r->SetNewlyAllocated();
1043 ++num_non_free_regions_;
1044 }
1045 if (kCyclicRegionAllocation) {
1046 // Move the cyclic allocation region marker to the region
1047 // following the one that was just allocated.
1048 cyclic_alloc_region_index_ = (region_index + 1) % num_regions_;
1049 }
1050 return r;
1051 }
1052 }
1053 return nullptr;
1054 }
1055
1056 void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
1057 DCHECK(IsFree());
1058 alloc_time_ = alloc_time;
1059 region_space->AdjustNonFreeRegionLimit(idx_);
1060 type_ = RegionType::kRegionTypeToSpace;
1061 if (kProtectClearedRegions) {
1062 CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
1063 }
1064 }
1065
1066 void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
1067 MarkAsAllocated(region_space, alloc_time);
1068 state_ = RegionState::kRegionStateAllocated;
1069 }
1070
1071 void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
1072 MarkAsAllocated(region_space, alloc_time);
1073 state_ = RegionState::kRegionStateLarge;
1074 }
1075
1076 void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
1077 MarkAsAllocated(region_space, alloc_time);
1078 state_ = RegionState::kRegionStateLargeTail;
1079 }
1080
1081 } // namespace space
1082 } // namespace gc
1083 } // namespace art
1084