1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #include <deque>
17 
18 #include "bump_pointer_space-inl.h"
19 #include "bump_pointer_space.h"
20 #include "base/dumpable.h"
21 #include "base/logging.h"
22 #include "gc/accounting/read_barrier_table.h"
23 #include "mirror/class-inl.h"
24 #include "mirror/object-inl.h"
25 #include "thread_list.h"
26 
27 namespace art {
28 namespace gc {
29 namespace space {
30 
31 // If a region has live objects whose size is less than this percent
32 // value of the region size, evacuate the region.
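// In other words, a region is evacuated when
//   live_bytes_ * 100 < kEvacuateLivePercentThreshold * RoundUp(BytesAllocated(), kRegionSize)
// (see Region::ShouldBeEvacuated below).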
33 static constexpr uint kEvacuateLivePercentThreshold = 75U;
34 
35 // Whether we protect the unused and cleared regions.
36 static constexpr bool kProtectClearedRegions = kIsDebugBuild;
37 
38 // Whether we poison memory areas occupied by dead objects in unevacuated regions.
39 static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = kIsDebugBuild;
40 
41 // Special 32-bit value used to poison memory areas occupied by dead
42 // objects in unevacuated regions. Dereferencing this value is expected
43 // to trigger a memory protection fault, as it is unlikely that it
44 // points to a valid, non-protected memory area.
45 static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D;  // "BADDROID"
46 
47 // Whether we check a region's live bytes count against the region bitmap.
48 static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
49 
50 MemMap RegionSpace::CreateMemMap(const std::string& name,
51                                  size_t capacity,
52                                  uint8_t* requested_begin) {
53   CHECK_ALIGNED(capacity, kRegionSize);
54   std::string error_msg;
55   // Ask for an additional kRegionSize of capacity so that we can align the map by kRegionSize
56   // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
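  // For instance, assuming the typical 256 KiB kRegionSize: if MapAnonymous returns a base that is
  // 64 KiB past a region boundary, AlignBy(kRegionSize) below trims both ends back to region
  // boundaries, and the extra region of slack requested here guarantees the aligned map still
  // spans `capacity` bytes (the CHECKs below verify exactly `capacity`).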
57   MemMap mem_map;
58   while (true) {
59     mem_map = MemMap::MapAnonymous(name.c_str(),
60                                    requested_begin,
61                                    capacity + kRegionSize,
62                                    PROT_READ | PROT_WRITE,
63                                    /*low_4gb=*/ true,
64                                    /*reuse=*/ false,
65                                    /*reservation=*/ nullptr,
66                                    &error_msg);
67     if (mem_map.IsValid() || requested_begin == nullptr) {
68       break;
69     }
70     // Retry with no specified request begin.
71     requested_begin = nullptr;
72   }
73   if (!mem_map.IsValid()) {
74     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
75         << PrettySize(capacity) << " with message " << error_msg;
76     PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
77     MemMap::DumpMaps(LOG_STREAM(ERROR));
78     return MemMap::Invalid();
79   }
80   CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
81   CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
82   CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
83   if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
84     // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink it by
85     // kRegionSize at the end.
86     mem_map.SetSize(capacity);
87   } else {
88     // Got an unaligned map. Align both ends.
89     mem_map.AlignBy(kRegionSize);
90   }
91   CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
92   CHECK_ALIGNED(mem_map.End(), kRegionSize);
93   CHECK_EQ(mem_map.Size(), capacity);
94   return mem_map;
95 }
96 
97 RegionSpace* RegionSpace::Create(
98     const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
99   return new RegionSpace(name, std::move(mem_map), use_generational_cc);
100 }
101 
102 RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
103     : ContinuousMemMapAllocSpace(name,
104                                  std::move(mem_map),
105                                  mem_map.Begin(),
106                                  mem_map.End(),
107                                  mem_map.End(),
108                                  kGcRetentionPolicyAlwaysCollect),
109       region_lock_("Region lock", kRegionSpaceRegionLock),
110       use_generational_cc_(use_generational_cc),
111       time_(1U),
112       num_regions_(mem_map_.Size() / kRegionSize),
113       madvise_time_(0U),
114       num_non_free_regions_(0U),
115       num_evac_regions_(0U),
116       max_peak_num_non_free_regions_(0U),
117       non_free_region_index_limit_(0U),
118       current_region_(&full_region_),
119       evac_region_(nullptr),
120       cyclic_alloc_region_index_(0U) {
121   CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
122   CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
123   DCHECK_GT(num_regions_, 0U);
124   regions_.reset(new Region[num_regions_]);
125   uint8_t* region_addr = mem_map_.Begin();
126   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
127     regions_[i].Init(i, region_addr, region_addr + kRegionSize);
128   }
129   mark_bitmap_ =
130       accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity());
131   if (kIsDebugBuild) {
132     CHECK_EQ(regions_[0].Begin(), Begin());
133     for (size_t i = 0; i < num_regions_; ++i) {
134       CHECK(regions_[i].IsFree());
135       CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
136       if (i + 1 < num_regions_) {
137         CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
138       }
139     }
140     CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
141   }
142   DCHECK(!full_region_.IsFree());
143   DCHECK(full_region_.IsAllocated());
144   size_t ignored;
145   DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
146   // Protect the whole region space from the start.
147   Protect();
148 }
149 
150 size_t RegionSpace::FromSpaceSize() {
151   uint64_t num_regions = 0;
152   MutexLock mu(Thread::Current(), region_lock_);
153   for (size_t i = 0; i < num_regions_; ++i) {
154     Region* r = &regions_[i];
155     if (r->IsInFromSpace()) {
156       ++num_regions;
157     }
158   }
159   return num_regions * kRegionSize;
160 }
161 
162 size_t RegionSpace::UnevacFromSpaceSize() {
163   uint64_t num_regions = 0;
164   MutexLock mu(Thread::Current(), region_lock_);
165   for (size_t i = 0; i < num_regions_; ++i) {
166     Region* r = &regions_[i];
167     if (r->IsInUnevacFromSpace()) {
168       ++num_regions;
169     }
170   }
171   return num_regions * kRegionSize;
172 }
173 
174 size_t RegionSpace::ToSpaceSize() {
175   uint64_t num_regions = 0;
176   MutexLock mu(Thread::Current(), region_lock_);
177   for (size_t i = 0; i < num_regions_; ++i) {
178     Region* r = &regions_[i];
179     if (r->IsInToSpace()) {
180       ++num_regions;
181     }
182   }
183   return num_regions * kRegionSize;
184 }
185 
186 void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
187   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
188   DCHECK(GetUseGenerationalCC() || clear_live_bytes);
189   DCHECK(!IsFree() && IsInToSpace());
190   type_ = RegionType::kRegionTypeUnevacFromSpace;
191   if (IsNewlyAllocated()) {
192     // A newly allocated region set as unevac from-space must be
193     // a large or large tail region.
194     DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
195     // Always clear the live bytes of a newly allocated (large or
196     // large tail) region.
197     clear_live_bytes = true;
198     // Clear the "newly allocated" status here, as we do not want the
199     // GC to see it when encountering (and processing) references in the
200     // from-space.
201     //
202     // Invariant: There should be no newly-allocated region in the
203     // from-space (when the from-space exists, which is between the calls
204     // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
205     is_newly_allocated_ = false;
206   }
207   if (clear_live_bytes) {
208     // Reset the live bytes, as we have made a non-evacuation
209     // decision (possibly based on the percentage of live bytes).
210     live_bytes_ = 0;
211   }
212 }
213 
214 bool RegionSpace::Region::GetUseGenerationalCC() {
215   // We are retrieving the info from Heap, instead of the cached version in
216   // RegionSpace, because accessing the Heap from a Region object is easier
217   // than accessing the RegionSpace.
218   return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
219 }
220 
221 inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
222   // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
223   DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
224   DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
225   // The region should be evacuated if:
226   // - the evacuation is forced (!large && `evac_mode == kEvacModeForceAll`); or
227   // - the region was allocated after the start of the previous GC (newly allocated region); or
228   // - !large and the live ratio is below threshold (`kEvacuateLivePercentThreshold`).
229   if (IsLarge()) {
230     // It makes no sense to evacuate in the large case, since the region only contains zero or
231     // one object. If the region is completely empty, we'll reclaim it anyhow. If its one object
232     // is live, we would just be moving around region-aligned memory.
233     return false;
234   }
235   if (UNLIKELY(evac_mode == kEvacModeForceAll)) {
236     return true;
237   }
238   DCHECK(IsAllocated());
239   if (is_newly_allocated_) {
240     // Invariant: newly allocated regions have an undefined live bytes count.
241     DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
242     // We always evacuate newly-allocated non-large regions as we
243     // believe they contain many dead objects (a very simple form of
244     // the generational hypothesis, even before the Sticky-Bit CC
245     // approach).
246     //
247     // TODO: Verify that assertion by collecting statistics on the
248     // number/proportion of live objects in newly allocated regions
249     // in RegionSpace::ClearFromSpace.
250     //
251     // Note that a side effect of evacuating a newly-allocated
252     // non-large region is that the "newly allocated" status will
253     // later be removed, as its live objects will be copied to an
254     // evacuation region, which won't be marked as "newly
255     // allocated" (see RegionSpace::AllocateRegion).
256     return true;
257   } else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
258     bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
259     if (is_live_percent_valid) {
260       DCHECK(IsInToSpace());
261       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
262       DCHECK_LE(live_bytes_, BytesAllocated());
263       const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
264       DCHECK_LE(live_bytes_, bytes_allocated);
265       // Side note: live_percent == 0 does not necessarily mean
266       // there's no live objects due to rounding (there may be a
267       // few).
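      // Worked example (assuming a fully allocated 256 KiB region): with live_bytes_ = 160 KiB,
      // 160 * 100 < 75 * 256 holds (62.5% live), so the region is evacuated; with
      // live_bytes_ = 200 KiB (~78% live), it is kept as unevac from-space.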
268       return live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated;
269     }
270   }
271   return false;
272 }
273 
274 void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
275   // This method is only used when Generational CC collection is enabled.
276   DCHECK(use_generational_cc_);
277 
278   // This code uses logic similar to that used in RegionSpace::FreeLarge
279   // to traverse the regions supporting `obj`.
280   // TODO: Refactor.
281   DCHECK(IsLargeObject(obj));
282   DCHECK_ALIGNED(obj, kRegionSize);
283   size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
284   DCHECK_GT(obj_size, space::RegionSpace::kRegionSize);
285   // Size of the memory area allocated for `obj`.
286   size_t obj_alloc_size = RoundUp(obj_size, space::RegionSpace::kRegionSize);
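  // Example (assuming the typical 256 KiB kRegionSize): a 600 KiB object is backed by
  // RoundUp(600 KiB, 256 KiB) = 768 KiB, i.e. one large region followed by two large tail
  // regions, all of which have their live bytes zeroed below.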
287   uint8_t* begin_addr = reinterpret_cast<uint8_t*>(obj);
288   uint8_t* end_addr = begin_addr + obj_alloc_size;
289   DCHECK_ALIGNED(end_addr, kRegionSize);
290 
291   // Zero the live bytes of the large region and large tail regions containing the object.
292   MutexLock mu(Thread::Current(), region_lock_);
293   for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
294     Region* region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
295     if (addr == begin_addr) {
296       DCHECK(region->IsLarge());
297     } else {
298       DCHECK(region->IsLargeTail());
299     }
300     region->ZeroLiveBytes();
301   }
302   if (kIsDebugBuild && end_addr < Limit()) {
303     // If we aren't at the end of the space, check that the next region is not a large tail.
304     Region* following_region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
305     DCHECK(!following_region->IsLargeTail());
306   }
307 }
308 
309 // Determine which regions to evacuate and mark them as
310 // from-space. Mark the rest as unevacuated from-space.
311 void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
312                                EvacMode evac_mode,
313                                bool clear_live_bytes) {
314   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
315   DCHECK(use_generational_cc_ || clear_live_bytes);
316   ++time_;
317   if (kUseTableLookupReadBarrier) {
318     DCHECK(rb_table->IsAllCleared());
319     rb_table->SetAll();
320   }
321   MutexLock mu(Thread::Current(), region_lock_);
322   // We cannot use the partially utilized TLABs across a GC. Therefore, revoke
323   // them during the thread-flip.
324   partial_tlabs_.clear();
325 
326   // Counter for the number of expected large tail regions following a large region.
327   size_t num_expected_large_tails = 0U;
328   // Flag to store whether the previously seen large region has been evacuated.
329   // This is used to apply the same evacuation policy to related large tail regions.
330   bool prev_large_evacuated = false;
331   VerifyNonFreeRegionLimit();
332   const size_t iter_limit = kUseTableLookupReadBarrier
333       ? num_regions_
334       : std::min(num_regions_, non_free_region_index_limit_);
335   for (size_t i = 0; i < iter_limit; ++i) {
336     Region* r = &regions_[i];
337     RegionState state = r->State();
338     RegionType type = r->Type();
339     if (!r->IsFree()) {
340       DCHECK(r->IsInToSpace());
341       if (LIKELY(num_expected_large_tails == 0U)) {
342         DCHECK((state == RegionState::kRegionStateAllocated ||
343                 state == RegionState::kRegionStateLarge) &&
344                type == RegionType::kRegionTypeToSpace);
345         bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
346         bool is_newly_allocated = r->IsNewlyAllocated();
347         if (should_evacuate) {
348           r->SetAsFromSpace();
349           DCHECK(r->IsInFromSpace());
350         } else {
351           r->SetAsUnevacFromSpace(clear_live_bytes);
352           DCHECK(r->IsInUnevacFromSpace());
353         }
354         if (UNLIKELY(state == RegionState::kRegionStateLarge &&
355                      type == RegionType::kRegionTypeToSpace)) {
356           prev_large_evacuated = should_evacuate;
357           // In 2-phase full heap GC, this function is called after marking is
358           // done. So, it is possible that some newly allocated large object is
359           // marked but its live_bytes is still -1. We need to clear the
360           // mark-bit; otherwise the live_bytes will not be updated in
361           // ConcurrentCopying::ProcessMarkStackRef(), which would break the
362           // logic.
363           if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
364             GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
365           }
366           num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
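          // E.g. a large allocation spanning 5 regions (one large region plus its tails)
          // yields num_expected_large_tails = 5 - 1 = 4 tail regions to process next.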
367           DCHECK_GT(num_expected_large_tails, 0U);
368         }
369       } else {
370         DCHECK(state == RegionState::kRegionStateLargeTail &&
371                type == RegionType::kRegionTypeToSpace);
372         if (prev_large_evacuated) {
373           r->SetAsFromSpace();
374           DCHECK(r->IsInFromSpace());
375         } else {
376           r->SetAsUnevacFromSpace(clear_live_bytes);
377           DCHECK(r->IsInUnevacFromSpace());
378         }
379         --num_expected_large_tails;
380       }
381     } else {
382       DCHECK_EQ(num_expected_large_tails, 0U);
383       if (kUseTableLookupReadBarrier) {
384         // Clear the rb table for to-space regions.
385         rb_table->Clear(r->Begin(), r->End());
386       }
387     }
388     // Invariant: There should be no newly-allocated region in the from-space.
389     DCHECK(!r->is_newly_allocated_);
390   }
391   DCHECK_EQ(num_expected_large_tails, 0U);
392   current_region_ = &full_region_;
393   evac_region_ = &full_region_;
394 }
395 
396 static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
397   ZeroAndReleasePages(begin, end - begin);
398   if (kProtectClearedRegions) {
399     CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
400   }
401 }
402 
403 void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
404                                  /* out */ uint64_t* cleared_objects,
405                                  const bool clear_bitmap) {
406   DCHECK(cleared_bytes != nullptr);
407   DCHECK(cleared_objects != nullptr);
408   *cleared_bytes = 0;
409   *cleared_objects = 0;
410   size_t new_non_free_region_index_limit = 0;
411   // We should avoid calling madvise syscalls while holding region_lock_.
412   // Therefore, we split the work of this function into 2 loops. The first
413   // loop gathers memory ranges that must be madvised. Then we release the lock
414   // and perform madvise on the gathered memory ranges. Finally, we reacquire
415   // the lock and loop over the regions to clear the from-space regions and make
416   // them available for allocation.
417   std::deque<std::pair<uint8_t*, uint8_t*>> madvise_list;
418   // Gather memory ranges that need to be madvised.
419   {
420     MutexLock mu(Thread::Current(), region_lock_);
421     // Lambda expression `expand_madvise_range` adds a region to the "clear block".
422     //
423     // As we iterate over from-space regions, we maintain a "clear block", composed of
424     // adjacent to-be-cleared regions and whose bounds are `clear_block_begin` and
425     // `clear_block_end`. When processing a new region which is not adjacent to
426     // the clear block (discontinuity in cleared regions), the clear block
427     // is added to madvise_list and the clear block is reset (to the most recent
428     // to-be-cleared region).
429     //
430     // This is done in order to combine zeroing and releasing pages to reduce how
431     // often madvise is called. This helps reduce contention on the mmap semaphore
432     // (see b/62194020).
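    // For example, if regions 3, 4, 5 and 9 end up being cleared, only two ranges are queued:
    // [regions_[3].Begin(), regions_[5].End()) and [regions_[9].Begin(), regions_[9].End()),
    // i.e. roughly two madvise calls instead of four.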
433     uint8_t* clear_block_begin = nullptr;
434     uint8_t* clear_block_end = nullptr;
435     auto expand_madvise_range = [&madvise_list, &clear_block_begin, &clear_block_end] (Region* r) {
436       if (clear_block_end != r->Begin()) {
437         if (clear_block_begin != nullptr) {
438           DCHECK(clear_block_end != nullptr);
439           madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
440         }
441         clear_block_begin = r->Begin();
442       }
443       clear_block_end = r->End();
444     };
445     for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
446       Region* r = &regions_[i];
447       // The following check goes through the objects in the region, so it
448       // must be performed before madvising the region. Hence, it can't be
449       // executed in the second loop below.
450       if (kCheckLiveBytesAgainstRegionBitmap) {
451         CheckLiveBytesAgainstRegionBitmap(r);
452       }
453       if (r->IsInFromSpace()) {
454         expand_madvise_range(r);
455       } else if (r->IsInUnevacFromSpace()) {
456         // We must skip tails of live large objects.
457         if (r->LiveBytes() == 0 && !r->IsLargeTail()) {
458           // Special case for 0 live bytes: this means all of the objects in the region are
459           // dead and we can clear it. This is important for large objects since we must
460           // not visit dead ones in RegionSpace::Walk because they may contain dangling
461           // references to invalid objects. It is also better to clear these regions now
462           // instead of at the end of the next GC to save RAM. If we don't clear the regions
463           // here, they will be cleared in next GC by the normal live percent evacuation logic.
464           expand_madvise_range(r);
465           // Also release RAM for large tails.
466           while (i + 1 < num_regions_ && regions_[i + 1].IsLargeTail()) {
467             expand_madvise_range(&regions_[i + 1]);
468             i++;
469           }
470         }
471       }
472     }
473     // There is a small probability that we may reach here with
474     // clear_block_{begin, end} = nullptr. If all the regions allocated since
475     // last GC have been for large objects and all of them survive till this GC
476     // cycle, then there will be no regions in from-space.
477     if (LIKELY(clear_block_begin != nullptr)) {
478       DCHECK(clear_block_end != nullptr);
479       madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
480     }
481   }
482 
483   // Madvise the memory ranges.
484   uint64_t start_time = NanoTime();
485   for (const auto &iter : madvise_list) {
486     ZeroAndProtectRegion(iter.first, iter.second);
487   }
488   madvise_time_ += NanoTime() - start_time;
489 
490   for (const auto &iter : madvise_list) {
491     if (clear_bitmap) {
492       GetLiveBitmap()->ClearRange(
493           reinterpret_cast<mirror::Object*>(iter.first),
494           reinterpret_cast<mirror::Object*>(iter.second));
495     }
496   }
497   madvise_list.clear();
498 
499   // Iterate over regions again and actually make the from space regions
500   // available for allocation.
501   MutexLock mu(Thread::Current(), region_lock_);
502   VerifyNonFreeRegionLimit();
503 
504   // Update max of peak non free region count before reclaiming evacuated regions.
505   max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
506                                             num_non_free_regions_);
507 
508   for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
509     Region* r = &regions_[i];
510     if (r->IsInFromSpace()) {
511       DCHECK(!r->IsTlab());
512       *cleared_bytes += r->BytesAllocated();
513       *cleared_objects += r->ObjectsAllocated();
514       --num_non_free_regions_;
515       r->Clear(/*zero_and_release_pages=*/false);
516     } else if (r->IsInUnevacFromSpace()) {
517       if (r->LiveBytes() == 0) {
518         DCHECK(!r->IsLargeTail());
519         *cleared_bytes += r->BytesAllocated();
520         *cleared_objects += r->ObjectsAllocated();
521         r->Clear(/*zero_and_release_pages=*/false);
522         size_t free_regions = 1;
523         // Also release RAM for large tails.
524         while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
525           regions_[i + free_regions].Clear(/*zero_and_release_pages=*/false);
526           ++free_regions;
527         }
528         num_non_free_regions_ -= free_regions;
529         // When clear_bitmap is true, clearing the bitmap is taken care of in
530         // clear_region().
531         if (!clear_bitmap) {
532           GetLiveBitmap()->ClearRange(
533               reinterpret_cast<mirror::Object*>(r->Begin()),
534               reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
535         }
536         continue;
537       }
538       r->SetUnevacFromSpaceAsToSpace();
539       if (r->AllAllocatedBytesAreLive()) {
540         // Try to optimize the number of ClearRange calls by checking whether the next regions
541         // can also be cleared.
542         size_t regions_to_clear_bitmap = 1;
543         while (i + regions_to_clear_bitmap < num_regions_) {
544           Region* const cur = &regions_[i + regions_to_clear_bitmap];
545           if (!cur->AllAllocatedBytesAreLive()) {
546             DCHECK(!cur->IsLargeTail());
547             break;
548           }
549           CHECK(cur->IsInUnevacFromSpace());
550           cur->SetUnevacFromSpaceAsToSpace();
551           ++regions_to_clear_bitmap;
552         }
553 
554         // Optimization (for full CC only): If *all* allocated bytes in a region
555         // are live, then the live-bit information for these objects is
556         // superfluous:
557         // - We can determine that these objects are all live by using
558         //   Region::AllAllocatedBytesAreLive (which just checks whether
559         //   `LiveBytes() == static_cast<size_t>(Top() - Begin())`).
560         // - We can visit the objects in this region using
561         //   RegionSpace::GetNextObject, i.e. without resorting to the
562         //   live bits (see RegionSpace::WalkInternal).
563         // Therefore, we can clear the bits for these objects in the
564         // (live) region space bitmap (and release the corresponding pages).
565         //
566         // This optimization is incompatible with Generational CC, because:
567         // - minor (young-generation) collections need to know which objects
568         //   were marked during the previous GC cycle, meaning all mark bitmaps
569         //   (this includes the region space bitmap) need to be preserved
570         //   between a (minor or major) collection N and a following minor
571         //   collection N+1;
572         // - at this stage (in the current GC cycle), we cannot determine
573         //   whether the next collection will be a minor or a major one;
574         // This means that we need to be conservative and always preserve the
575         // region space bitmap when using Generational CC.
576         // Note that major collections do not require the previous mark bitmaps
577         // to be preserved, and as a matter of fact they do clear the region space
578         // bitmap. But they cannot do so before we know the next GC cycle will
579         // be a major one, so this operation happens at the beginning of such a
580         // major collection, before marking starts.
581         if (!use_generational_cc_) {
582           GetLiveBitmap()->ClearRange(
583               reinterpret_cast<mirror::Object*>(r->Begin()),
584               reinterpret_cast<mirror::Object*>(r->Begin()
585                                                 + regions_to_clear_bitmap * kRegionSize));
586         }
587         // Skip over extra regions for which we cleared the bitmaps: we shall not clear them,
588         // as they are unevac regions that are live.
589         // Subtract one because the for-loop will increment `i`.
590         i += regions_to_clear_bitmap - 1;
591       } else {
592         // TODO: Explain why we do not poison dead objects in region
593         // `r` when it has an undefined live bytes count (i.e. when
594         // `r->LiveBytes() == static_cast<size_t>(-1)`) with
595         // Generational CC.
596         if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
597           // Only some allocated bytes are live in this unevac region.
598           // This should only happen for an allocated non-large region.
599           DCHECK(r->IsAllocated()) << r->State();
600           if (kPoisonDeadObjectsInUnevacuatedRegions) {
601             PoisonDeadObjectsInUnevacuatedRegion(r);
602           }
603         }
604       }
605     }
606     // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
607     Region* last_checked_region = &regions_[i];
608     if (!last_checked_region->IsFree()) {
609       new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
610                                                  last_checked_region->Idx() + 1);
611     }
612   }
613   // Update non_free_region_index_limit_.
614   SetNonFreeRegionLimit(new_non_free_region_index_limit);
615   evac_region_ = nullptr;
616   num_non_free_regions_ += num_evac_regions_;
617   num_evac_regions_ = 0;
618 }
619 
620 void RegionSpace::CheckLiveBytesAgainstRegionBitmap(Region* r) {
621   if (r->LiveBytes() == static_cast<size_t>(-1)) {
622     // Live bytes count is undefined for `r`; nothing to check here.
623     return;
624   }
625 
626   // Functor walking the region space bitmap for the range corresponding
627   // to region `r` and calculating the sum of live bytes.
628   size_t live_bytes_recount = 0u;
629   auto recount_live_bytes =
630       [&r, &live_bytes_recount](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
631     DCHECK_ALIGNED(obj, kAlignment);
632     if (r->IsLarge()) {
633       // If `r` is a large region, then it contains at most one
634       // object, which must start at the beginning of the
635       // region. The live byte count in that case is equal to the bytes
636       // allocated in the regions (large region + large tail regions).
637       DCHECK_EQ(reinterpret_cast<uint8_t*>(obj), r->Begin());
638       DCHECK_EQ(live_bytes_recount, 0u);
639       live_bytes_recount = r->Top() - r->Begin();
640     } else {
641       DCHECK(r->IsAllocated())
642           << "r->State()=" << r->State() << " r->LiveBytes()=" << r->LiveBytes();
643       size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
644       size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
645       live_bytes_recount += alloc_size;
646     }
647   };
648   // Visit live objects in `r` and recount the live bytes.
649   GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
650                                     reinterpret_cast<uintptr_t>(r->Top()),
651                                     recount_live_bytes);
652   // Check that this recount matches the region's current live bytes count.
653   DCHECK_EQ(live_bytes_recount, r->LiveBytes());
654 }
655 
656 // Poison the memory area in range [`begin`, `end`) with value `kPoisonDeadObject`.
657 static void PoisonUnevacuatedRange(uint8_t* begin, uint8_t* end) {
658   static constexpr size_t kPoisonDeadObjectSize = sizeof(kPoisonDeadObject);
659   static_assert(IsPowerOfTwo(kPoisonDeadObjectSize) &&
660                 IsPowerOfTwo(RegionSpace::kAlignment) &&
661                 (kPoisonDeadObjectSize < RegionSpace::kAlignment),
662                 "RegionSpace::kAlignment should be a multiple of kPoisonDeadObjectSize"
663                 " and both should be powers of 2");
664   DCHECK_ALIGNED(begin, kPoisonDeadObjectSize);
665   DCHECK_ALIGNED(end, kPoisonDeadObjectSize);
666   uint32_t* begin_addr = reinterpret_cast<uint32_t*>(begin);
667   uint32_t* end_addr = reinterpret_cast<uint32_t*>(end);
668   std::fill(begin_addr, end_addr, kPoisonDeadObject);
669 }
670 
671 void RegionSpace::PoisonDeadObjectsInUnevacuatedRegion(Region* r) {
672   // The live byte count of `r` should be different from -1, as this
673   // region should neither be a newly allocated region nor an
674   // evacuated region.
675   DCHECK_NE(r->LiveBytes(), static_cast<size_t>(-1))
676       << "Unexpected live bytes count of -1 in " << Dumpable<Region>(*r);
677 
678   // Past-the-end address of the previously visited (live) object (or
679   // the beginning of the region, if `maybe_poison` has not run yet).
680   uint8_t* prev_obj_end = reinterpret_cast<uint8_t*>(r->Begin());
681 
682   // Functor poisoning the space between `obj` and the previously
683   // visited (live) object (or the beginning of the region), if any.
684   auto maybe_poison = [&prev_obj_end](mirror::Object* obj) REQUIRES(Locks::mutator_lock_) {
685     DCHECK_ALIGNED(obj, kAlignment);
686     uint8_t* cur_obj_begin = reinterpret_cast<uint8_t*>(obj);
687     if (cur_obj_begin != prev_obj_end) {
688       // There is a gap (dead object(s)) between the previously
689       // visited (live) object (or the beginning of the region) and
690       // `obj`; poison that space.
691       PoisonUnevacuatedRange(prev_obj_end, cur_obj_begin);
692     }
693     prev_obj_end = reinterpret_cast<uint8_t*>(GetNextObject(obj));
694   };
695 
696   // Visit live objects in `r` and poison gaps (dead objects) between them.
697   GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
698                                     reinterpret_cast<uintptr_t>(r->Top()),
699                                     maybe_poison);
700   // Poison memory between the last live object and the end of the region, if any.
701   if (prev_obj_end < r->Top()) {
702     PoisonUnevacuatedRange(prev_obj_end, r->Top());
703   }
704 }
705 
706 bool RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
707                                                size_t failed_alloc_bytes) {
708   size_t max_contiguous_allocation = 0;
709   MutexLock mu(Thread::Current(), region_lock_);
710 
711   if (current_region_->End() - current_region_->Top() > 0) {
712     max_contiguous_allocation = current_region_->End() - current_region_->Top();
713   }
714 
715   size_t max_contiguous_free_regions = 0;
716   size_t num_contiguous_free_regions = 0;
717   bool prev_free_region = false;
718   for (size_t i = 0; i < num_regions_; ++i) {
719     Region* r = &regions_[i];
720     if (r->IsFree()) {
721       if (!prev_free_region) {
722         CHECK_EQ(num_contiguous_free_regions, 0U);
723         prev_free_region = true;
724       }
725       ++num_contiguous_free_regions;
726     } else if (prev_free_region) {
727       CHECK_NE(num_contiguous_free_regions, 0U);
728       max_contiguous_free_regions = std::max(max_contiguous_free_regions,
729                                              num_contiguous_free_regions);
730       num_contiguous_free_regions = 0U;
731       prev_free_region = false;
732     }
733   }
734   max_contiguous_allocation = std::max(max_contiguous_allocation,
735                                        max_contiguous_free_regions * kRegionSize);
736 
737   // Calculate how many regions are available for allocations as we have to ensure
738   // that enough regions are left for evacuation.
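  // At most half of the regions may hold non-evacuated allocations (see AllocateRegion), so,
  // for example, 1024 total regions with 400 non-free leave 1024 / 2 - 400 = 112 allocatable.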
739   size_t regions_free_for_alloc = num_regions_ / 2 - num_non_free_regions_;
740 
741   max_contiguous_allocation = std::min(max_contiguous_allocation,
742                                        regions_free_for_alloc * kRegionSize);
743   if (failed_alloc_bytes > max_contiguous_allocation) {
744     // Region space does not normally fragment in the conventional sense. However we can run out
745     // of region space prematurely if we have many threads, each with a partially committed TLAB.
746     // The whole TLAB uses up region address space, but we only count the section that was
747     // actually given to the thread so far as allocated. For unlikely allocation request sequences
748     // involving largish objects that don't qualify for large objects space, we may also be unable
749     // to fully utilize entire TLABs, and thus generate enough actual fragmentation to get
750     // here. This appears less likely, since we usually reuse sufficiently large TLAB "tails"
751     // that are no longer needed.
752     os << "; failed due to fragmentation (largest possible contiguous allocation "
753        << max_contiguous_allocation << " bytes). Number of " << PrettySize(kRegionSize)
754        << " sized free regions are: " << regions_free_for_alloc
755        << ". Likely cause: (1) Too much memory in use, and "
756        << "(2) many threads or many larger objects of the wrong kind";
757     return true;
758   }
759   // Caller's job to print failed_alloc_bytes.
760   return false;
761 }
762 
763 void RegionSpace::Clear() {
764   MutexLock mu(Thread::Current(), region_lock_);
765   for (size_t i = 0; i < num_regions_; ++i) {
766     Region* r = &regions_[i];
767     if (!r->IsFree()) {
768       --num_non_free_regions_;
769     }
770     r->Clear(/*zero_and_release_pages=*/true);
771   }
772   SetNonFreeRegionLimit(0);
773   DCHECK_EQ(num_non_free_regions_, 0u);
774   current_region_ = &full_region_;
775   evac_region_ = &full_region_;
776 }
777 
778 void RegionSpace::Protect() {
779   if (kProtectClearedRegions) {
780     CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_NONE);
781   }
782 }
783 
784 void RegionSpace::Unprotect() {
785   if (kProtectClearedRegions) {
786     CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_READ | PROT_WRITE);
787   }
788 }
789 
790 void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
791   MutexLock mu(Thread::Current(), region_lock_);
792   CHECK_LE(new_capacity, NonGrowthLimitCapacity());
793   size_t new_num_regions = new_capacity / kRegionSize;
794   if (non_free_region_index_limit_ > new_num_regions) {
795     LOG(WARNING) << "Couldn't clamp region space as there are regions in use beyond growth limit.";
796     return;
797   }
798   num_regions_ = new_num_regions;
799   if (kCyclicRegionAllocation && cyclic_alloc_region_index_ >= num_regions_) {
800     cyclic_alloc_region_index_ = 0u;
801   }
802   SetLimit(Begin() + new_capacity);
803   if (Size() > new_capacity) {
804     SetEnd(Limit());
805   }
806   GetMarkBitmap()->SetHeapSize(new_capacity);
807   GetMemMap()->SetSize(new_capacity);
808 }
809 
810 void RegionSpace::Dump(std::ostream& os) const {
811   os << GetName() << " "
812      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
813 }
814 
815 void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
816   CHECK(HasAddress(obj));
817   MutexLock mu(Thread::Current(), region_lock_);
818   RefToRegionUnlocked(obj)->Dump(os);
819 }
820 
821 void RegionSpace::DumpRegions(std::ostream& os) {
822   MutexLock mu(Thread::Current(), region_lock_);
823   for (size_t i = 0; i < num_regions_; ++i) {
824     regions_[i].Dump(os);
825   }
826 }
827 
828 void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
829   MutexLock mu(Thread::Current(), region_lock_);
830   for (size_t i = 0; i < num_regions_; ++i) {
831     Region* reg = &regions_[i];
832     if (!reg->IsFree()) {
833       reg->Dump(os);
834     }
835   }
836 }
837 
838 void RegionSpace::RecordAlloc(mirror::Object* ref) {
839   CHECK(ref != nullptr);
840   Region* r = RefToRegion(ref);
841   r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
842 }
843 
844 bool RegionSpace::AllocNewTlab(Thread* self,
845                                const size_t tlab_size,
846                                size_t* bytes_tl_bulk_allocated) {
847   MutexLock mu(self, region_lock_);
848   RevokeThreadLocalBuffersLocked(self, /*reuse=*/ gc::Heap::kUsePartialTlabs);
849   Region* r = nullptr;
850   uint8_t* pos = nullptr;
851   *bytes_tl_bulk_allocated = tlab_size;
852   // First attempt to get a partially used TLAB, if available.
853   if (tlab_size < kRegionSize) {
854     // Fetch the largest partial TLAB. The multimap is ordered in decreasing
855     // size.
856     auto largest_partial_tlab = partial_tlabs_.begin();
857     if (largest_partial_tlab != partial_tlabs_.end() && largest_partial_tlab->first >= tlab_size) {
858       r = largest_partial_tlab->second;
859       pos = r->End() - largest_partial_tlab->first;
860       partial_tlabs_.erase(largest_partial_tlab);
861       DCHECK_GT(r->End(), pos);
862       DCHECK_LE(r->Begin(), pos);
863       DCHECK_GE(r->Top(), pos);
864       *bytes_tl_bulk_allocated -= r->Top() - pos;
865     }
866   }
867   if (r == nullptr) {
868     // Fallback to allocating an entire region as TLAB.
869     r = AllocateRegion(/*for_evac=*/ false);
870   }
871   if (r != nullptr) {
872     uint8_t* start = pos != nullptr ? pos : r->Begin();
873     DCHECK_ALIGNED(start, kObjectAlignment);
874     r->is_a_tlab_ = true;
875     r->thread_ = self;
876     r->SetTop(r->End());
877     self->SetTlab(start, start + tlab_size, r->End());
878     return true;
879   }
880   return false;
881 }
882 
883 size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
884   MutexLock mu(Thread::Current(), region_lock_);
885   RevokeThreadLocalBuffersLocked(thread, /*reuse=*/ gc::Heap::kUsePartialTlabs);
886   return 0U;
887 }
888 
889 size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread, const bool reuse) {
890   MutexLock mu(Thread::Current(), region_lock_);
891   RevokeThreadLocalBuffersLocked(thread, reuse);
892   return 0U;
893 }
894 
895 void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread, bool reuse) {
896   uint8_t* tlab_start = thread->GetTlabStart();
897   DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
898   if (tlab_start != nullptr) {
899     Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
900     r->is_a_tlab_ = false;
901     r->thread_ = nullptr;
902     DCHECK(r->IsAllocated());
903     DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
904     r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
905                                     thread->GetTlabEnd() - r->Begin());
906     DCHECK_GE(r->End(), thread->GetTlabPos());
907     DCHECK_LE(r->Begin(), thread->GetTlabPos());
908     size_t remaining_bytes = r->End() - thread->GetTlabPos();
909     if (reuse && remaining_bytes >= gc::Heap::kPartialTlabSize) {
910       partial_tlabs_.insert(std::make_pair(remaining_bytes, r));
911     }
912   }
913   thread->ResetTlab();
914 }
915 
916 size_t RegionSpace::RevokeAllThreadLocalBuffers() {
917   Thread* self = Thread::Current();
918   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
919   MutexLock mu2(self, *Locks::thread_list_lock_);
920   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
921   for (Thread* thread : thread_list) {
922     RevokeThreadLocalBuffers(thread);
923   }
924   return 0U;
925 }
926 
927 void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
928   if (kIsDebugBuild) {
929     DCHECK(!thread->HasTlab());
930   }
931 }
932 
933 void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
934   if (kIsDebugBuild) {
935     Thread* self = Thread::Current();
936     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
937     MutexLock mu2(self, *Locks::thread_list_lock_);
938     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
939     for (Thread* thread : thread_list) {
940       AssertThreadLocalBuffersAreRevoked(thread);
941     }
942   }
943 }
944 
945 void RegionSpace::Region::Dump(std::ostream& os) const {
946   os << "Region[" << idx_ << "]="
947      << reinterpret_cast<void*>(begin_)
948      << "-" << reinterpret_cast<void*>(Top())
949      << "-" << reinterpret_cast<void*>(end_)
950      << " state=" << state_
951      << " type=" << type_
952      << " objects_allocated=" << objects_allocated_
953      << " alloc_time=" << alloc_time_
954      << " live_bytes=" << live_bytes_;
955 
956   if (live_bytes_ != static_cast<size_t>(-1)) {
957     os << " ratio over allocated bytes="
958        << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
959     uint64_t longest_consecutive_free_bytes = GetLongestConsecutiveFreeBytes();
960     os << " longest_consecutive_free_bytes=" << longest_consecutive_free_bytes
961        << " (" << PrettySize(longest_consecutive_free_bytes) << ")";
962   }
963 
964   os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
965      << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
966      << " thread=" << thread_ << '\n';
967 }
968 
969 uint64_t RegionSpace::Region::GetLongestConsecutiveFreeBytes() const {
970   if (IsFree()) {
971     return kRegionSize;
972   }
973   if (IsLarge() || IsLargeTail()) {
974     return 0u;
975   }
976   uintptr_t max_gap = 0u;
977   uintptr_t prev_object_end = reinterpret_cast<uintptr_t>(Begin());
978   // Iterate through all live objects and find the largest free gap.
979   auto visitor = [&max_gap, &prev_object_end](mirror::Object* obj)
980     REQUIRES_SHARED(Locks::mutator_lock_) {
981     uintptr_t current = reinterpret_cast<uintptr_t>(obj);
982     uintptr_t diff = current - prev_object_end;
983     max_gap = std::max(diff, max_gap);
984     uintptr_t object_end = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
985     prev_object_end = RoundUp(object_end, kAlignment);
986   };
987   space::RegionSpace* region_space = art::Runtime::Current()->GetHeap()->GetRegionSpace();
988   region_space->WalkNonLargeRegion(visitor, this);
989   return static_cast<uint64_t>(max_gap);
990 }
991 
992 
993 size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
994   size_t num_bytes = obj->SizeOf();
995   if (usable_size != nullptr) {
996     if (LIKELY(num_bytes <= kRegionSize)) {
997       DCHECK(RefToRegion(obj)->IsAllocated());
998       *usable_size = RoundUp(num_bytes, kAlignment);
999     } else {
1000       DCHECK(RefToRegion(obj)->IsLarge());
1001       *usable_size = RoundUp(num_bytes, kRegionSize);
1002     }
1003   }
1004   return num_bytes;
1005 }
1006 
1007 void RegionSpace::Region::Clear(bool zero_and_release_pages) {
1008   top_.store(begin_, std::memory_order_relaxed);
1009   state_ = RegionState::kRegionStateFree;
1010   type_ = RegionType::kRegionTypeNone;
1011   objects_allocated_.store(0, std::memory_order_relaxed);
1012   alloc_time_ = 0;
1013   live_bytes_ = static_cast<size_t>(-1);
1014   if (zero_and_release_pages) {
1015     ZeroAndProtectRegion(begin_, end_);
1016   }
1017   is_newly_allocated_ = false;
1018   is_a_tlab_ = false;
1019   thread_ = nullptr;
1020 }
1021 
1022 void RegionSpace::TraceHeapSize() {
1023   Heap* heap = Runtime::Current()->GetHeap();
1024   heap->TraceHeapSize(heap->GetBytesAllocated() + EvacBytes());
1025 }
1026 
1027 RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
1028   if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
1029     return nullptr;
1030   }
1031   for (size_t i = 0; i < num_regions_; ++i) {
1032     // When using the cyclic region allocation strategy, try to
1033     // allocate a region starting from the last cyclic allocated
1034     // region marker. Otherwise, try to allocate a region starting
1035     // from the beginning of the region space.
1036     size_t region_index = kCyclicRegionAllocation
1037         ? ((cyclic_alloc_region_index_ + i) % num_regions_)
1038         : i;
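    // E.g. with num_regions_ = 8 and cyclic_alloc_region_index_ = 5, the regions are probed in
    // the order 5, 6, 7, 0, 1, 2, 3, 4 under cyclic allocation; otherwise simply 0..7.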
1039     Region* r = &regions_[region_index];
1040     if (r->IsFree()) {
1041       r->Unfree(this, time_);
1042       if (use_generational_cc_) {
1043         // TODO: Add an explanation for this assertion.
1044         DCHECK_IMPLIES(for_evac, !r->is_newly_allocated_);
1045       }
1046       if (for_evac) {
1047         ++num_evac_regions_;
1048         TraceHeapSize();
1049         // Evac doesn't count as newly allocated.
1050       } else {
1051         r->SetNewlyAllocated();
1052         ++num_non_free_regions_;
1053       }
1054       if (kCyclicRegionAllocation) {
1055         // Move the cyclic allocation region marker to the region
1056         // following the one that was just allocated.
1057         cyclic_alloc_region_index_ = (region_index + 1) % num_regions_;
1058       }
1059       return r;
1060     }
1061   }
1062   return nullptr;
1063 }
1064 
1065 void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
1066   DCHECK(IsFree());
1067   alloc_time_ = alloc_time;
1068   region_space->AdjustNonFreeRegionLimit(idx_);
1069   type_ = RegionType::kRegionTypeToSpace;
1070   if (kProtectClearedRegions) {
1071     CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
1072   }
1073 }
1074 
1075 void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
1076   MarkAsAllocated(region_space, alloc_time);
1077   state_ = RegionState::kRegionStateAllocated;
1078 }
1079 
1080 void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
1081   MarkAsAllocated(region_space, alloc_time);
1082   state_ = RegionState::kRegionStateLarge;
1083 }
1084 
1085 void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
1086   MarkAsAllocated(region_space, alloc_time);
1087   state_ = RegionState::kRegionStateLargeTail;
1088 }
1089 
1090 }  // namespace space
1091 }  // namespace gc
1092 }  // namespace art
1093