/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = false;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_ALIGNED(base, kPageSize);
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
  // Zero the memory explicitly (don't rely on the mem map being zero-initialized).
  if (!kMadviseZeroes) {
    memset(base_, 0, max_capacity);
  }
  CHECK_EQ(madvise(base_, max_capacity, MADV_DONTNEED), 0);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}
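
// A minimal construction sketch (hypothetical caller, not part of this file;
// the heap normally goes through a RosAlloc space). The base pointer is
// assumed to come from a page-aligned, madvised-zero MemMap, and the mode and
// threshold values here are illustrative:
//
//   void* base = mem_map->Begin();  // page-aligned backing store
//   RosAlloc rosalloc(base,
//                     initial_size,                       // starting footprint
//                     maximum_size,                       // >= initial_size
//                     RosAlloc::kPageReleaseModeSizeAndEnd,
//                     /* running_on_memory_tool */ false,
//                     /* page_release_size_threshold */ 4 * MB);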

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}

void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
    case kPageMapRun:
      page_map_[page_map_idx] = kPageMapRun;
      for (size_t i = 1; i < num_pages; i++) {
        page_map_[page_map_idx + i] = kPageMapRunPart;
      }
      break;
    case kPageMapLargeObject:
      page_map_[page_map_idx] = kPageMapLargeObject;
      for (size_t i = 1; i < num_pages; i++) {
        page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
      }
      break;
    default:
      LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
      break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}
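
// A worked example of the first-fit-and-split policy above (illustrative
// numbers, assuming 4KB pages): a request for num_pages = 3 (req_byte_size =
// 12KB) scanning free_page_runs_ = {8KB run, 32KB run} skips the 8KB run,
// erases the 32KB run, re-inserts its 20KB tail as a new FreePageRun, and
// returns the 12KB head. The set thus stays a collection of maximal,
// page-aligned free runs ordered by address.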

size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
  case kPageMapRun:
    pm_part_type = kPageMapRunPart;
    break;
  case kPageMapLargeObject:
    pm_part_type = kPageMapLargeObjectPart;
    break;
  default:
    LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx << ", pm_type="
               << static_cast<int>(pm_type) << ", ptr=" << std::hex
               << reinterpret_cast<intptr_t>(ptr);
    return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}
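
// Coalescing sketch for the code above (illustrative page indices): freeing
// pages [4,6) while free_page_runs_ holds [2,4) and [6,9) first absorbs the
// higher neighbor ([4,6) + [6,9) -> [4,9)), then is absorbed by the lower one
// ([2,4) + [4,9) -> [2,9)), so a single run [2,9) is inserted and the set
// never contains two adjacent free runs.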

void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}
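
// Size accounting sketch (assuming 4KB pages for illustration): a 9000-byte
// large-object request rounds up to num_pages = 3, so bytes_allocated,
// usable_size and bytes_tl_bulk_allocated are all reported as 12288; the
// 3288-byte tail is usable slack rather than a separate free block.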

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}
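
// Locking sketch for the free paths: Free() takes bulk_free_lock_ as a
// reader, so individual frees may run concurrently with one another, while
// BulkFree() below takes it as a writer to exclude them. Within
// FreeInternal(), lock_ additionally serializes page map reads and
// FreePages() calls.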

RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    DCHECK(!new_run->IsThreadLocal());
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
    new_run->InitFreeList();
  }
  return new_run;
}
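
// Note on the prefetch loop above: kPrefetchStride is 64 bytes, matching the
// typical cache line size of the targeted CPUs, so each __builtin_prefetch
// touches one distinct line of the slot area without committing (dirtying)
// the underlying pages the way ZeroData() would.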

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run. Make sure the current run is set to the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  void* slot_addr;
  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since its free lists are empty.
      if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}
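
// Fast-path sketch (hypothetical caller; output variable names are
// illustrative): for a small request the bracket is resolved first, then the
// thread-local run is tried without taking any lock:
//
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   void* obj = rosalloc->AllocFromRun(self, /* size */ 24, &bytes_allocated,
//                                      &usable_size, &bytes_tl_bulk_allocated);
//   // bytes_tl_bulk_allocated == 0 when the slot came from an already
//   // accounted thread-local run, and covers the whole refill otherwise.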

size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just add the slot to the thread-local free list and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->AddToThreadLocalFreeList(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeaderAndSlotHeaders();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}
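
// Run life-cycle sketch for the non-thread-local case above: a run moves from
// full_runs_ (debug only) to non_full_runs_ when its first slot is freed,
// stays in non_full_runs_ while partially used, and once IsAllFree() leaves
// both sets and has its pages returned via FreePages(), unless it is the
// bracket's current run, which is kept for reuse.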

template<bool kUseTail>
std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) {
  std::string free_list_str;
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) {
    bool is_last = slot->Next() == nullptr;
    uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
        reinterpret_cast<uintptr_t>(FirstSlot());
    DCHECK_EQ(slot_offset % bracket_size, 0U);
    uintptr_t slot_idx = slot_offset / bracket_size;
    if (!is_last) {
      free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx)));
    } else {
      free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx)));
    }
  }
  return free_list_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " free_list=" << FreeListToStr(&free_list_)
         << " bulk_free_list=" << FreeListToStr(&bulk_free_list_)
         << " thread_local_list=" << FreeListToStr(&thread_local_free_list_)
         << " }" << std::endl;
  return stream.str();
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(slot, 0, bracket_size);
  free_list_.Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Merge the thread local free list into the free list and clear the thread local free list.
  const uint8_t idx = size_bracket_idx_;
  size_t thread_local_free_list_size = thread_local_free_list_.Size();
  const size_t size_before = free_list_.Size();
  free_list_.Merge(&thread_local_free_list_);
  const size_t size_after = free_list_.Size();
  DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0);
  DCHECK_LE(size_before, size_after);
  *is_all_free_after_out = free_list_.Size() == numOfSlots[idx];
  // Return true if at least one slot was added to the free list.
  return size_before < size_after;
}
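
// Merge semantics sketch (illustrative numbers): with numOfSlots[idx] == 42,
// an empty free_list_ and 5 slots on thread_local_free_list_, the merge grows
// free_list_ from 0 to 5 slots, returns true (something was reclaimed), and
// sets *is_all_free_after_out to false since 5 != 42.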

inline void RosAlloc::Run::MergeBulkFreeListToFreeList() {
  DCHECK(!IsThreadLocal());
  // Merge the bulk free list into the free list and clear the bulk free list.
  free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() {
  DCHECK(IsThreadLocal());
  // Merge the bulk free list into the thread local free list and clear the bulk free list.
  thread_local_free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) {
  DCHECK(IsThreadLocal());
  AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) {
  return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr,
                                                 SlotFreeList<true>* free_list,
                                                 const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  memset(slot, 0, bracket_size);
  free_list->Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
  return bracket_size;
}

inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
  DCHECK(IsAllFree());
  const uint8_t idx = size_bracket_idx_;
  // Zero the slot header (next pointers).
  for (Slot* slot = free_list_.Head(); slot != nullptr; ) {
    Slot* next_slot = slot->Next();
    slot->Clear();
    slot = next_slot;
  }
  // Zero the header.
  memset(this, 0, headerSizes[idx]);
  // Check that the entire run is all zero.
  if (kIsDebugBuild) {
    const size_t size = numOfPages[idx] * kPageSize;
    const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
    for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
      CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
    }
  }
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot());
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  // Free slots are on the free list and the allocated/used slots are not. We traverse the free list
  // to find out and record which slots are free in the is_free array.
  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
    size_t slot_idx = SlotIndex(slot);
    DCHECK_LT(slot_idx, num_slots);
    is_free[slot_idx] = true;
  }
  if (IsThreadLocal()) {
    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
      size_t slot_idx = SlotIndex(slot);
      DCHECK_LT(slot_idx, num_slots);
      is_free[slot_idx] = true;
    }
  }
  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
    if (!is_free[slot_idx]) {
      handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
    } else {
      handler(slot_addr, slot_addr + bracket_size, 0, arg);
    }
  }
}
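
// A minimal example handler for InspectAllSlots() (hypothetical, not part of
// this file), showing the callback contract: used_bytes is bracket_size for a
// live slot and 0 for a free one, so an accumulator passed as callback_arg
// measures the run's live bytes:
//
//   static void CountLiveBytes(void* start ATTRIBUTE_UNUSED,
//                              void* end ATTRIBUTE_UNUSED,
//                              size_t used_bytes, void* callback_arg) {
//     *reinterpret_cast<size_t*>(callback_arg) += used_bytes;
//   }
//   size_t live_bytes = 0;
//   run->InspectAllSlots(CountLiveBytes, &live_bytes);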

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Currently enabled.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First add the slots to free to each run's bulk free list without taking
  // the size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef __ANDROID__
  std::vector<Run*> runs;
#else
  std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Add the slot to the run's bulk free list.
    freed_bytes += run->AddToBulkFreeList(ptr);
#ifdef __ANDROID__
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

  // Now, iterate over the affected runs and merge each run's bulk free
  // list into its free list (for non-thread-local runs) or into its
  // thread-local free list (for thread-local runs.)
  for (Run* run : runs) {
#ifdef __ANDROID__
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->MergeBulkFreeListToThreadLocalFreeList();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeListToFreeList();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      auto* non_full_runs = &non_full_runs_[idx];
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeaderAndSlotHeaders();
          MutexLock lock_mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}
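
// Usage sketch (hypothetical GC caller; CollectDeadPointers is an assumed
// helper): the sweep phase batches dead object pointers and frees them in one
// pass, paying the bulk_free_lock_ writer lock once and each size bracket
// lock once per affected run instead of once per pointer:
//
//   std::vector<void*> dead = CollectDeadPointers();
//   size_t reclaimed = rosalloc->BulkFree(self, dead.data(), dead.size());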

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = nullptr;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == nullptr);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR start) fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
1296         stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm) << std::endl;
1297         break;
1298     }
1299   }
1300   return stream.str();
1301 }
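// Illustrative example (not from the original source): the page map stores
// one byte per page, so decoding the state of an arbitrary address is a
// single index computation. A minimal sketch using the members above:
//
//   size_t pm_idx = (reinterpret_cast<uint8_t*>(addr) - base_) / kPageSize;
//   switch (page_map_[pm_idx]) {
//     case kPageMapReleased:         // Empty page madvised back to the kernel.
//     case kPageMapEmpty:            // Page belonging to a free page run.
//     case kPageMapLargeObject:      // First page of a multi-page allocation.
//     case kPageMapLargeObjectPart:  // Continuation page of a large object.
//     case kPageMapRun:              // First page of a slot run.
//     case kPageMapRunPart:          // Continuation page of a run.
//       break;
//   }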
1302 
1303 size_t RosAlloc::UsableSize(const void* ptr) {
1304   DCHECK_LE(base_, ptr);
1305   DCHECK_LT(ptr, base_ + footprint_);
1306   size_t pm_idx = RoundDownToPageMapIndex(ptr);
1307   MutexLock mu(Thread::Current(), lock_);
1308   switch (page_map_[pm_idx]) {
1309     case kPageMapReleased:
1310       // Fall-through.
1311     case kPageMapEmpty:
1312       LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1313                  << std::hex << reinterpret_cast<intptr_t>(ptr);
1314       break;
1315     case kPageMapLargeObject: {
1316       size_t num_pages = 1;
1317       size_t idx = pm_idx + 1;
1318       size_t end = page_map_size_;
1319       while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1320         num_pages++;
1321         idx++;
1322       }
1323       return num_pages * kPageSize;
1324     }
1325     case kPageMapLargeObjectPart:
1326       LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1327                  << std::hex << reinterpret_cast<intptr_t>(ptr);
1328       break;
1329     case kPageMapRun:
1330     case kPageMapRunPart: {
1331       // Find the beginning of the run.
1332       while (page_map_[pm_idx] != kPageMapRun) {
1333         pm_idx--;
1334         DCHECK_LT(pm_idx, capacity_ / kPageSize);
1335       }
1336       DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
1337       Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1338       DCHECK_EQ(run->magic_num_, kMagicNum);
1339       size_t idx = run->size_bracket_idx_;
1340       size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
1341           - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
1342       DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1343       return IndexToBracketSize(idx);
1344     }
1345     default: {
1346       LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
1347       break;
1348     }
1349   }
1350   return 0;
1351 }
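// Illustrative note: UsableSize() reports what the allocator reserved, not
// what the caller asked for. Assuming an 8-byte thread-local bracket quantum,
// a 13-byte request is served from the 16-byte bracket and reports 16; a
// large object spanning three pages reports 3 * kPageSize regardless of its
// exact byte size.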
1352 
1353 bool RosAlloc::Trim() {
1354   MutexLock mu(Thread::Current(), lock_);
1355   FreePageRun* last_free_page_run;
1356   DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1357   auto it = free_page_runs_.rbegin();
1358   if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1359     // Remove the last free page run, if any.
1360     DCHECK(last_free_page_run->IsFree());
1361     DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
1362     DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1363     DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1364     free_page_runs_.erase(last_free_page_run);
1365     size_t decrement = last_free_page_run->ByteSize(this);
1366     size_t new_footprint = footprint_ - decrement;
1367     DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1368     size_t new_num_of_pages = new_footprint / kPageSize;
1369     DCHECK_GE(page_map_size_, new_num_of_pages);
1370     // Zero out the tail of the page map.
1371     uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
1372     uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
1373     DCHECK_LE(madvise_begin, page_map_mem_map_->End());
1374     size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
1375     if (madvise_size > 0) {
1376       DCHECK_ALIGNED(madvise_begin, kPageSize);
1377       DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
1378       if (!kMadviseZeroes) {
1379         memset(madvise_begin, 0, madvise_size);
1380       }
1381       CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
1382     }
1383     if (madvise_begin > zero_begin) {
1384       memset(zero_begin, 0, madvise_begin - zero_begin);
1385     }
1386     page_map_size_ = new_num_of_pages;
1387     free_page_run_size_map_.resize(new_num_of_pages);
1388     DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
1389     ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
1390     if (kTraceRosAlloc) {
1391       LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1392                 << footprint_ << " to " << new_footprint;
1393     }
1394     DCHECK_LT(new_footprint, footprint_);
1395     DCHECK_LT(new_footprint, capacity_);
1396     footprint_ = new_footprint;
1397     return true;
1398   }
1399   return false;
1400 }
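// Worked example (illustrative, assuming a 4 KB kPageSize): if footprint_
// covers 64 pages and the last free page run spans the final 16, Trim()
// erases that run, zeroes/madvises the now-unused tail of the page map,
// shrinks page_map_size_ by 16 entries, and calls
// ArtRosAllocMoreCore(this, -(16 * 4096)), dropping the footprint from
// 262144 to 196608 bytes.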
1401 
1402 void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1403                           void* arg) {
1404   // Note: no need to use this to release pages as we already do so in FreePages().
1405   if (handler == nullptr) {
1406     return;
1407   }
1408   MutexLock mu(Thread::Current(), lock_);
1409   size_t pm_end = page_map_size_;
1410   size_t i = 0;
1411   while (i < pm_end) {
1412     uint8_t pm = page_map_[i];
1413     switch (pm) {
1414       case kPageMapReleased:
1415         // Fall-through.
1416       case kPageMapEmpty: {
1417         // The start of a free page run.
1418         FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1419         DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1420         size_t fpr_size = fpr->ByteSize(this);
1421         DCHECK_ALIGNED(fpr_size, kPageSize);
1422         void* start = fpr;
1423         if (kIsDebugBuild) {
1424           // In the debug build, the first page of a free page run
1425           // contains a magic number for debugging. Exclude it.
1426           start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
1427         }
1428         void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
1429         handler(start, end, 0, arg);
1430         size_t num_pages = fpr_size / kPageSize;
1431         if (kIsDebugBuild) {
1432           for (size_t j = i + 1; j < i + num_pages; ++j) {
1433             DCHECK(IsFreePage(j));
1434           }
1435         }
1436         i += num_pages;
1437         DCHECK_LE(i, pm_end);
1438         break;
1439       }
1440       case kPageMapLargeObject: {
1441         // The start of a large object.
1442         size_t num_pages = 1;
1443         size_t idx = i + 1;
1444         while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1445           num_pages++;
1446           idx++;
1447         }
1448         void* start = base_ + i * kPageSize;
1449         void* end = base_ + (i + num_pages) * kPageSize;
1450         size_t used_bytes = num_pages * kPageSize;
1451         handler(start, end, used_bytes, arg);
1452         if (kIsDebugBuild) {
1453           for (size_t j = i + 1; j < i + num_pages; ++j) {
1454             DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1455           }
1456         }
1457         i += num_pages;
1458         DCHECK_LE(i, pm_end);
1459         break;
1460       }
1461       case kPageMapLargeObjectPart:
1462         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
1463         break;
1464       case kPageMapRun: {
1465         // The start of a run.
1466         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1467         DCHECK_EQ(run->magic_num_, kMagicNum);
1468         // The dedicated full run doesn't contain any real allocations, don't visit the slots in
1469         // there.
1470         run->InspectAllSlots(handler, arg);
1471         size_t num_pages = numOfPages[run->size_bracket_idx_];
1472         if (kIsDebugBuild) {
1473           for (size_t j = i + 1; j < i + num_pages; ++j) {
1474             DCHECK_EQ(page_map_[j], kPageMapRunPart);
1475           }
1476         }
1477         i += num_pages;
1478         DCHECK_LE(i, pm_end);
1479         break;
1480       }
1481       case kPageMapRunPart:
1482         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
1483         break;
1484       default:
1485         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
1486         break;
1487     }
1488   }
1489 }
1490 
1491 size_t RosAlloc::Footprint() {
1492   MutexLock mu(Thread::Current(), lock_);
1493   return footprint_;
1494 }
1495 
1496 size_t RosAlloc::FootprintLimit() {
1497   MutexLock mu(Thread::Current(), lock_);
1498   return capacity_;
1499 }
1500 
1501 void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1502   MutexLock mu(Thread::Current(), lock_);
1503   DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1504   // Only growing is supported here. But Trim() is supported.
1505   if (capacity_ < new_capacity) {
1506     CHECK_LE(new_capacity, max_capacity_);
1507     capacity_ = new_capacity;
1508     VLOG(heap) << "new capacity=" << capacity_;
1509   }
1510 }
1511 
1512 // The code below may be called by a mutator thread itself just before thread termination.
1513 size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
1514   Thread* self = Thread::Current();
1515   size_t free_bytes = 0U;
1516   for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
1517     MutexLock mu(self, *size_bracket_locks_[idx]);
1518     Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
1519     CHECK(thread_local_run != nullptr);
1520     // Invalid means already revoked.
1521     DCHECK(thread_local_run->IsThreadLocal());
1522     if (thread_local_run != dedicated_full_run_) {
1523       // Note the thread local run may not be full here.
1524       thread->SetRosAllocRun(idx, dedicated_full_run_);
1525       DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
1526       // Count the number of free slots left.
1527       size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
1528       free_bytes += num_free_slots * bracketSizes[idx];
1529       // The above bracket index lock guards the thread local free list against a race
1530       // with the GC thread unioning the bulk free list into the thread local free list
1531       // in BulkFree(). While the run is flagged thread local, the GC thread updates the
1532       // thread local free list in BulkFree(), and the latest thread local free list is
1533       // merged into the free list either when this run becomes full or when it is
1534       // revoked here, so the free list will be up to date. Once the run is no longer
1535       // thread local, the GC thread merges the bulk free list in the next BulkFree().
1536       // Thus there is no need to merge the bulk free list into the free list again here.
1537       bool dont_care;
1538       thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care);
1539       thread_local_run->SetIsThreadLocal(false);
1540       DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1541       DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
1542       RevokeRun(self, idx, thread_local_run);
1543     }
1544   }
1545   return free_bytes;
1546 }
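// Illustrative sketch: revocation swaps each per-thread run pointer for the
// dedicated_full_run_ sentinel, which always looks full. A hypothetical,
// simplified fast path shows why that alone forces the slow path:
//
//   Run* tl_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
//   void* slot = tl_run->AllocSlot();  // Always fails on dedicated_full_run_.
//   if (slot == nullptr) {
//     // Take size_bracket_locks_[idx] and install a fresh thread-local run.
//   }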
1547 
1548 void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
1549   size_bracket_locks_[idx]->AssertHeld(self);
1550   DCHECK(run != dedicated_full_run_);
1551   if (run->IsFull()) {
1552     if (kIsDebugBuild) {
1553       full_runs_[idx].insert(run);
1554       DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
1555       if (kTraceRosAlloc) {
1556         LOG(INFO) << __PRETTY_FUNCTION__  << " : Inserted run 0x" << std::hex
1557                   << reinterpret_cast<intptr_t>(run)
1558                   << " into full_runs_[" << std::dec << idx << "]";
1559       }
1560     }
1561   } else if (run->IsAllFree()) {
1562     run->ZeroHeaderAndSlotHeaders();
1563     MutexLock mu(self, lock_);
1564     FreePages(self, run, true);
1565   } else {
1566     non_full_runs_[idx].insert(run);
1567     DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
1568     if (kTraceRosAlloc) {
1569       LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
1570                 << reinterpret_cast<intptr_t>(run)
1571                 << " into non_full_runs_[" << std::dec << idx << "]";
1572     }
1573   }
1574 }
1575 
1576 void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
1577   // Revoke the current runs which share the same idx as thread local runs.
1578   Thread* self = Thread::Current();
1579   for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
1580     MutexLock mu(self, *size_bracket_locks_[idx]);
1581     if (current_runs_[idx] != dedicated_full_run_) {
1582       RevokeRun(self, idx, current_runs_[idx]);
1583       current_runs_[idx] = dedicated_full_run_;
1584     }
1585   }
1586 }
1587 
1588 size_t RosAlloc::RevokeAllThreadLocalRuns() {
1589   // This is called when mutator threads won't allocate, such as at
1590   // Zygote creation time or during a GC pause.
1591   MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1592   MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
1593   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1594   size_t free_bytes = 0U;
1595   for (Thread* thread : thread_list) {
1596     free_bytes += RevokeThreadLocalRuns(thread);
1597   }
1598   RevokeThreadUnsafeCurrentRuns();
1599   return free_bytes;
1600 }
1601 
1602 void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
1603   if (kIsDebugBuild) {
1604     Thread* self = Thread::Current();
1605     // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
1606     ReaderMutexLock wmu(self, bulk_free_lock_);
1607     for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
1608       MutexLock mu(self, *size_bracket_locks_[idx]);
1609       Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
1610       DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
1611     }
1612   }
1613 }
1614 
1615 void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
1616   if (kIsDebugBuild) {
1617     Thread* self = Thread::Current();
1618     MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
1619     MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
1620     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1621     for (Thread* t : thread_list) {
1622       AssertThreadLocalRunsAreRevoked(t);
1623     }
1624     for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
1625       MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
1626       CHECK_EQ(current_runs_[idx], dedicated_full_run_);
1627     }
1628   }
1629 }
1630 
1631 void RosAlloc::Initialize() {
1632   // bracketSizes.
1633   static_assert(kNumRegularSizeBrackets == kNumOfSizeBrackets - 2,
1634                 "There should be two non-regular brackets");
1635   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1636     if (i < kNumThreadLocalSizeBrackets) {
1637       bracketSizes[i] = kThreadLocalBracketQuantumSize * (i + 1);
1638     } else if (i < kNumRegularSizeBrackets) {
1639       bracketSizes[i] = kBracketQuantumSize * (i - kNumThreadLocalSizeBrackets + 1) +
1640           (kThreadLocalBracketQuantumSize * kNumThreadLocalSizeBrackets);
1641     } else if (i == kNumOfSizeBrackets - 2) {
1642       bracketSizes[i] = 1 * KB;
1643     } else {
1644       DCHECK_EQ(i, kNumOfSizeBrackets - 1);
1645       bracketSizes[i] = 2 * KB;
1646     }
1647     if (kTraceRosAlloc) {
1648       LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1649     }
1650   }
1651   // numOfPages.
1652   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1653     if (i < kNumThreadLocalSizeBrackets) {
1654       numOfPages[i] = 1;
1655     } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) {
1656       numOfPages[i] = 1;
1657     } else if (i < kNumRegularSizeBrackets) {
1658       numOfPages[i] = 1;
1659     } else if (i == kNumOfSizeBrackets - 2) {
1660       numOfPages[i] = 2;
1661     } else {
1662       DCHECK_EQ(i, kNumOfSizeBrackets - 1);
1663       numOfPages[i] = 4;
1664     }
1665     if (kTraceRosAlloc) {
1666       LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1667     }
1668   }
1669   // Compute numOfSlots and slotOffsets.
1670   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1671     size_t bracket_size = bracketSizes[i];
1672     size_t run_size = kPageSize * numOfPages[i];
1673     size_t max_num_of_slots = run_size / bracket_size;
1674     // Compute the actual number of slots by taking the header and
1675     // alignment into account.
1676     size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t));
1677     DCHECK_EQ(fixed_header_size, 80U);
1678     size_t header_size = 0;
1679     size_t num_of_slots = 0;
1680     // Search for the maximum number of slots that allows enough space
1681     // for the header.
1682     for (int s = max_num_of_slots; s >= 0; s--) {
1683       size_t tmp_slots_size = bracket_size * s;
1684       size_t tmp_unaligned_header_size = fixed_header_size;
1685       // Align up the unaligned header size. bracket_size may not be a power of two.
1686       size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1687           tmp_unaligned_header_size :
1688           tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
1689       DCHECK_EQ(tmp_header_size % bracket_size, 0U);
1690       DCHECK_EQ(tmp_header_size % sizeof(uint64_t), 0U);
1691       if (tmp_slots_size + tmp_header_size <= run_size) {
1692         // Found the right number of slots, that is, there was enough
1693         // space for the header (including the bit maps).
1694         num_of_slots = s;
1695         header_size = tmp_header_size;
1696         break;
1697       }
1698     }
1699     DCHECK_GT(num_of_slots, 0U) << i;
1700     DCHECK_GT(header_size, 0U) << i;
1701     // Add the padding for the alignment remainder.
1702     header_size += run_size % bracket_size;
1703     DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
1704     numOfSlots[i] = num_of_slots;
1705     headerSizes[i] = header_size;
1706     if (kTraceRosAlloc) {
1707       LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
1708                 << ", headerSizes[" << i << "]=" << headerSizes[i];
1709     }
1710   }
1711   // Set up the dedicated full run so that nobody can successfully allocate from it.
1712   if (kIsDebugBuild) {
1713     dedicated_full_run_->magic_num_ = kMagicNum;
1714   }
1715   // It doesn't matter which size bracket we use since the main goal is to have the allocation
1716   // fail 100% of the time you attempt to allocate into the dedicated full run.
1717   dedicated_full_run_->size_bracket_idx_ = 0;
1718   DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U);  // It looks full.
1719   dedicated_full_run_->SetIsThreadLocal(true);
1720 
1721   // The smallest bracket size must be at least as large as the sizeof(Slot).
1722   DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
1723   // Check the invariants between the max bracket sizes and the number of brackets.
1724   DCHECK_EQ(kMaxThreadLocalBracketSize, bracketSizes[kNumThreadLocalSizeBrackets - 1]);
1725   DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]);
1726 }
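// Worked example (illustrative, assuming a 4 KB kPageSize and the 80-byte
// fixed header DCHECKed above): for a 16-byte bracket with numOfPages == 1,
// run_size is 4096 and max_num_of_slots is 256. The search stops at s == 251,
// since the header rounds up to 80 (80 % 16 == 0) and 251 * 16 + 80 == 4096.
// With 4096 % 16 == 0 there is no extra padding, so numOfSlots == 251 and
// headerSizes == 80 for that bracket.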
1727 
1728 void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1729                                       size_t used_bytes, void* arg) {
1730   if (used_bytes == 0) {
1731     return;
1732   }
1733   size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1734   *bytes_allocated += used_bytes;
1735 }
1736 
1737 void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1738                                         size_t used_bytes, void* arg) {
1739   if (used_bytes == 0) {
1740     return;
1741   }
1742   size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1743   ++(*objects_allocated);
1744 }
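// Usage sketch (illustrative): the two callbacks above are shaped to be
// passed to InspectAll(). Assuming a RosAlloc* named rosalloc, totals could
// be gathered like so:
//
//   size_t bytes_allocated = 0;
//   size_t objects_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
//   rosalloc->InspectAll(RosAlloc::ObjectsAllocatedCallback, &objects_allocated);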
1745 
1746 void RosAlloc::Verify() {
1747   Thread* self = Thread::Current();
1748   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
1749       << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
1750   MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
1751   ReaderMutexLock wmu(self, bulk_free_lock_);
1752   std::vector<Run*> runs;
1753   {
1754     MutexLock lock_mu(self, lock_);
1755     size_t pm_end = page_map_size_;
1756     size_t i = 0;
1757     size_t memory_tool_modifier = is_running_on_memory_tool_ ?
1758         2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :  // Redzones before and after.
1759         0;
1760     while (i < pm_end) {
1761       uint8_t pm = page_map_[i];
1762       switch (pm) {
1763         case kPageMapReleased:
1764           // Fall-through.
1765         case kPageMapEmpty: {
1766           // The start of a free page run.
1767           FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1768           DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
1769           CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
1770               << "An empty page must belong to the free page run set";
1771           size_t fpr_size = fpr->ByteSize(this);
1772           CHECK_ALIGNED(fpr_size, kPageSize)
1773               << "A free page run size isn't page-aligned : " << fpr_size;
1774           size_t num_pages = fpr_size / kPageSize;
1775           CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1776               << "A free page run size must be > 0 : " << fpr_size;
1777           for (size_t j = i + 1; j < i + num_pages; ++j) {
1778             CHECK(IsFreePage(j))
1779                 << "A mismatch between the page map table for kPageMapEmpty "
1780                 << " at page index " << j
1781                 << " and the free page run size : page index range : "
1782                 << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
1783           }
1784           i += num_pages;
1785           CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1786                               << std::endl << DumpPageMap();
1787           break;
1788         }
1789         case kPageMapLargeObject: {
1790           // The start of a large object.
1791           size_t num_pages = 1;
1792           size_t idx = i + 1;
1793           while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1794             num_pages++;
1795             idx++;
1796           }
1797           uint8_t* start = base_ + i * kPageSize;
1798           if (is_running_on_memory_tool_) {
1799             start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
1800           }
1801           mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
1802           size_t obj_size = obj->SizeOf();
1803           CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
1804               << "A rosalloc large object size must be > " << kLargeSizeThreshold;
1805           CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
1806               << "A rosalloc large object size " << obj_size + memory_tool_modifier
1807               << " does not match the page map table " << (num_pages * kPageSize)
1808               << std::endl << DumpPageMap();
1809           i += num_pages;
1810           CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1811                               << std::endl << DumpPageMap();
1812           break;
1813         }
1814         case kPageMapLargeObjectPart:
1815           LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
1816           break;
1817         case kPageMapRun: {
1818           // The start of a run.
1819           Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1820           DCHECK_EQ(run->magic_num_, kMagicNum);
1821           size_t idx = run->size_bracket_idx_;
1822           CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
1823           size_t num_pages = numOfPages[idx];
1824           CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1825               << "Run size must be > 0 : " << num_pages;
1826           for (size_t j = i + 1; j < i + num_pages; ++j) {
1827             CHECK_EQ(page_map_[j], kPageMapRunPart)
1828                 << "A mismatch between the page map table for kPageMapRunPart "
1829                 << " at page index " << j
1830                 << " and the run size : page index range " << i << " to " << (i + num_pages)
1831                 << std::endl << DumpPageMap();
1832           }
1833           // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
1834           runs.push_back(run);
1835           i += num_pages;
1836           CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1837                               << std::endl << DumpPageMap();
1838           break;
1839         }
1840         case kPageMapRunPart:
1841           // Fall-through.
1842         default:
1843           LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
1844           break;
1845       }
1846     }
1847   }
1848   std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
1849   for (Thread* thread : threads) {
1850     for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
1851       MutexLock brackets_mu(self, *size_bracket_locks_[i]);
1852       Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
1853       CHECK(thread_local_run != nullptr);
1854       CHECK(thread_local_run->IsThreadLocal());
1855       CHECK(thread_local_run == dedicated_full_run_ ||
1856             thread_local_run->size_bracket_idx_ == i);
1857     }
1858   }
1859   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1860     MutexLock brackets_mu(self, *size_bracket_locks_[i]);
1861     Run* current_run = current_runs_[i];
1862     CHECK(current_run != nullptr);
1863     if (current_run != dedicated_full_run_) {
1864       // The dedicated full run is currently marked as thread local.
1865       CHECK(!current_run->IsThreadLocal());
1866       CHECK_EQ(current_run->size_bracket_idx_, i);
1867     }
1868   }
1869   // Call Verify() here for the lock order.
1870   for (auto& run : runs) {
1871     run->Verify(self, this, is_running_on_memory_tool_);
1872   }
1873 }
1874 
1875 void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
1876   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
1877   const size_t idx = size_bracket_idx_;
1878   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
1879   uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
1880   const size_t num_slots = numOfSlots[idx];
1881   size_t bracket_size = IndexToBracketSize(idx);
1882   CHECK_EQ(slot_base + num_slots * bracket_size,
1883            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
1884       << "Mismatch in the end address of the run " << Dump();
1885   // Check that the bulk free list is empty. It's only used during BulkFree().
1886   CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump();
1887   // Check the thread local runs, the current runs, and the run sets.
1888   if (IsThreadLocal()) {
1889     // If it's a thread local run, then it must be pointed to by an owner thread.
1890     bool owner_found = false;
1891     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1892     for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1893       Thread* thread = *it;
1894       for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
1895         MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
1896         Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
1897         if (thread_local_run == this) {
1898           CHECK(!owner_found)
1899               << "A thread local run has more than one owner thread " << Dump();
1900           CHECK_EQ(i, idx)
1901               << "A mismatching size bracket index in a thread local run " << Dump();
1902           owner_found = true;
1903         }
1904       }
1905     }
1906     CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
1907   } else {
1908     // If it's not thread local, check that the thread local free list is empty.
1909     CHECK(IsThreadLocalFreeListEmpty())
1910         << "A non-thread-local run's thread local free list isn't empty "
1911         << Dump();
1912     // Check if it's a current run for the size bracket.
1913     bool is_current_run = false;
1914     for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1915       MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
1916       Run* current_run = rosalloc->current_runs_[i];
1917       if (idx == i) {
1918         if (this == current_run) {
1919           is_current_run = true;
1920         }
1921       } else {
1922         // If the size bracket index does not match, then it must not
1923         // be a current run.
1924         CHECK_NE(this, current_run)
1925             << "A current run points to a run with a wrong size bracket index " << Dump();
1926       }
1927     }
1928     // If it's neither a thread-local nor a current run, then it must
1929     // be in a run set.
1930     if (!is_current_run) {
1931       MutexLock mu(self, rosalloc->lock_);
1932       auto& non_full_runs = rosalloc->non_full_runs_[idx];
1933       // If it's all free, it must be a free page run rather than a run.
1934       CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
1935       if (!IsFull()) {
1936         // If it's not full, it must be in the non-full run set.
1937         CHECK(non_full_runs.find(this) != non_full_runs.end())
1938             << "A non-full run isn't in the non-full run set " << Dump();
1939       } else {
1940         // If it's full, it must be in the full run set (debug build only).
1941         if (kIsDebugBuild) {
1942           auto& full_runs = rosalloc->full_runs_[idx];
1943           CHECK(full_runs.find(this) != full_runs.end())
1944               << "A full run isn't in the full run set " << Dump();
1945         }
1946       }
1947     }
1948   }
1949   // Check each slot.
1950   size_t memory_tool_modifier = running_on_memory_tool ?
1951       2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
1952       0U;
1953   // TODO: reuse InspectAllSlots().
1954   std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
1955   // Mark the free slots; the remaining ones are allocated.
1956   for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1957     size_t slot_idx = SlotIndex(slot);
1958     DCHECK_LT(slot_idx, num_slots);
1959     is_free[slot_idx] = true;
1960   }
1961   if (IsThreadLocal()) {
1962     for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1963       size_t slot_idx = SlotIndex(slot);
1964       DCHECK_LT(slot_idx, num_slots);
1965       is_free[slot_idx] = true;
1966     }
1967   }
1968   for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
1969     uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
1970     if (running_on_memory_tool) {
1971       slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
1972     }
1973     if (!is_free[slot_idx]) {
1974       // The slot is allocated.
1975       mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
1976       size_t obj_size = obj->SizeOf();
1977       CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
1978           << "A run slot contains a large object " << Dump();
1979       CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
1980           << PrettyTypeOf(obj) << " "
1981           << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
1982           << " A run slot contains an object with wrong size " << Dump();
1983     }
1984   }
1985 }
1986 
1987 size_t RosAlloc::ReleasePages() {
1988   VLOG(heap) << "RosAlloc::ReleasePages()";
1989   DCHECK(!DoesReleaseAllPages());
1990   Thread* self = Thread::Current();
1991   size_t reclaimed_bytes = 0;
1992   size_t i = 0;
1993   // Check the page map size which might have changed due to grow/shrink.
1994   while (i < page_map_size_) {
1995     // Reading the page map without a lock is racy but the race is benign since it should only
1996     // result in occasionally not releasing pages which we could release.
1997     uint8_t pm = page_map_[i];
1998     switch (pm) {
1999       case kPageMapReleased:
2000         // Fall through.
2001       case kPageMapEmpty: {
2002         // This is currently the start of a free page run.
2003         // Acquire the lock to prevent other threads racing in and modifying the page map.
2004         MutexLock mu(self, lock_);
2005         // Check that it's still empty after we acquired the lock since another thread could have
2006         // raced in and placed an allocation here.
2007         if (IsFreePage(i)) {
2008           // Free page runs can start with a released page if we coalesced a released page free
2009           // page run with an empty page run.
2010           FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
2011           // There is a race condition where FreePage can coalesce fpr with the previous
2012           // free page run before we acquire lock_. In that case free_page_runs_.find will not find
2013           // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
2014           // to the next page.
2015           if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
2016             size_t fpr_size = fpr->ByteSize(this);
2017             DCHECK_ALIGNED(fpr_size, kPageSize);
2018             uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
2019             reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
2020             size_t pages = fpr_size / kPageSize;
2021             CHECK_GT(pages, 0U) << "Infinite loop probable";
2022             i += pages;
2023             DCHECK_LE(i, page_map_size_);
2024             break;
2025           }
2026         }
2027         FALLTHROUGH_INTENDED;
2028       }
2029       case kPageMapLargeObject:      // Fall through.
2030       case kPageMapLargeObjectPart:  // Fall through.
2031       case kPageMapRun:              // Fall through.
2032       case kPageMapRunPart:          // Fall through.
2033         ++i;
2034         break;  // Skip.
2035       default:
2036         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
2037         break;
2038     }
2039   }
2040   return reclaimed_bytes;
2041 }
2042 
2043 size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
2044   DCHECK_ALIGNED(start, kPageSize);
2045   DCHECK_ALIGNED(end, kPageSize);
2046   DCHECK_LT(start, end);
2047   if (kIsDebugBuild) {
2048     // In the debug build, the first page of a free page run
2049     // contains a magic number for debugging. Exclude it.
2050     start += kPageSize;
2051 
2052     // Single pages won't be released.
2053     if (start == end) {
2054       return 0;
2055     }
2056   }
2057   if (!kMadviseZeroes) {
2058     // TODO: Do this when we resurrect the page instead.
2059     memset(start, 0, end - start);
2060   }
2061   CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
2062   size_t pm_idx = ToPageMapIndex(start);
2063   size_t reclaimed_bytes = 0;
2064   // Calculate reclaimed bytes and update the page map.
2065   const size_t max_idx = pm_idx + (end - start) / kPageSize;
2066   for (; pm_idx < max_idx; ++pm_idx) {
2067     DCHECK(IsFreePage(pm_idx));
2068     if (page_map_[pm_idx] == kPageMapEmpty) {
2069       // Mark the page as released and update how many bytes we released.
2070       reclaimed_bytes += kPageSize;
2071       page_map_[pm_idx] = kPageMapReleased;
2072     }
2073   }
2074   return reclaimed_bytes;
2075 }
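// Worked example (illustrative, assuming a 4 KB kPageSize): for an 8-page
// free page run in a debug build, the first page holds the magic number and
// is skipped, the remaining 7 pages are madvised, and each kPageMapEmpty
// entry flips to kPageMapReleased. reclaimed_bytes is therefore at most
// 7 * 4096 = 28672 bytes, and less if part of the range was already released.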
2076 
2077 void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
2078   Thread* self = Thread::Current();
2079   size_t largest_continuous_free_pages = 0;
2080   WriterMutexLock wmu(self, bulk_free_lock_);
2081   MutexLock mu(self, lock_);
2082   for (FreePageRun* fpr : free_page_runs_) {
2083     largest_continuous_free_pages = std::max(largest_continuous_free_pages,
2084                                              fpr->ByteSize(this));
2085   }
2086   if (failed_alloc_bytes > kLargeSizeThreshold) {
2087     // Large allocation.
2088     size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
2089     if (required_bytes > largest_continuous_free_pages) {
2090       os << "; failed due to fragmentation (required contiguous free "
2091          << required_bytes << " bytes where largest contiguous free "
2092          << largest_continuous_free_pages << " bytes)";
2093     }
2094   } else {
2095     // Non-large allocation.
2096     size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
2097     if (required_bytes > largest_continuous_free_pages) {
2098       os << "; failed due to fragmentation (required contiguous free "
2099          << required_bytes << " bytes for a new buffer where largest contiguous free "
2100          << largest_continuous_free_pages << " bytes)";
2101     }
2102   }
2103 }
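// Worked example (illustrative, assuming a 4 KB kPageSize): a failed
// 40960-byte large allocation needs RoundUp(40960, 4096) = 40960 bytes, i.e.
// 10 contiguous free pages. If the largest free page run spans only 8 pages
// (32768 bytes), the message above reports 40960 required against 32768
// available.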
2104 
2105 void RosAlloc::DumpStats(std::ostream& os) {
2106   Thread* self = Thread::Current();
2107   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
2108       << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
2109   size_t num_large_objects = 0;
2110   size_t num_pages_large_objects = 0;
2111   // These arrays are zero initialized.
2112   std::unique_ptr<size_t[]> num_runs(new size_t[kNumOfSizeBrackets]());
2113   std::unique_ptr<size_t[]> num_pages_runs(new size_t[kNumOfSizeBrackets]());
2114   std::unique_ptr<size_t[]> num_slots(new size_t[kNumOfSizeBrackets]());
2115   std::unique_ptr<size_t[]> num_used_slots(new size_t[kNumOfSizeBrackets]());
2116   std::unique_ptr<size_t[]> num_metadata_bytes(new size_t[kNumOfSizeBrackets]());
2117   ReaderMutexLock rmu(self, bulk_free_lock_);
2118   MutexLock lock_mu(self, lock_);
2119   for (size_t i = 0; i < page_map_size_; ) {
2120     uint8_t pm = page_map_[i];
2121     switch (pm) {
2122       case kPageMapReleased:
2123       case kPageMapEmpty:
2124         ++i;
2125         break;
2126       case kPageMapLargeObject: {
2127         size_t num_pages = 1;
2128         size_t idx = i + 1;
2129         while (idx < page_map_size_ && page_map_[idx] == kPageMapLargeObjectPart) {
2130           num_pages++;
2131           idx++;
2132         }
2133         num_large_objects++;
2134         num_pages_large_objects += num_pages;
2135         i += num_pages;
2136         break;
2137       }
2138       case kPageMapLargeObjectPart:
2139         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
2140                    << DumpPageMap();
2141         break;
2142       case kPageMapRun: {
2143         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
2144         size_t idx = run->size_bracket_idx_;
2145         size_t num_pages = numOfPages[idx];
2146         num_runs[idx]++;
2147         num_pages_runs[idx] += num_pages;
2148         num_slots[idx] += numOfSlots[idx];
2149         size_t num_free_slots = run->NumberOfFreeSlots();
2150         num_used_slots[idx] += numOfSlots[idx] - num_free_slots;
2151         num_metadata_bytes[idx] += headerSizes[idx];
2152         i += num_pages;
2153         break;
2154       }
2155       case kPageMapRunPart:
2156         // Fall-through.
2157       default:
2158         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
2159                    << DumpPageMap();
2160         break;
2161     }
2162   }
2163   os << "RosAlloc stats:\n";
2164   for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
2165     os << "Bracket " << i << " (" << bracketSizes[i] << "):"
2166        << " #runs=" << num_runs[i]
2167        << " #pages=" << num_pages_runs[i]
2168        << " (" << PrettySize(num_pages_runs[i] * kPageSize) << ")"
2169        << " #metadata_bytes=" << PrettySize(num_metadata_bytes[i])
2170        << " #slots=" << num_slots[i] << " (" << PrettySize(num_slots[i] * bracketSizes[i]) << ")"
2171        << " #used_slots=" << num_used_slots[i]
2172        << " (" << PrettySize(num_used_slots[i] * bracketSizes[i]) << ")\n";
2173   }
2174   os << "Large #allocations=" << num_large_objects
2175      << " #pages=" << num_pages_large_objects
2176      << " (" << PrettySize(num_pages_large_objects * kPageSize) << ")\n";
2177   size_t total_num_pages = 0;
2178   size_t total_metadata_bytes = 0;
2179   size_t total_allocated_bytes = 0;
2180   for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
2181     total_num_pages += num_pages_runs[i];
2182     total_metadata_bytes += num_metadata_bytes[i];
2183     total_allocated_bytes += num_used_slots[i] * bracketSizes[i];
2184   }
2185   total_num_pages += num_pages_large_objects;
2186   total_allocated_bytes += num_pages_large_objects * kPageSize;
2187   os << "Total #total_bytes=" << PrettySize(total_num_pages * kPageSize)
2188      << " #metadata_bytes=" << PrettySize(total_metadata_bytes)
2189      << " #used_bytes=" << PrettySize(total_allocated_bytes) << "\n";
2190   os << "\n";
2191 }
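// Output sketch (placeholder values only; the shape follows the streaming
// code above):
//
//   RosAlloc stats:
//   Bracket 0 (8): #runs=... #pages=... (...) #metadata_bytes=... #slots=... (...) #used_slots=... (...)
//   ...
//   Large #allocations=... #pages=... (...)
//   Total #total_bytes=... #metadata_bytes=... #used_bytes=...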
2192 
2193 }  // namespace allocator
2194 }  // namespace gc
2195 }  // namespace art
2196