/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For the minimum allocation size (8 bytes), the 4032-byte free bitmap can
// track used allocations covering up to 4032*8*8 = 258048 bytes, which is
// 256KiB minus the header page.
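//
// Each chunk is a kChunkSize (256KiB) mapping aligned to kChunkSize.  The
// Chunk header, including both bitmaps, occupies at most the first page
// (enforced by a static_assert below), and allocation slots start immediately
// after it at data_.  Sizing the bitmap: 258048 usable bytes / 8 bytes per
// slot = 32256 slots = 32256 bits = 4032 bytes of bitmap.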

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "Allocator.h"
#include "LinkedList.h"

namespace android {

// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}

static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
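// With the values above: kUsableChunkSize = 258048 bytes (252KiB),
// kMaxBucketAllocationSize = 64KiB, kNumBuckets = 16 - 3 + 1 = 14 power-of-two
// buckets from 8 bytes up to 64KiB, and kUsablePagesPerChunk = 63.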

std::atomic<int> heap_count;

class Chunk;

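// HeapImpl keeps, per power-of-two bucket, a list of chunks with free slots
// and a list of completely full chunks.  Requests larger than
// kMaxBucketAllocationSize bypass the buckets and are mmapped directly, with
// each mapping tracked through a MapAllocation record.  Alloc and Free
// serialize on a single mutex.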
class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

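// Maps an allocation size to a bucket index: log2(size - 1) + 1 rounds the
// size up to the next power of two, and subtracting const_log2(8) makes
// 8 bytes map to bucket 0.  Example: size 24 -> log2(23) + 1 = 5, minus 3 =
// bucket 2, whose slots are 8 << 2 = 32 bytes.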
static inline unsigned int size_to_bucket(size_t size) {
  if (size < kMinBucketAllocationSize) return 0;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}

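// Maps at least |size| bytes (rounded up to whole pages) at an address that is
// a multiple of |align|.  It over-allocates by align - kPageSize, then unmaps
// the slack before and after the aligned region.  Returns nullptr on failure.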
static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
  }

#if defined(PR_SET_VMA)
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
        "leak_detector_malloc");
#endif

  return ptr;
}

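// A Chunk is a single kChunkSize-sized, kChunkSize-aligned mapping that hands
// out fixed-size slots for one bucket.  Because the header sits at the start
// of the mapping, ptr_to_chunk() can recover it from any interior pointer by
// masking off the low bits of the address, and is_chunk() distinguishes bucket
// allocations (interior, unaligned pointers) from large MapAlloc mappings
// (exactly kChunkSize-aligned).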
class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
  }
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() { return free_count_; }
  HeapImpl* heap() { return heap_; }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations
  unsigned int frees_since_purge_;  // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations.
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to unmap the kChunkSize mapping
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

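// Claims the lowest free slot: scans the free bitmap starting from the
// first_free_bitmap_ hint, clears the first set bit found, marks the page
// containing the slot dirty, and moves the chunk to the heap's full list when
// the last slot is taken.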
void* Chunk::Alloc() {
  assert(free_count_ > 0);

  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0) i++;
  assert(i < arraysize(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < arraysize(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

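// Releases a slot: sets its bit in the free bitmap, lowers the
// first_free_bitmap_ hint if needed, and moves the chunk back to the heap's
// free list when it goes from full to having one free slot.  Purge() is
// invoked once roughly 16 pages' worth of slots have been freed since the
// last purge.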
void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < arraysize(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

void Chunk::Purge() {
  frees_since_purge_ = 0;

  // unsigned int allocsPerPage = kPageSize / allocation_size_;
}

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

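// Requests larger than kMaxBucketAllocationSize go straight to MapAlloc;
// everything else is served from the chunk at the front of the bucket's free
// list, mapping a fresh chunk when the bucket has no chunk with free slots.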
void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

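// Large allocations are mapped directly with MapAligned(size, kChunkSize).
// The result is kChunkSize-aligned, so its low bits are zero, Chunk::is_chunk()
// returns false for it, and FreeLocked routes it back here via MapFree.  Each
// mapping is remembered in a MapAllocation record that is itself a small
// bucket allocation.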
void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  // Unlink the record before freeing it so freed memory is never read.
  MapAllocation* to_free = *allocation;
  *allocation = to_free->next;

  munmap(to_free->ptr, to_free->size);
  FreeLocked(to_free);
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

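// Unlinks the chunk from whichever list it is currently on and re-inserts it
// into the list headed by |head|.  The loop below is intended to keep each
// list ordered by ascending free count, matching the node_ comment in Chunk.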
void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*>* node = head;
  // Insert into new list, sorted by lowest free count
  while (node->next() != head && node->data() != nullptr &&
         node->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}

}  // namespace android