/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker_allocator.h"
#include "linker.h"

#include <algorithm>
#include <vector>

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "private/bionic_prctl.h"

//
// LinkerMemoryAllocator is a general purpose allocator designed to provide
// the same functionality as the malloc/free/realloc libc functions.
//
// On alloc:
// If size is > 1k the allocator proxies the malloc call directly to mmap.
// If size is <= 1k the allocator uses a SmallObjectAllocator for the size
// rounded up to the nearest power of two.
//
// On free:
//
// For a pointer allocated using the proxy-to-mmap path, the allocator unmaps
// the memory.
//
// For a pointer allocated using a SmallObjectAllocator it adds
// the block to free_blocks_list_. If the number of free pages reaches 2,
// SmallObjectAllocator munmaps one of the pages keeping the other one
// in reserve.

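// A minimal usage sketch (illustrative only, not part of this file; the
// `allocator` instance name is hypothetical):
//
//   LinkerMemoryAllocator allocator;
//   void* small = allocator.alloc(100);    // rounded up, served from the 128-byte bucket
//   void* large = allocator.alloc(4096);   // > 1k, proxied straight to mmap
//   small = allocator.realloc(small, 200); // copied into the 256-byte bucket
//   allocator.free(small);
//   allocator.free(large);
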
static const char kSignature[4] = {'L', 'M', 'A', 1};

static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size > 1k)
static const uint32_t kLargeObject = 111;

bool operator<(const small_object_page_record& one, const small_object_page_record& two) {
  return one.page_addr < two.page_addr;
}

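// Computes ceil(log2(number)) for number >= 1, e.g. log2(100) == 7 and
// log2(128) == 7; LinkerMemoryAllocator::alloc uses it to pick the smallest
// power-of-two block size that fits a request.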
static inline uint16_t log2(size_t number) {
  uint16_t result = 0;
  number--;

  while (number != 0) {
    result++;
    number >>= 1;
  }

  return result;
}

LinkerSmallObjectAllocator::LinkerSmallObjectAllocator()
    : type_(0), name_(nullptr), block_size_(0), free_pages_cnt_(0), free_blocks_list_(nullptr) {}

void* LinkerSmallObjectAllocator::alloc() {
  if (free_blocks_list_ == nullptr) {
    alloc_page();
  }

  small_object_block_record* block_record = free_blocks_list_;
  if (block_record->free_blocks_cnt > 1) {
    small_object_block_record* next_free = reinterpret_cast<small_object_block_record*>(
        reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    free_blocks_list_ = next_free;
  } else {
    free_blocks_list_ = block_record->next;
  }

  // bookkeeping: update the counters on the page record for this block's page.
  auto page_record = find_page_record(block_record);

  if (page_record->allocated_blocks_cnt == 0) {
    free_pages_cnt_--;
  }

  page_record->free_blocks_cnt--;
  page_record->allocated_blocks_cnt++;

  memset(block_record, 0, block_size_);

  return block_record;
}

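// Unlinks every free block that lives in the page being released from
// free_blocks_list_, then returns the page to the kernel and drops its record.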
void LinkerSmallObjectAllocator::free_page(linker_vector_t::iterator page_record) {
  void* page_start = reinterpret_cast<void*>(page_record->page_addr);
  void* page_end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(page_start) + PAGE_SIZE);

  while (free_blocks_list_ != nullptr &&
      free_blocks_list_ > page_start &&
      free_blocks_list_ < page_end) {
    free_blocks_list_ = free_blocks_list_->next;
  }

  small_object_block_record* current = free_blocks_list_;

  while (current != nullptr) {
    while (current->next > page_start && current->next < page_end) {
      current->next = current->next->next;
    }

    current = current->next;
  }

  munmap(page_start, PAGE_SIZE);
  page_records_.erase(page_record);
  free_pages_cnt_--;
}

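// The modulo check below relies on pages being PAGE_SIZE-aligned and on
// block_size_ being a power of two that divides PAGE_SIZE, so the page base
// does not need to be subtracted first.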
void LinkerSmallObjectAllocator::free(void* ptr) {
  auto page_record = find_page_record(ptr);

  ssize_t offset = reinterpret_cast<uintptr_t>(ptr) - sizeof(page_info);

  if (offset % block_size_ != 0) {
    __libc_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
  }

  memset(ptr, 0, block_size_);
  small_object_block_record* block_record = reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = free_blocks_list_;
  block_record->free_blocks_cnt = 1;

  free_blocks_list_ = block_record;

  page_record->free_blocks_cnt++;
  page_record->allocated_blocks_cnt--;

  if (page_record->allocated_blocks_cnt == 0) {
    if (free_pages_cnt_++ > 1) {
      // if we already have a free page - unmap this one.
      free_page(page_record);
    }
  }
}

void LinkerSmallObjectAllocator::init(uint32_t type, size_t block_size, const char* name) {
  type_ = type;
  block_size_ = block_size;
  name_ = name;
}

linker_vector_t::iterator LinkerSmallObjectAllocator::find_page_record(void* ptr) {
  void* addr = reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
  small_object_page_record boundary;
  boundary.page_addr = addr;
  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), boundary);

  if (it == page_records_.end() || it->page_addr != addr) {
    // not found...
    __libc_fatal("page record for %p was not found (block_size=%zd)", ptr, block_size_);
  }

  return it;
}

void LinkerSmallObjectAllocator::create_page_record(void* page_addr, size_t free_blocks_cnt) {
  small_object_page_record record;
  record.page_addr = page_addr;
  record.free_blocks_cnt = free_blocks_cnt;
  record.allocated_blocks_cnt = 0;

  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), record);
  page_records_.insert(it, record);
}

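// Maps a fresh page laid out as a page_info header followed by
// (PAGE_SIZE - sizeof(page_info)) / block_size_ blocks; the whole run of
// blocks is pushed onto free_blocks_list_ as a single record.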
void LinkerSmallObjectAllocator::alloc_page() {
  void* map_ptr = mmap(nullptr, PAGE_SIZE,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
  if (map_ptr == MAP_FAILED) {
    __libc_fatal("mmap failed");
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE, name_);

  memset(map_ptr, 0, PAGE_SIZE);

  page_info* info = reinterpret_cast<page_info*>(map_ptr);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = type_;
  info->allocator_addr = this;

  size_t free_blocks_cnt = (PAGE_SIZE - sizeof(page_info))/block_size_;

  create_page_record(map_ptr, free_blocks_cnt);

  small_object_block_record* first_block = reinterpret_cast<small_object_block_record*>(info + 1);

  first_block->next = free_blocks_list_;
  first_block->free_blocks_cnt = free_blocks_cnt;

  free_blocks_list_ = first_block;
}


LinkerMemoryAllocator::LinkerMemoryAllocator() {
  static const char* allocator_names[kSmallObjectAllocatorsCount] = {
    "linker_alloc_16", // 2^4
    "linker_alloc_32", // 2^5
    "linker_alloc_64", // and so on...
    "linker_alloc_128",
    "linker_alloc_256",
    "linker_alloc_512",
    "linker_alloc_1024", // 2^10
  };

  for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
    uint32_t type = i + kSmallObjectMinSizeLog2;
    allocators_[i].init(type, 1 << type, allocator_names[i]);
  }
}

void* LinkerMemoryAllocator::alloc_mmap(size_t size) {
  size_t allocated_size = PAGE_END(size + sizeof(page_info));
  void* map_ptr = mmap(nullptr, allocated_size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);

  if (map_ptr == MAP_FAILED) {
    __libc_fatal("mmap failed");
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob");

  memset(map_ptr, 0, allocated_size);

  page_info* info = reinterpret_cast<page_info*>(map_ptr);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return info + 1;
}

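// Requests of up to 1k are rounded up to the nearest power of two (16 bytes at
// minimum) and served from the matching small object allocator; anything
// larger goes straight to alloc_mmap. For example, alloc(100) is served from
// the "linker_alloc_128" bucket, while alloc(2000) gets its own mapping.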
void* LinkerMemoryAllocator::alloc(size_t size) {
  // treat alloc(0) as alloc(1)
  if (size == 0) {
    size = 1;
  }

  if (size > kSmallObjectMaxSize) {
    return alloc_mmap(size);
  }

  uint16_t log2_size = log2(size);

  if (log2_size < kSmallObjectMinSizeLog2) {
    log2_size = kSmallObjectMinSizeLog2;
  }

  return get_small_object_allocator(log2_size)->alloc();
}

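// The page containing a returned pointer always starts with a page_info header
// (each small object page carries one, and a large allocation's pointer lies in
// the first page of its mapping), so PAGE_START recovers the metadata; the
// signature check guards against pointers that were not allocated here.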
page_info* LinkerMemoryAllocator::get_page_info(void* ptr) {
  page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr)));
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    __libc_fatal("invalid pointer %p (page signature mismatch)", ptr);
  }

  return info;
}

void* LinkerMemoryAllocator::realloc(void* ptr, size_t size) {
  if (ptr == nullptr) {
    return alloc(size);
  }

  if (size == 0) {
    free(ptr);
    return nullptr;
  }

  page_info* info = get_page_info(ptr);

  size_t old_size = 0;

  if (info->type == kLargeObject) {
    old_size = info->allocated_size - sizeof(page_info);
  } else {
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      __libc_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    old_size = allocator->get_block_size();
  }

  if (old_size < size) {
    void* result = alloc(size);
    memcpy(result, ptr, old_size);
    free(ptr);
    return result;
  }

  return ptr;
}

void LinkerMemoryAllocator::free(void* ptr) {
  if (ptr == nullptr) {
    return;
  }

  page_info* info = get_page_info(ptr);

  if (info->type == kLargeObject) {
    munmap(info, info->allocated_size);
  } else {
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      __libc_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    allocator->free(ptr);
  }
}

LinkerSmallObjectAllocator* LinkerMemoryAllocator::get_small_object_allocator(uint32_t type) {
  if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
    __libc_fatal("invalid type: %u", type);
  }

  return &allocators_[type - kSmallObjectMinSizeLog2];
}