/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "MemoryDealer"

#include <binder/MemoryDealer.h>
#include <binder/IPCThreadState.h>
#include <binder/MemoryBase.h>

#include <utils/Log.h>
#include <utils/SortedVector.h>
#include <utils/String8.h>
#include <utils/threads.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/file.h>

namespace android {
// ----------------------------------------------------------------------------

/*
 * A simple templatized doubly linked-list implementation
 */

template <typename NODE>
class LinkedList
{
    NODE*  mFirst;
    NODE*  mLast;

public:
                LinkedList() : mFirst(nullptr), mLast(nullptr) { }
    bool        isEmpty() const { return mFirst == nullptr; }
    NODE const* head() const { return mFirst; }
    NODE*       head() { return mFirst; }
    NODE const* tail() const { return mLast; }
    NODE*       tail() { return mLast; }

    void insertAfter(NODE* node, NODE* newNode) {
        newNode->prev = node;
        newNode->next = node->next;
        if (node->next == nullptr) mLast = newNode;
        else                       node->next->prev = newNode;
        node->next = newNode;
    }

    void insertBefore(NODE* node, NODE* newNode) {
        newNode->prev = node->prev;
        newNode->next = node;
        if (node->prev == nullptr) mFirst = newNode;
        else                       node->prev->next = newNode;
        node->prev = newNode;
    }

    void insertHead(NODE* newNode) {
        if (mFirst == nullptr) {
            mFirst = mLast = newNode;
            newNode->prev = newNode->next = nullptr;
        } else {
            newNode->prev = nullptr;
            newNode->next = mFirst;
            mFirst->prev = newNode;
            mFirst = newNode;
        }
    }

    void insertTail(NODE* newNode) {
        if (mLast == nullptr) {
            insertHead(newNode);
        } else {
            newNode->prev = mLast;
            newNode->next = nullptr;
            mLast->next = newNode;
            mLast = newNode;
        }
    }

    NODE* remove(NODE* node) {
        if (node->prev == nullptr) mFirst = node->next;
        else                       node->prev->next = node->next;
        if (node->next == nullptr) mLast = node->prev;
        else                       node->next->prev = node->prev;
        return node;
    }
};
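
/*
 * The list is intrusive: each NODE type must expose public `prev` and
 * `next` pointers of its own type, which the list manipulates directly
 * (see chunk_t below). Illustration only (the node type here is made up
 * for the example and is not part of this file):
 *
 *     struct node_t {
 *         int     value = 0;
 *         node_t* prev  = nullptr;
 *         node_t* next  = nullptr;
 *     };
 *
 *     LinkedList<node_t> list;
 *     node_t a, b;
 *     list.insertHead(&a);        // list: a
 *     list.insertAfter(&a, &b);   // list: a -> b
 *     list.remove(&b);            // list: a  (remove() never deletes nodes)
 *
 * The list does not own its nodes; callers allocate and delete them, as
 * SimpleBestFitAllocator does below.
 */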

// ----------------------------------------------------------------------------

class Allocation : public MemoryBase {
public:
    Allocation(const sp<MemoryDealer>& dealer,
            const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~Allocation();
private:
    sp<MemoryDealer> mDealer;
};

// ----------------------------------------------------------------------------

class SimpleBestFitAllocator
{
    enum {
        PAGE_ALIGNED = 0x00000001
    };
public:
    explicit SimpleBestFitAllocator(size_t size);
    ~SimpleBestFitAllocator();

    size_t      allocate(size_t size, uint32_t flags = 0);
    status_t    deallocate(size_t offset);
    size_t      size() const;
    void        dump(const char* what) const;
    void        dump(String8& res, const char* what) const;

    static size_t getAllocationAlignment() { return kMemoryAlign; }

private:

    struct chunk_t {
        chunk_t(size_t start, size_t size)
        : start(start), size(size), free(1), prev(nullptr), next(nullptr) {
        }
        size_t              start;
        size_t              size : 28;
        int                 free : 4;
        mutable chunk_t*    prev;
        mutable chunk_t*    next;
    };

    ssize_t  alloc(size_t size, uint32_t flags);
    chunk_t* dealloc(size_t start);
    void     dump_l(const char* what) const;
    void     dump_l(String8& res, const char* what) const;

    static const int    kMemoryAlign;
    mutable Mutex       mLock;
    LinkedList<chunk_t> mList;
    size_t              mHeapSize;
};
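
/*
 * Bookkeeping note: chunk_t stores `start` and `size` in units of
 * kMemoryAlign bytes rather than raw bytes, so with kMemoryAlign == 32 a
 * chunk covering bytes [128, 256) is recorded as {start=4, size=4}.
 * alloc() and dealloc() convert to and from byte offsets at their
 * boundaries, and the best-fit policy scans the whole list for the
 * smallest free chunk large enough to hold the request.
 */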

// ----------------------------------------------------------------------------

Allocation::Allocation(
        const sp<MemoryDealer>& dealer,
        const sp<IMemoryHeap>& heap, ssize_t offset, size_t size)
    : MemoryBase(heap, offset, size), mDealer(dealer)
{
#ifndef NDEBUG
    void* const start_ptr = (void*)(intptr_t(heap->base()) + offset);
    memset(start_ptr, 0xda, size);
#endif
}

Allocation::~Allocation()
{
    size_t freedOffset = getOffset();
    size_t freedSize   = getSize();
    if (freedSize) {
        /* NOTE: it's VERY important to not free allocations of size 0 because
         * they're special as they don't have any record in the allocator
         * and could alias some real allocation (their offset is zero). */

        // keep the size to unmap in excess
        size_t pagesize = getpagesize();
        size_t start = freedOffset;
        size_t end = start + freedSize;
        start &= ~(pagesize-1);
        end = (end + pagesize-1) & ~(pagesize-1);

        // give back to the kernel the pages we don't need
        size_t free_start = freedOffset;
        size_t free_end = free_start + freedSize;
        if (start < free_start)
            start = free_start;
        if (end > free_end)
            end = free_end;
        start = (start + pagesize-1) & ~(pagesize-1);
        end &= ~(pagesize-1);
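
        // Net effect of the rounding above: [start, end) now covers only the
        // whole pages that lie entirely inside the freed range. Illustrative
        // numbers: with a 4096-byte page size, freeing offset 5000 with size
        // 10000 (bytes [5000, 15000)) yields start == 8192 and end == 12288,
        // i.e. a single full page; partial pages at either edge stay mapped
        // because neighbouring allocations may still be using them.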

        if (start < end) {
            void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
            size_t size = end-start;

#ifndef NDEBUG
            memset(start_ptr, 0xdf, size);
#endif

            // MADV_REMOVE is not defined on Dapper based Goobuntu
#ifdef MADV_REMOVE
            if (size) {
                int err = madvise(start_ptr, size, MADV_REMOVE);
                ALOGW_IF(err, "madvise(%p, %zu, MADV_REMOVE) returned %s",
                        start_ptr, size, err<0 ? strerror(errno) : "Ok");
            }
#endif
        }

        // This should be done after madvise(MADV_REMOVE), otherwise madvise()
        // might kick out the memory region that's allocated and/or written
        // right after the deallocation.
        mDealer->deallocate(freedOffset);
    }
}

// ----------------------------------------------------------------------------

MemoryDealer::MemoryDealer(size_t size, const char* name, uint32_t flags)
    : mHeap(new MemoryHeapBase(size, flags, name)),
    mAllocator(new SimpleBestFitAllocator(size))
{
}

MemoryDealer::~MemoryDealer()
{
    delete mAllocator;
}

sp<IMemory> MemoryDealer::allocate(size_t size)
{
    sp<IMemory> memory;
    const ssize_t offset = allocator()->allocate(size);
    if (offset >= 0) {
        memory = new Allocation(this, heap(), offset, size);
    }
    return memory;
}
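
/*
 * Typical client-side usage (illustrative sketch only; names and error
 * handling are the caller's, and the exact IMemory accessors available
 * depend on the platform version):
 *
 *     sp<MemoryDealer> dealer = new MemoryDealer(1024 * 1024, "example");
 *     sp<IMemory> block = dealer->allocate(4096);
 *     if (block != nullptr) {
 *         // block is backed by dealer->heap() at block->offset()
 *     }
 *
 * Dropping the last reference to `block` runs Allocation::~Allocation(),
 * which hands the space back to the dealer; clients do not call
 * deallocate() themselves.
 */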

void MemoryDealer::deallocate(size_t offset)
{
    allocator()->deallocate(offset);
}

void MemoryDealer::dump(const char* what) const
{
    allocator()->dump(what);
}

const sp<IMemoryHeap>& MemoryDealer::heap() const {
    return mHeap;
}

SimpleBestFitAllocator* MemoryDealer::allocator() const {
    return mAllocator;
}

// static
size_t MemoryDealer::getAllocationAlignment()
{
    return SimpleBestFitAllocator::getAllocationAlignment();
}

// ----------------------------------------------------------------------------

// align all the memory blocks on a cache-line boundary
const int SimpleBestFitAllocator::kMemoryAlign = 32;
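// In practice every request is rounded up to a multiple of 32 bytes: for
// example, allocate(100) reserves 128 bytes (4 units of kMemoryAlign), while
// allocate(0) is special-cased in alloc() to return offset 0 without
// creating a chunk.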

SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
{
    size_t pagesize = getpagesize();
    mHeapSize = ((size + pagesize-1) & ~(pagesize-1));

    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
    mList.insertHead(node);
}

SimpleBestFitAllocator::~SimpleBestFitAllocator()
{
    while(!mList.isEmpty()) {
        chunk_t* removed = mList.remove(mList.head());
#ifdef __clang_analyzer__
        // Clang static analyzer gets confused in this loop
        // and generates a false positive warning about accessing
        // memory that is already freed.
        // Add an "assert" to avoid the confusion.
        LOG_ALWAYS_FATAL_IF(mList.head() == removed);
#endif
        delete removed;
    }
}

size_t SimpleBestFitAllocator::size() const
{
    return mHeapSize;
}

size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
{
    Mutex::Autolock _l(mLock);
    ssize_t offset = alloc(size, flags);
    return offset;
}

status_t SimpleBestFitAllocator::deallocate(size_t offset)
{
    Mutex::Autolock _l(mLock);
    chunk_t const * const freed = dealloc(offset);
    if (freed) {
        return NO_ERROR;
    }
    return NAME_NOT_FOUND;
}

ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
    if (size == 0) {
        return 0;
    }
    size = (size + kMemoryAlign-1) / kMemoryAlign;
    chunk_t* free_chunk = nullptr;
    chunk_t* cur = mList.head();

    size_t pagesize = getpagesize();
    while (cur) {
        int extra = 0;
        if (flags & PAGE_ALIGNED)
            extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;

        // best fit
        if (cur->free && (cur->size >= (size+extra))) {
            if ((!free_chunk) || (cur->size < free_chunk->size)) {
                free_chunk = cur;
            }
            if (cur->size == size) {
                break;
            }
        }
        cur = cur->next;
    }

    if (free_chunk) {
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
            if (extra) {
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insertBefore(free_chunk, split);
            }

            ALOGE_IF((flags&PAGE_ALIGNED) &&
                    ((free_chunk->start*kMemoryAlign)&(pagesize-1)),
                    "PAGE_ALIGNED requested, but page is not aligned!!!");

            const ssize_t tail_free = free_size - (size+extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(
                        free_chunk->start + free_chunk->size, tail_free);
                mList.insertAfter(free_chunk, split);
            }
        }
        return (free_chunk->start)*kMemoryAlign;
    }
    return NO_MEMORY;
}
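
/*
 * Worked example (illustrative, assuming a 1 MiB heap and 4096-byte pages):
 * the heap starts as a single free chunk {start=0, size=32768} in
 * kMemoryAlign units. allocate(100) takes 4 units at offset 0 and leaves a
 * free chunk {4, 32764}. A following allocate(4096, PAGE_ALIGNED) needs 128
 * units plus extra == 124 units of padding to reach the next page boundary,
 * so that free chunk is split into {4, 124} (free padding), {128, 128} (the
 * allocation, byte offset 4096) and {256, 32512} (free tail).
 */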

SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
    start = start / kMemoryAlign;
    chunk_t* cur = mList.head();
    while (cur) {
        if (cur->start == start) {
            LOG_FATAL_IF(cur->free,
                "block at offset 0x%08lX of size 0x%08lX already freed",
                cur->start*kMemoryAlign, cur->size*kMemoryAlign);

            // merge freed blocks together
            chunk_t* freed = cur;
            cur->free = 1;
            do {
                chunk_t* const p = cur->prev;
                chunk_t* const n = cur->next;
                if (p && (p->free || !cur->size)) {
                    freed = p;
                    p->size += cur->size;
                    mList.remove(cur);
                    delete cur;
                }
                cur = n;
            } while (cur && cur->free);

            #ifndef NDEBUG
                if (!freed->free) {
                    dump_l("dealloc (!freed->free)");
                }
            #endif
            LOG_FATAL_IF(!freed->free,
                "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                freed->start * kMemoryAlign, freed->size * kMemoryAlign);

            return freed;
        }
        cur = cur->next;
    }
    return nullptr;
}
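
/*
 * dealloc() also re-coalesces: a freed block is merged with a free
 * predecessor and with any run of free successors, so in the example above
 * freeing both allocations (in either order) collapses the list back to the
 * single chunk {0, 32768}.
 */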

void SimpleBestFitAllocator::dump(const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(what);
}

void SimpleBestFitAllocator::dump_l(const char* what) const
{
    String8 result;
    dump_l(result, what);
    ALOGD("%s", result.string());
}

void SimpleBestFitAllocator::dump(String8& result,
        const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(result, what);
}

void SimpleBestFitAllocator::dump_l(String8& result,
        const char* what) const
{
    size_t size = 0;
    int32_t i = 0;
    chunk_t const* cur = mList.head();

    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, "  %s (%p, size=%u)\n",
            what, this, (unsigned int)mHeapSize);

    result.append(buffer);

    while (cur) {
        const char* errs[] = {"", "| link bogus NP",
                              "| link bogus PN", "| link bogus NP+PN" };
        int np = ((cur->next) && cur->next->prev != cur) ? 1 : 0;
        int pn = ((cur->prev) && cur->prev->next != cur) ? 2 : 0;

        snprintf(buffer, SIZE, "  %3u: %p | 0x%08X | 0x%08X | %s %s\n",
                i, cur, int(cur->start*kMemoryAlign),
                int(cur->size*kMemoryAlign),
                int(cur->free) ? "F" : "A",
                errs[np|pn]);

        result.append(buffer);

        if (!cur->free)
            size += cur->size*kMemoryAlign;

        i++;
        cur = cur->next;
    }
    snprintf(buffer, SIZE,
            "  size allocated: %u (%u KB)\n", int(size), int(size/1024));
    result.append(buffer);
}


}; // namespace android