/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "MemoryDealer"

#include <hidlcache/MemoryDealer.h>
#include <hidlmemory/HidlMemoryToken.h>
#include <hidlmemory/mapping.h>

#include <list>
#include <mutex>

#include <log/log.h>

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/file.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

using std::string;

namespace android {
namespace hardware {
class SimpleBestFitAllocator {
    enum { PAGE_ALIGNED = 0x00000001 };

   public:
    explicit SimpleBestFitAllocator(size_t size);
    ~SimpleBestFitAllocator();

    size_t allocate(size_t size, uint32_t flags = 0);
    status_t deallocate(size_t offset);
    size_t size() const;
    void dump(const char* tag) const;
    void dump(string& res, const char* tag) const;

    static size_t getAllocationAlignment() { return kMemoryAlign; }

   private:
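    // Bookkeeping node for one contiguous region of the heap. Both start
    // and size are measured in kMemoryAlign units rather than bytes; size
    // is packed into a 28-bit field, with a 4-bit field for the free flag.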
    struct chunk_t {
        chunk_t(size_t start, size_t size) : start(start), size(size), free(1) {}
        size_t start;
        size_t size : 28;
        int free : 4;
    };
    using List = std::list<chunk_t*>;
    using Iterator = std::list<chunk_t*>::iterator;
    using IteratorConst = std::list<chunk_t*>::const_iterator;
    using Mutex = std::mutex;
    using Lock = std::lock_guard<Mutex>;

    ssize_t alloc(size_t size, uint32_t flags);
    chunk_t* dealloc(size_t start);
    void dump_l(const char* tag) const;
    void dump_l(string& res, const char* tag) const;

    static const int kMemoryAlign;
    mutable Mutex mLock;
    List mList;
    size_t mHeapSize;
};

MemoryDealer::MemoryDealer(size_t size) : mAllocator(new SimpleBestFitAllocator(size)) {}

MemoryDealer::~MemoryDealer() {
    delete mAllocator;
}

ssize_t MemoryDealer::allocateOffset(size_t size) {
    return mAllocator->allocate(size);
}

void MemoryDealer::deallocate(size_t offset) {
    mAllocator->deallocate(offset);
}

void MemoryDealer::dump(const char* tag) const {
    mAllocator->dump(tag);
}

size_t MemoryDealer::getAllocationAlignment() {
    return SimpleBestFitAllocator::getAllocationAlignment();
}

// align all the memory blocks on a cache-line boundary
const int SimpleBestFitAllocator::kMemoryAlign = 32;

SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size) {
    size_t pagesize = getpagesize();
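    // Round the heap size up to a whole number of pages (pagesize is a
    // power of two, so the usual mask-based round-up applies).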
    mHeapSize = ((size + pagesize - 1) & ~(pagesize - 1));

    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
    mList.push_front(node);
}

SimpleBestFitAllocator::~SimpleBestFitAllocator() {
    while (mList.size() != 0) {
        chunk_t* removed = mList.front();
        mList.pop_front();
#ifdef __clang_analyzer__
        // Clang static analyzer gets confused in this loop
        // and generates a false positive warning about accessing
        // memory that is already freed.
        // Add an "assert" to avoid the confusion.
        LOG_ALWAYS_FATAL_IF(mList.front() == removed);
#endif
        delete removed;
    }
}

size_t SimpleBestFitAllocator::size() const {
    return mHeapSize;
}

size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags) {
    Lock lock(mLock);
    ssize_t offset = alloc(size, flags);
    return offset;
}

status_t SimpleBestFitAllocator::deallocate(size_t offset) {
    Lock lock(mLock);
    chunk_t const* const freed = dealloc(offset);
    if (freed) {
        return NO_ERROR;
    }
    return NAME_NOT_FOUND;
}

ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags) {
    if (size == 0) {
        return 0;
    }
    size = (size + kMemoryAlign - 1) / kMemoryAlign;
    size_t pagesize = getpagesize();

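    // Best-fit scan: remember the smallest free chunk that can hold the
    // request (plus any padding needed to reach a page boundary when
    // PAGE_ALIGNED is set), and stop early on an exact fit.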
    Iterator free_chunk_p = mList.end();
    for (Iterator p = mList.begin(); p != mList.end(); p++) {
        chunk_t* cur = *p;
        int extra = 0;
        if (flags & PAGE_ALIGNED) extra = (-cur->start & ((pagesize / kMemoryAlign) - 1));

        // best fit
        if (cur->free && (cur->size >= (size + extra))) {
            if ((free_chunk_p == mList.end()) || (cur->size < (*free_chunk_p)->size)) {
                free_chunk_p = p;
            }
            if (cur->size == size) {
                break;
            }
        }
    }
    if (free_chunk_p != mList.end()) {
        chunk_t* free_chunk = *free_chunk_p;
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
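            // The chunk is larger than needed: return the leading pad (used
            // only to reach a page boundary) and the trailing remainder to
            // the free list as new chunks on either side of the allocation.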
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = (-free_chunk->start & ((pagesize / kMemoryAlign) - 1));
            if (extra) {
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insert(free_chunk_p, split);
            }

            ALOGE_IF(
                (flags & PAGE_ALIGNED) && ((free_chunk->start * kMemoryAlign) & (pagesize - 1)),
                "PAGE_ALIGNED requested, but page is not aligned!!!");

            const ssize_t tail_free = free_size - (size + extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(free_chunk->start + free_chunk->size, tail_free);
                mList.insert(++free_chunk_p, split);
            }
        }
        return (free_chunk->start) * kMemoryAlign;
    }
    return NO_MEMORY;
}

SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start) {
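    // Offsets handed out by alloc() are in bytes; convert back to
    // kMemoryAlign units to match the bookkeeping in mList.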
    start = start / kMemoryAlign;

    for (Iterator pos = mList.begin(); pos != mList.end(); pos++) {
        chunk_t* cur = *pos;
        if (cur->start == start) {
            LOG_FATAL_IF(cur->free, "block at offset 0x%08lX of size 0x%08lX already freed",
                         cur->start * kMemoryAlign, cur->size * kMemoryAlign);

            // merge freed blocks together
            chunk_t* freed = cur;
            cur->free = 1;
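            // First fold the chunk into its predecessor if that one is
            // free, then keep absorbing free successors until a busy chunk
            // (or the end of the list) is reached.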
            do {
                if (pos != mList.begin()) {
                    pos--;
                    chunk_t* const p = *pos;
                    pos++;
                    if (p->free || !cur->size) {
                        freed = p;
                        p->size += cur->size;
                        pos = mList.erase(pos);
                        delete cur;
                        if (pos == mList.end()) break;
                    }
                }
                if (++pos == mList.end()) break;
                cur = *pos;
            } while (cur && cur->free);

#ifndef NDEBUG
            if (!freed->free) {
                dump_l("dealloc (!freed->free)");
            }
#endif
            LOG_FATAL_IF(!freed->free, "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                         freed->start * kMemoryAlign, freed->size * kMemoryAlign);

            return freed;
        }
    }
    return nullptr;
}

void SimpleBestFitAllocator::dump(const char* tag) const {
    Lock lock(mLock);
    dump_l(tag);
}

void SimpleBestFitAllocator::dump_l(const char* tag) const {
    string result;
    dump_l(result, tag);
    ALOGD("%s", result.c_str());
}

void SimpleBestFitAllocator::dump(string& result, const char* tag) const {
    Lock lock(mLock);
    dump_l(result, tag);
}

void SimpleBestFitAllocator::dump_l(string& result, const char* tag) const {
    size_t size = 0;
    int32_t i = 0;
    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, "  %s (%p, size=%u)\n", tag, this, (unsigned int)mHeapSize);

    result.append(buffer);

    for (IteratorConst pos = mList.begin(); pos != mList.end(); pos++) {
        chunk_t const* cur = *pos;

        if (!cur->free) size += cur->size * kMemoryAlign;

        i++;
    }
    snprintf(buffer, SIZE, "  size allocated: %u (%u KB)\n", int(size), int(size / 1024));
    result.append(buffer);
}

bool HidlMemoryDealer::isOk(const MemoryBlock& memblk) {
    return memblk.token != nullptr;
}

sp<::android::hidl::memory::V1_0::IMemory> HidlMemoryDealer::heap() {
    return mHeap;
}

// The required heap size alignment is 4096 bytes
static const uint64_t kHeapSizeAlignment = (0x1ULL << 12);

sp<HidlMemoryDealer> HidlMemoryDealer::getInstance(const hidl_memory& mem) {
    uint64_t msk = (kHeapSizeAlignment - 1);
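    // Reject heaps whose size is not a multiple of kHeapSizeAlignment
    // (first test) or is smaller than one alignment unit (second test).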
    if (mem.size() & msk || !(mem.size() & ~msk)) {
        ALOGE("size is not aligned to %x", static_cast<uint32_t>(kHeapSizeAlignment));
        return nullptr;
    }
    sp<IMemory> heap = mapMemory(mem);
    if (heap == nullptr) {
        ALOGE("failed to mapMemory");
        return nullptr;
    }
    return new HidlMemoryDealer(heap, mem);
}

HidlMemoryDealer::HidlMemoryDealer(sp<IMemory> heap, const hidl_memory& mem)
    : MemoryDealer(heap->getSize()),
      mHeap(heap),
      mToken(new HidlMemoryToken(HidlMemory::getInstance(mem))) {}

::android::hidl::memory::block::V1_0::MemoryBlock HidlMemoryDealer::allocate(size_t size) {
    MemoryBlock memblk = {nullptr, 0xFFFFFFFFULL, 0xFFFFFFFFULL};
    ssize_t offset = allocateOffset(size);
    if (offset >= 0) {
        memblk.token = mToken;
        memblk.size = size;
        memblk.offset = offset;
    }
    return memblk;
}

};  // namespace hardware
};  // namespace android
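
// Usage sketch (illustrative, not part of the original file). Assumes a
// hidl_memory region `mem` already obtained elsewhere, e.g. from an
// android.hidl.allocator@1.0::IAllocator service:
//
//   sp<HidlMemoryDealer> dealer = HidlMemoryDealer::getInstance(mem);
//   if (dealer != nullptr) {
//       MemoryBlock blk = dealer->allocate(1024);
//       if (HidlMemoryDealer::isOk(blk)) {
//           // blk.offset / blk.size locate the region inside dealer->heap().
//           dealer->deallocate(blk.offset);
//       }
//   }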