// Copyright 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_shared_slots_host_memory_allocator.h"
#include "host-common/address_space_device.hpp"
#include "host-common/vm_operations.h"
#include "host-common/crash_reporter.h"
#include "base/AlignedBuf.h"
#include "base/Lock.h"

#include <map>
#include <unordered_set>
#include <unordered_map>
#include <utility>

namespace android {
namespace emulation {
namespace {
typedef AddressSpaceSharedSlotsHostMemoryAllocatorContext ASSSHMAC;
typedef ASSSHMAC::MemBlock MemBlock;
typedef MemBlock::FreeSubblocks_t FreeSubblocks_t;

using base::AutoLock;
using base::Lock;

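// Allocation granularity: host page size (16 KiB on Apple Silicon hosts,
// 4 KiB elsewhere), so mapped subblocks stay page-aligned.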
#if defined(__APPLE__) && defined(__arm64__)
constexpr uint32_t kAlignment = 16384;
#else
constexpr uint32_t kAlignment = 4096;
#endif

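// Reserves a size-byte region in the device's shared host memory area and
// returns its guest-physical address, or 0 on failure.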
uint64_t allocateAddressSpaceBlock(const AddressSpaceHwFuncs* hw, uint32_t size) {
    uint64_t offset;
    if (hw->allocSharedHostRegionLocked(size, &offset)) {
        return 0;
    } else {
        return hw->getPhysAddrStartLocked() + offset;
    }
}

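// Releases a region previously obtained via allocateAddressSpaceBlock.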
int freeAddressBlock(const AddressSpaceHwFuncs* hw, uint64_t phys) {
    const uint64_t start = hw->getPhysAddrStartLocked();
    if (phys < start) { return -1; }
    return hw->freeSharedHostRegionLocked(phys - start);
}

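// All live memory blocks, keyed by guest-physical base address; shared by
// every context instance and guarded by g_blocksLock.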
std::map<uint64_t, MemBlock> g_blocks;
Lock g_blocksLock;

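// Translates a guest-physical address recorded in a snapshot (within some
// block's physBaseLoaded range) to its address in the reloaded block.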
std::pair<uint64_t, MemBlock*> translatePhysAddr(uint64_t p) {
    for (auto& kv: g_blocks) {
        MemBlock& block = kv.second;
        if (p >= block.physBaseLoaded && p < block.physBaseLoaded + block.bitsSize) {
            return {block.physBase + (p - block.physBaseLoaded), &block};
        }
    }

    return {0, nullptr};
}
}  // namespace

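// Allocates sz bytes of aligned host memory, reserves a matching
// guest-physical range and maps the memory into it; dies on any failure.
// The whole block starts as a single free subblock.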
MemBlock::MemBlock(const address_space_device_control_ops* o, const AddressSpaceHwFuncs* h, uint32_t sz)
        : ops(o), hw(h) {
    bits = android::aligned_buf_alloc(kAlignment, sz);
    bitsSize = sz;
    physBase = allocateAddressSpaceBlock(hw, sz);
    if (!physBase) {
        crashhandler_die("%s:%d: allocateAddressSpaceBlock", __func__, __LINE__);
    }
    physBaseLoaded = 0;
    if (!ops->add_memory_mapping(physBase, bits, bitsSize)) {
        crashhandler_die("%s:%d: add_memory_mapping", __func__, __LINE__);
    }

    if (!freeSubblocks.insert({0, sz}).second) {
        crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
    }
}

MemBlock::MemBlock(MemBlock&& rhs)
    : ops(std::exchange(rhs.ops, nullptr)),
      hw(std::exchange(rhs.hw, nullptr)),
      physBase(std::exchange(rhs.physBase, 0)),
      physBaseLoaded(std::exchange(rhs.physBaseLoaded, 0)),
      bits(std::exchange(rhs.bits, nullptr)),
      bitsSize(std::exchange(rhs.bitsSize, 0)),
      freeSubblocks(std::move(rhs.freeSubblocks)) {
}

MemBlock& MemBlock::operator=(MemBlock rhs) {
    swap(*this, rhs);
    return *this;
}

MemBlock::~MemBlock() {
    if (physBase) {
        ops->remove_memory_mapping(physBase, bits, bitsSize);
        freeAddressBlock(hw, physBase);
        android::aligned_buf_free(bits);
    }
}

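// Member-wise swap; operator= above relies on it (copy-and-swap), so ops and
// hw must be swapped too, or the destructor could run with stale pointers.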
void swap(MemBlock& lhs, MemBlock& rhs) {
    using std::swap;

    swap(lhs.ops,               rhs.ops);
    swap(lhs.hw,                rhs.hw);
    swap(lhs.physBase,          rhs.physBase);
    swap(lhs.physBaseLoaded,    rhs.physBaseLoaded);
    swap(lhs.bits,              rhs.bits);
    swap(lhs.bitsSize,          rhs.bitsSize);
    swap(lhs.freeSubblocks,     rhs.freeSubblocks);
}

bool MemBlock::isAllFree() const {
    if (freeSubblocks.size() == 1) {
        const auto kv = *freeSubblocks.begin();
        return (kv.first == 0) && (kv.second == bitsSize);
    } else {
        return false;
    }
}

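// Carves requestedSize bytes out of a free subblock, splitting off the
// remainder; returns the guest-physical address, or 0 if nothing fits.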
uint64_t MemBlock::allocate(const size_t requestedSize) {
    FreeSubblocks_t::iterator i = findFreeSubblock(&freeSubblocks, requestedSize);
    if (i == freeSubblocks.end()) {
        return 0;
    }

    const uint32_t subblockOffset = i->first;
    const uint32_t subblockSize = i->second;

    freeSubblocks.erase(i);
    if (subblockSize > requestedSize) {
        if (!freeSubblocks.insert({subblockOffset + requestedSize,
                                   subblockSize - requestedSize}).second) {
            crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
        }
    }

    return physBase + subblockOffset;
}

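// Returns a subblock to the free list, merging it with adjacent free
// neighbors to reduce fragmentation.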
void MemBlock::unallocate(uint64_t phys, uint32_t subblockSize) {
    if (phys >= physBase + bitsSize) {
        crashhandler_die("%s:%d: phys >= physBase + bitsSize", __func__, __LINE__);
    }

    auto r = freeSubblocks.insert({phys - physBase, subblockSize});
    if (!r.second) {
        crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
    }

    FreeSubblocks_t::iterator i = r.first;
    if (i != freeSubblocks.begin()) {
        i = tryMergeSubblocks(&freeSubblocks, i, std::prev(i), i);
    }
    FreeSubblocks_t::iterator next = std::next(i);
    if (next != freeSubblocks.end()) {
        i = tryMergeSubblocks(&freeSubblocks, i, i, next);
    }
}

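// Best-fit search: the smallest free subblock of at least sz bytes, or
// end() if none is large enough.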
FreeSubblocks_t::iterator MemBlock::findFreeSubblock(FreeSubblocks_t* fsb,
                                                     const size_t sz) {
    if (fsb->empty()) {
        return fsb->end();
    } else {
        auto best = fsb->end();
        size_t bestSize = ~size_t(0);

        for (auto i = fsb->begin(); i != fsb->end(); ++i) {
            if (i->second >= sz && i->second < bestSize) {
                best = i;
                bestSize = i->second;
            }
        }

        return best;
    }
}

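// If lhs and rhs are adjacent, replaces them with one merged subblock and
// returns it; otherwise returns ret unchanged.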
FreeSubblocks_t::iterator MemBlock::tryMergeSubblocks(
        FreeSubblocks_t* fsb,
        FreeSubblocks_t::iterator ret,
        FreeSubblocks_t::iterator lhs,
        FreeSubblocks_t::iterator rhs) {
    if (lhs->first + lhs->second == rhs->first) {
        const uint32_t subblockOffset = lhs->first;
        const uint32_t subblockSize = lhs->second + rhs->second;

        fsb->erase(lhs);
        fsb->erase(rhs);
        auto r = fsb->insert({subblockOffset, subblockSize});
        if (!r.second) {
            crashhandler_die("%s:%d: fsb->insert", __func__, __LINE__);
        }

        return r.first;
    } else {
        return ret;
    }
}

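// Serializes the block: guest-physical base, size, raw contents, then the
// free-subblock list. MemBlock::load reads the same layout.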
void MemBlock::save(base::Stream* stream) const {
    stream->putBe64(physBase);
    stream->putBe32(bitsSize);
    stream->write(bits, bitsSize);
    stream->putBe32(freeSubblocks.size());
    for (const auto& kv: freeSubblocks) {
        stream->putBe32(kv.first);
        stream->putBe32(kv.second);
    }
}

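// Recreates a saved block with fresh host memory and a fresh guest-physical
// range, keeping the saved base in physBaseLoaded so translatePhysAddr can
// relocate saved allocations. Returns false on stream or allocation failure.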
bool MemBlock::load(base::Stream* stream,
                    const address_space_device_control_ops* ops,
                    const AddressSpaceHwFuncs* hw,
                    MemBlock* block) {
    const uint64_t physBaseLoaded = stream->getBe64();
    const uint32_t bitsSize = stream->getBe32();
    void* const bits = android::aligned_buf_alloc(kAlignment, bitsSize);
    if (!bits) {
        return false;
    }
    if (stream->read(bits, bitsSize) != static_cast<ssize_t>(bitsSize)) {
        android::aligned_buf_free(bits);
        return false;
    }
    const uint64_t physBase = allocateAddressSpaceBlock(hw, bitsSize);
    if (!physBase) {
        android::aligned_buf_free(bits);
        return false;
    }
    if (!ops->add_memory_mapping(physBase, bits, bitsSize)) {
        freeAddressBlock(hw, physBase);
        android::aligned_buf_free(bits);
        return false;
    }

    FreeSubblocks_t freeSubblocks;
    for (uint32_t freeSubblocksSize = stream->getBe32();
         freeSubblocksSize > 0;
         --freeSubblocksSize) {
        const uint32_t off = stream->getBe32();
        const uint32_t sz = stream->getBe32();
        if (!freeSubblocks.insert({off, sz}).second) {
            crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
        }
    }

    block->hw = hw;
    block->ops = ops;
    block->physBase = physBase;
    block->physBaseLoaded = physBaseLoaded;
    block->bits = bits;
    block->bitsSize = bitsSize;
    block->freeSubblocks = std::move(freeSubblocks);

    return true;
}

AddressSpaceSharedSlotsHostMemoryAllocatorContext::AddressSpaceSharedSlotsHostMemoryAllocatorContext(
    const address_space_device_control_ops *ops, const AddressSpaceHwFuncs* hw)
  : m_ops(ops),
    m_hw(hw) {}

AddressSpaceSharedSlotsHostMemoryAllocatorContext::~AddressSpaceSharedSlotsHostMemoryAllocatorContext() {
    clear();
}

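// Guest entry point: dispatches on the command in info->metadata and stores
// the result code back into it.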
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::perform(AddressSpaceDevicePingInfo *info) {
    uint64_t result;

    switch (static_cast<HostMemoryAllocatorCommand>(info->metadata)) {
    case HostMemoryAllocatorCommand::Allocate:
        result = allocate(info);
        break;

    case HostMemoryAllocatorCommand::Unallocate:
        result = unallocate(info->phys_addr);
        break;

    case HostMemoryAllocatorCommand::CheckIfSharedSlotsSupported:
        result = 0;
        break;

    default:
        result = -1;
        break;
    }

    info->metadata = result;
}

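// Rounds the request up to kAlignment and tries each existing block first;
// if none has room, creates a new block of at least 64 MiB.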
uint64_t
AddressSpaceSharedSlotsHostMemoryAllocatorContext::allocate(
        AddressSpaceDevicePingInfo *info) {
    const uint32_t alignedSize =
        ((info->size + kAlignment - 1) / kAlignment) * kAlignment;

    AutoLock lock(g_blocksLock);
    for (auto& kv : g_blocks) {
        uint64_t physAddr = kv.second.allocate(alignedSize);
        if (physAddr) {
            return populatePhysAddr(info, physAddr, alignedSize, &kv.second);
        }
    }

    const uint32_t defaultSize = 64u << 20;
    MemBlock newBlock(m_ops, m_hw, std::max(alignedSize, defaultSize));
    const uint64_t physAddr = newBlock.allocate(alignedSize);
    if (!physAddr) {
        return -1;
    }

    const uint64_t physBase = newBlock.physBase;
    auto r = g_blocks.insert({physBase, std::move(newBlock)});
    if (!r.second) {
        crashhandler_die("%s:%d: g_blocks.insert", __func__, __LINE__);
    }

    return populatePhysAddr(info, physAddr, alignedSize, &r.first->second);
}

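// Frees one allocation owned by this context; if its block becomes empty,
// collects empty blocks, keeping at most one (presumably as a cache for
// future allocations).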
uint64_t
AddressSpaceSharedSlotsHostMemoryAllocatorContext::unallocate(
        const uint64_t physAddr) {
    AutoLock lock(g_blocksLock);

    auto i = m_allocations.find(physAddr);
    if (i == m_allocations.end()) {
        return -1;
    }

    MemBlock* block = i->second.second;
    block->unallocate(physAddr, i->second.first);
    m_allocations.erase(physAddr);

    if (block->isAllFree()) {
        gcEmptyBlocks(1);
    }

    return 0;
}

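// Destroys fully free blocks, sparing the first allowedEmpty of them.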
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::gcEmptyBlocks(int allowedEmpty) {
    auto i = g_blocks.begin();
    while (i != g_blocks.end()) {
        if (i->second.isAllFree()) {
            if (allowedEmpty > 0) {
                --allowedEmpty;
                ++i;
            } else {
                i = g_blocks.erase(i);
            }
        } else {
            ++i;
        }
    }
}

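// Records the allocation and reports it to the guest as an offset relative
// to the start of the device's guest-physical range.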
uint64_t AddressSpaceSharedSlotsHostMemoryAllocatorContext::populatePhysAddr(
        AddressSpaceDevicePingInfo *info,
        const uint64_t physAddr,
        const uint32_t alignedSize,
        MemBlock* owner) {
    info->phys_addr = physAddr - get_address_space_device_hw_funcs()->getPhysAddrStartLocked();
    info->size = alignedSize;
    if (!m_allocations.insert({physAddr, {alignedSize, owner}}).second) {
        crashhandler_die("%s:%d: m_allocations.insert", __func__, __LINE__);
    }
    return 0;
}

AddressSpaceDeviceType AddressSpaceSharedSlotsHostMemoryAllocatorContext::getDeviceType() const {
    return AddressSpaceDeviceType::SharedSlotsHostMemoryAllocator;
}

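// Saves this context's allocation table (address and size per entry); block
// contents are saved separately by globalStateSave.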
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::save(base::Stream* stream) const {
    AutoLock lock(g_blocksLock);

    stream->putBe32(m_allocations.size());
    for (const auto& kv: m_allocations) {
        stream->putBe64(kv.first);
        stream->putBe32(kv.second.first);
    }
}

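// Restores the allocation table, translating each saved address into the
// blocks recreated by globalStateLoad.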
bool AddressSpaceSharedSlotsHostMemoryAllocatorContext::load(base::Stream* stream) {
    clear();

    AutoLock lock(g_blocksLock);
    for (uint32_t sz = stream->getBe32(); sz > 0; --sz) {
        const uint64_t phys = stream->getBe64();
        const uint32_t size = stream->getBe32();
        const auto r = translatePhysAddr(phys);
        if (r.second) {
            if (!m_allocations.insert({r.first, {size, r.second}}).second) {
                crashhandler_die("%s:%d: m_allocations.insert", __func__, __LINE__);
            }
        } else {
            crashhandler_die("%s:%d: translatePhysAddr", __func__, __LINE__);
        }
    }

    return true;
}

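// Returns every allocation owned by this context to its block.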
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::clear() {
    AutoLock lock(g_blocksLock);
    for (const auto& kv: m_allocations) {
        MemBlock* block = kv.second.second;
        block->unallocate(kv.first, kv.second.first);
    }
    m_allocations.clear();
}

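// Saves all shared blocks; runs once per snapshot, separately from the
// per-context save above.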
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateSave(base::Stream* stream) {
    AutoLock lock(g_blocksLock);

    stream->putBe32(g_blocks.size());
    for (const auto& kv: g_blocks) {
        kv.second.save(stream);
    }
}

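// Loads all shared blocks; must run before the per-context load above so
// translatePhysAddr can find the reloaded blocks.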
bool AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateLoad(
        base::Stream* stream,
        const address_space_device_control_ops *ops,
        const AddressSpaceHwFuncs* hw) {
    AutoLock lock(g_blocksLock);

    for (uint32_t sz = stream->getBe32(); sz > 0; --sz) {
        MemBlock block;
        if (!MemBlock::load(stream, ops, hw, &block)) { return false; }

        const uint64_t physBase = block.physBase;
        if (!g_blocks.insert({physBase, std::move(block)}).second) {
            crashhandler_die("%s:%d: g_blocks.insert", __func__, __LINE__);
        }
    }

    return true;
}

void AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateClear() {
    AutoLock lock(g_blocksLock);
    g_blocks.clear();
}

}  // namespace emulation
}  // namespace android