// Copyright 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_shared_slots_host_memory_allocator.h"
#include "host-common/address_space_device.hpp"
#include "host-common/vm_operations.h"
#include "host-common/crash-handler.h"
#include "host-common/crash_reporter.h"
#include "aemu/base/AlignedBuf.h"
#include "aemu/base/synchronization/Lock.h"
#include <algorithm>  // std::max
#include <map>
#include <unordered_set>
#include <unordered_map>
#include <utility>

namespace android {
namespace emulation {
namespace {
typedef AddressSpaceSharedSlotsHostMemoryAllocatorContext ASSSHMAC;
typedef ASSSHMAC::MemBlock MemBlock;
typedef MemBlock::FreeSubblocks_t FreeSubblocks_t;

using base::AutoLock;
using base::Lock;

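// Allocations are carved out at page granularity; Apple Silicon hosts use
// 16 KiB pages, everything else here assumes 4 KiB.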
#if defined(__APPLE__) && defined(__arm64__)
constexpr uint32_t kAlignment = 16384;
#else
constexpr uint32_t kAlignment = 4096;
#endif

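// Reserves a `size`-byte region in the shared host memory window and returns
// its guest-physical address, or 0 on failure.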
uint64_t allocateAddressSpaceBlock(const AddressSpaceHwFuncs* hw, uint32_t size) {
    uint64_t offset;
    if (hw->allocSharedHostRegionLocked(size, &offset)) {
        return 0;
    } else {
        return hw->getPhysAddrStartLocked() + offset;
    }
}

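// Releases a region previously returned by allocateAddressSpaceBlock.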
int freeAddressBlock(const AddressSpaceHwFuncs* hw, uint64_t phys) {
    const uint64_t start = hw->getPhysAddrStartLocked();
    if (phys < start) { return -1; }
    return hw->freeSharedHostRegionLocked(phys - start);
}

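// All live MemBlocks, keyed by their guest-physical base address and shared
// across every context; g_blocksLock guards both globals.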
std::map<uint64_t, MemBlock> g_blocks;
Lock g_blocksLock;

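// Maps a guest-physical address recorded in a snapshot (physBaseLoaded) to
// the corresponding address in the freshly loaded block, along with the
// block that owns it. Returns {0, nullptr} if no block covers the address.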
std::pair<uint64_t, MemBlock*> translatePhysAddr(uint64_t p) {
    for (auto& kv: g_blocks) {
        MemBlock& block = kv.second;
        if (p >= block.physBaseLoaded && p < block.physBaseLoaded + block.bitsSize) {
            return {block.physBase + (p - block.physBaseLoaded), &block};
        }
    }

    return {0, nullptr};
}
}  // namespace

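// A MemBlock owns one host allocation: an aligned host buffer (`bits`), its
// guest-physical mapping (`physBase`), and a free list of {offset, size}
// subblocks still available for allocation. A freshly constructed block is
// one big free subblock.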
MemBlock::MemBlock(const address_space_device_control_ops* o, const AddressSpaceHwFuncs* h, uint32_t sz)
        : ops(o), hw(h) {
    bits = android::aligned_buf_alloc(kAlignment, sz);
    bitsSize = sz;
    physBase = allocateAddressSpaceBlock(hw, sz);
    if (!physBase) {
        crashhandler_die("%s:%d: allocateAddressSpaceBlock", __func__, __LINE__);
    }
    physBaseLoaded = 0;
    if (!ops->add_memory_mapping(physBase, bits, bitsSize)) {
        crashhandler_die("%s:%d: add_memory_mapping", __func__, __LINE__);
    }

    if (!freeSubblocks.insert({0, sz}).second) {
        crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
    }
}

MemBlock::MemBlock(MemBlock&& rhs)
    : ops(std::exchange(rhs.ops, nullptr)),
      hw(std::exchange(rhs.hw, nullptr)),
      physBase(std::exchange(rhs.physBase, 0)),
      physBaseLoaded(std::exchange(rhs.physBaseLoaded, 0)),
      bits(std::exchange(rhs.bits, nullptr)),
      bitsSize(std::exchange(rhs.bitsSize, 0)),
      freeSubblocks(std::move(rhs.freeSubblocks)) {
}

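// Copy-and-swap assignment: `rhs` is taken by value, so the moved-from state
// is released when it goes out of scope.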
MemBlock& MemBlock::operator=(MemBlock rhs) {
    swap(*this, rhs);
    return *this;
}

MemBlock::~MemBlock() {
    if (physBase) {
        ops->remove_memory_mapping(physBase, bits, bitsSize);
        freeAddressBlock(hw, physBase);
        android::aligned_buf_free(bits);
    }
}

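// Note that `ops` and `hw` are deliberately not swapped; in practice every
// MemBlock is created with the same device ops and hw funcs, so swapping the
// remaining fields is sufficient.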
void swap(MemBlock& lhs, MemBlock& rhs) {
    using std::swap;

    swap(lhs.physBase,          rhs.physBase);
    swap(lhs.physBaseLoaded,    rhs.physBaseLoaded);
    swap(lhs.bits,              rhs.bits);
    swap(lhs.bitsSize,          rhs.bitsSize);
    swap(lhs.freeSubblocks,     rhs.freeSubblocks);
}

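// True when the free list is a single subblock spanning the whole buffer,
// i.e. nothing is currently allocated from this block.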
bool MemBlock::isAllFree() const {
    if (freeSubblocks.size() == 1) {
        const auto kv = *freeSubblocks.begin();
        return (kv.first == 0) && (kv.second == bitsSize);
    } else {
        return false;
    }
}

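// Carves `requestedSize` bytes out of the free list. The chosen subblock is
// removed and any unused tail is reinserted as a smaller free subblock.
// Returns the guest-physical address of the allocation, or 0 if no subblock
// is large enough.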
uint64_t MemBlock::allocate(const size_t requestedSize) {
    FreeSubblocks_t::iterator i = findFreeSubblock(&freeSubblocks, requestedSize);
    if (i == freeSubblocks.end()) {
        return 0;
    }

    const uint32_t subblockOffset = i->first;
    const uint32_t subblockSize = i->second;

    freeSubblocks.erase(i);
    if (subblockSize > requestedSize) {
        if (!freeSubblocks.insert({subblockOffset + requestedSize,
                                   subblockSize - requestedSize}).second) {
            crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
        }
    }

    return physBase + subblockOffset;
}

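// Returns a subblock to the free list and coalesces it with its immediate
// neighbors so adjacent free ranges do not stay fragmented.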
void MemBlock::unallocate(
        uint64_t phys, uint32_t subblockSize) {
    if (phys >= physBase + bitsSize) {
        crashhandler_die("%s:%d: phys >= physBase + bitsSize", __func__, __LINE__);
    }

    auto r = freeSubblocks.insert({phys - physBase, subblockSize});
    if (!r.second) {
        crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
    }

    FreeSubblocks_t::iterator i = r.first;
    if (i != freeSubblocks.begin()) {
        i = tryMergeSubblocks(&freeSubblocks, i, std::prev(i), i);
    }
    FreeSubblocks_t::iterator next = std::next(i);
    if (next != freeSubblocks.end()) {
        i = tryMergeSubblocks(&freeSubblocks, i, i, next);
    }
}

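// Best-fit search: scan the free list for the smallest subblock that can
// hold `sz` bytes.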
FreeSubblocks_t::iterator MemBlock::findFreeSubblock(FreeSubblocks_t* fsb,
                                                     const size_t sz) {
    if (fsb->empty()) {
        return fsb->end();
    } else {
        auto best = fsb->end();
        size_t bestSize = ~size_t(0);

        for (auto i = fsb->begin(); i != fsb->end(); ++i) {
            // Compare the candidate's size, not the request, against the
            // best so far; otherwise this degrades into a last-fit scan.
            if (i->second >= sz && i->second < bestSize) {
                best = i;
                bestSize = i->second;
            }
        }

        return best;
    }
}

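// If `lhs` ends exactly where `rhs` begins, replaces both with one merged
// subblock and returns an iterator to it; otherwise returns `ret` unchanged.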
FreeSubblocks_t::iterator MemBlock::tryMergeSubblocks(
        FreeSubblocks_t* fsb,
        FreeSubblocks_t::iterator ret,
        FreeSubblocks_t::iterator lhs,
        FreeSubblocks_t::iterator rhs) {
    if (lhs->first + lhs->second == rhs->first) {
        const uint32_t subblockOffset = lhs->first;
        const uint32_t subblockSize = lhs->second + rhs->second;

        fsb->erase(lhs);
        fsb->erase(rhs);
        auto r = fsb->insert({subblockOffset, subblockSize});
        if (!r.second) {
            crashhandler_die("%s:%d: fsb->insert", __func__, __LINE__);
        }

        return r.first;
    } else {
        return ret;
    }
}

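// Snapshot format for one block: physBase (u64), bitsSize (u32), the raw
// buffer contents, then the free list as a count followed by {offset, size}
// pairs.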
void MemBlock::save(base::Stream* stream) const {
    stream->putBe64(physBase);
    stream->putBe32(bitsSize);
    stream->write(bits, bitsSize);
    stream->putBe32(freeSubblocks.size());
    for (const auto& kv: freeSubblocks) {
        stream->putBe32(kv.first);
        stream->putBe32(kv.second);
    }
}

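// Rebuilds a block from a snapshot. The guest-physical base at save time is
// kept in physBaseLoaded so translatePhysAddr() can relocate saved addresses
// into the newly allocated region.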
bool MemBlock::load(base::Stream* stream,
                    const address_space_device_control_ops* ops,
                    const AddressSpaceHwFuncs* hw,
                    MemBlock* block) {
    const uint64_t physBaseLoaded = stream->getBe64();
    const uint32_t bitsSize = stream->getBe32();
    void* const bits = android::aligned_buf_alloc(kAlignment, bitsSize);
    if (!bits) {
        return false;
    }
    if (stream->read(bits, bitsSize) != static_cast<ssize_t>(bitsSize)) {
        android::aligned_buf_free(bits);
        return false;
    }
    const uint64_t physBase = allocateAddressSpaceBlock(hw, bitsSize);
    if (!physBase) {
        android::aligned_buf_free(bits);
        return false;
    }
    if (!ops->add_memory_mapping(physBase, bits, bitsSize)) {
        freeAddressBlock(hw, physBase);
        android::aligned_buf_free(bits);
        return false;
    }

    FreeSubblocks_t freeSubblocks;
    for (uint32_t freeSubblocksSize = stream->getBe32();
         freeSubblocksSize > 0;
         --freeSubblocksSize) {
        const uint32_t off = stream->getBe32();
        const uint32_t sz = stream->getBe32();
        if (!freeSubblocks.insert({off, sz}).second) {
            crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
        }
    }

    block->hw = hw;
    block->ops = ops;
    block->physBase = physBase;
    block->physBaseLoaded = physBaseLoaded;
    block->bits = bits;
    block->bitsSize = bitsSize;
    block->freeSubblocks = std::move(freeSubblocks);

    return true;
}

AddressSpaceSharedSlotsHostMemoryAllocatorContext::AddressSpaceSharedSlotsHostMemoryAllocatorContext(
    const address_space_device_control_ops *ops, const AddressSpaceHwFuncs* hw)
  : m_ops(ops),
    m_hw(hw) {}

AddressSpaceSharedSlotsHostMemoryAllocatorContext::~AddressSpaceSharedSlotsHostMemoryAllocatorContext() {
    clear();
}

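// Entry point for guest pings: info->metadata carries the command on the way
// in and the result (0 on success, -1 on failure) on the way out.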
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::perform(AddressSpaceDevicePingInfo *info) {
    uint64_t result;

    switch (static_cast<HostMemoryAllocatorCommand>(info->metadata)) {
    case HostMemoryAllocatorCommand::Allocate:
        result = allocate(info);
        break;

    case HostMemoryAllocatorCommand::Unallocate:
        result = unallocate(info->phys_addr);
        break;

    case HostMemoryAllocatorCommand::CheckIfSharedSlotsSupported:
        result = 0;
        break;

    default:
        result = -1;
        break;
    }

    info->metadata = result;
}

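// Rounds the request up to kAlignment, then tries to satisfy it from an
// existing block; if none has room, allocates a fresh block of at least
// 64 MiB and carves the allocation out of it.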
uint64_t
AddressSpaceSharedSlotsHostMemoryAllocatorContext::allocate(
        AddressSpaceDevicePingInfo *info) {
    const uint32_t alignedSize =
        ((info->size + kAlignment - 1) / kAlignment) * kAlignment;

    AutoLock lock(g_blocksLock);
    for (auto& kv : g_blocks) {
        uint64_t physAddr = kv.second.allocate(alignedSize);
        if (physAddr) {
            return populatePhysAddr(info, physAddr, alignedSize, &kv.second);
        }
    }

    const uint32_t defaultSize = 64u << 20;
    MemBlock newBlock(m_ops, m_hw, std::max(alignedSize, defaultSize));
    const uint64_t physAddr = newBlock.allocate(alignedSize);
    if (!physAddr) {
        return -1;
    }

    const uint64_t physBase = newBlock.physBase;
    auto r = g_blocks.insert({physBase, std::move(newBlock)});
    if (!r.second) {
        crashhandler_die("%s:%d: g_blocks.insert", __func__, __LINE__);
    }

    return populatePhysAddr(info, physAddr, alignedSize, &r.first->second);
}

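// Frees one allocation owned by this context. If that leaves the owning
// block completely empty, surplus empty blocks are garbage-collected,
// keeping at most one around for reuse.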
uint64_t
AddressSpaceSharedSlotsHostMemoryAllocatorContext::unallocate(
        const uint64_t physAddr) {
    AutoLock lock(g_blocksLock);

    auto i = m_allocations.find(physAddr);
    if (i == m_allocations.end()) {
        return -1;
    }

    MemBlock* block = i->second.second;
    block->unallocate(physAddr, i->second.first);
    m_allocations.erase(physAddr);

    if (block->isAllFree()) {
        gcEmptyBlocks(1);
    }

    return 0;
}

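// Destroys fully-free blocks beyond the first `allowedEmpty` of them; the
// MemBlock destructor unmaps and frees the backing memory.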
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::gcEmptyBlocks(int allowedEmpty) {
    auto i = g_blocks.begin();
    while (i != g_blocks.end()) {
        if (i->second.isAllFree()) {
            if (allowedEmpty > 0) {
                --allowedEmpty;
                ++i;
            } else {
                i = g_blocks.erase(i);
            }
        } else {
            ++i;
        }
    }
}

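// Records the allocation in this context's bookkeeping and reports it back
// to the guest as an offset from the start of the shared physical window.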
uint64_t AddressSpaceSharedSlotsHostMemoryAllocatorContext::populatePhysAddr(
        AddressSpaceDevicePingInfo *info,
        const uint64_t physAddr,
        const uint32_t alignedSize,
        MemBlock* owner) {
    info->phys_addr = physAddr - get_address_space_device_hw_funcs()->getPhysAddrStartLocked();
    info->size = alignedSize;
    if (!m_allocations.insert({physAddr, {alignedSize, owner}}).second) {
        crashhandler_die("%s:%d: m_allocations.insert", __func__, __LINE__);
    }
    return 0;
}

AddressSpaceDeviceType AddressSpaceSharedSlotsHostMemoryAllocatorContext::getDeviceType() const {
    return AddressSpaceDeviceType::SharedSlotsHostMemoryAllocator;
}

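// Per-context snapshot: only the {physAddr, size} pairs are written here;
// the blocks themselves (and their contents) are handled by
// globalStateSave().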
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::save(base::Stream* stream) const {
    AutoLock lock(g_blocksLock);

    stream->putBe32(m_allocations.size());
    for (const auto& kv: m_allocations) {
        stream->putBe64(kv.first);
        stream->putBe32(kv.second.first);
    }
}

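// Per-context snapshot restore: each saved address is translated into the
// reloaded blocks (which globalStateLoad() is expected to have populated
// already) and re-registered in m_allocations.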
bool AddressSpaceSharedSlotsHostMemoryAllocatorContext::load(base::Stream* stream) {
    clear();

    AutoLock lock(g_blocksLock);
    for (uint32_t sz = stream->getBe32(); sz > 0; --sz) {
        const uint64_t phys = stream->getBe64();
        const uint32_t size = stream->getBe32();
        const auto r = translatePhysAddr(phys);
        // Check the translation result, not the raw saved address.
        if (r.second) {
            if (!m_allocations.insert({r.first, {size, r.second}}).second) {
                crashhandler_die("%s:%d: m_allocations.insert", __func__, __LINE__);
            }
        } else {
            crashhandler_die("%s:%d: translatePhysAddr", __func__, __LINE__);
        }
    }

    return true;
}

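// Returns every allocation owned by this context to its block's free list.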
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::clear() {
    AutoLock lock(g_blocksLock);
    for (const auto& kv: m_allocations) {
        MemBlock* block = kv.second.second;
        block->unallocate(kv.first, kv.second.first);
    }
    m_allocations.clear();
}

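// Device-wide snapshot of all blocks; runs once per snapshot, independently
// of the per-context save() above.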
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateSave(base::Stream* stream) {
    AutoLock lock(g_blocksLock);

    stream->putBe32(g_blocks.size());
    for (const auto& kv: g_blocks) {
        kv.second.save(stream);
    }
}

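// Device-wide snapshot restore: rebuilds every block before any
// context-level load() runs.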
bool AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateLoad(
        base::Stream* stream,
        const address_space_device_control_ops *ops,
        const AddressSpaceHwFuncs* hw) {
    AutoLock lock(g_blocksLock);

    for (uint32_t sz = stream->getBe32(); sz > 0; --sz) {
        MemBlock block;
        if (!MemBlock::load(stream, ops, hw, &block)) { return false; }

        const uint64_t physBase = block.physBase;
        if (!g_blocks.insert({physBase, std::move(block)}).second) {
            crashhandler_die("%s:%d: g_blocks.insert", __func__, __LINE__);
        }
    }

    return true;
}

void AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateClear() {
    AutoLock lock(g_blocksLock);
    g_blocks.clear();
}

}  // namespace emulation
}  // namespace android