// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_graphics.h"

#include <memory>
#include <optional>

#include "aemu/base/AlignedBuf.h"
#include "aemu/base/SubAllocator.h"
#include "aemu/base/synchronization/Lock.h"
#include "host-common/GfxstreamFatalError.h"
#include "host-common/address_space_device.h"
#include "host-common/address_space_device.hpp"
#include "host-common/crash-handler.h"
#include "host-common/crash_reporter.h"
#include "host-common/globals.h"
#include "host-common/vm_operations.h"

#define ASGFX_DEBUG 0

#if ASGFX_DEBUG
#define ASGFX_LOG(fmt,...) printf("%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
#else
#define ASGFX_LOG(fmt,...)
#endif

using android::base::AutoLock;
using android::base::Lock;
using android::base::SubAllocator;
using emugl::ABORT_REASON_OTHER;
using emugl::FatalError;

namespace android {
namespace emulation {
namespace asg {

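// Parameters for creating (or re-creating, on snapshot load) an
// allocation out of a Block.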
struct AllocationCreateInfo {
    bool virtioGpu;
    bool hostmemRegisterFixed;
    bool fromLoad;
    uint64_t size;
    uint64_t hostmemId;
    void *externalAddr;
    std::optional<uint32_t> dedicatedContextHandle;
};

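// A large contiguous region of host memory that individual allocations are
// carved out of via a SubAllocator. A Block either owns its buffer
// (allocated with aligned_buf_alloc and mapped into guest-physical space)
// or, when `external` is set, wraps memory owned elsewhere, such as a
// virtio-gpu resource.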
struct Block {
    char* buffer = nullptr;
    uint64_t bufferSize = 0;
    SubAllocator* subAlloc = nullptr;
    uint64_t offsetIntoPhys = 0; // guest claimShared/mmap uses this
    bool isEmpty = true;
    std::optional<uint32_t> dedicatedContextHandle;
    bool usesVirtioGpuHostmem = false;
    uint64_t hostmemId = 0;
    bool external = false;
};

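// Process-wide state shared by all ASG contexts: the address space device
// control ops, the consumer interface, and the ring/buffer/combined block
// lists that per-context allocations come from. Accessed via sGlobals().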
class Globals {
public:
    Globals() :
        mPerContextBufferSize(
                aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize) { }

    ~Globals() { clear(); }

    void initialize(const address_space_device_control_ops* ops) {
        AutoLock lock(mLock);

        if (mInitialized) return;

        mControlOps = ops;
        mInitialized = true;
    }

    void setConsumer(ConsumerInterface iface) {
        mConsumerInterface = iface;
    }

    ConsumerInterface getConsumerInterface() {
        if (!mConsumerInterface.create ||
            !mConsumerInterface.destroy ||
            !mConsumerInterface.preSave ||
            !mConsumerInterface.globalPreSave ||
            !mConsumerInterface.save ||
            !mConsumerInterface.globalPostSave ||
            !mConsumerInterface.postSave) {
            crashhandler_die("Consumer interface has not been set\n");
        }
        return mConsumerInterface;
    }

    const address_space_device_control_ops* controlOps() {
        return mControlOps;
    }

    void clear() {
        for (auto& block: mRingBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block: mBufferBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block: mCombinedBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        mRingBlocks.clear();
        mBufferBlocks.clear();
        mCombinedBlocks.clear();
    }

    uint64_t perContextBufferSize() const {
        return mPerContextBufferSize;
    }

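    // First-fit allocation: walk the existing blocks (backing any empty
    // slot encountered along the way), skip blocks dedicated to a different
    // context, and sub-allocate from the first block with room. If none
    // fits, append a new block and allocate from that.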
    Allocation newAllocation(struct AllocationCreateInfo& create,
                             std::vector<Block>& existingBlocks) {
        AutoLock lock(mLock);

        if (create.size > ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE) {
            crashhandler_die(
                "wanted size 0x%llx which is "
                "greater than block size 0x%llx",
                (unsigned long long)create.size,
                (unsigned long long)ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
        }

        Allocation res;

        size_t index = 0;
        for (index = 0; index < existingBlocks.size(); index++) {
            auto& block = existingBlocks[index];

            if (block.isEmpty) {
                fillBlockLocked(block, create);
            }

            if (block.dedicatedContextHandle != create.dedicatedContextHandle) {
                continue;
            }

            auto buf = block.subAlloc->alloc(create.size);
            if (buf) {
                res.buffer = (char*)buf;
                res.blockIndex = index;
                res.offsetIntoPhys =
                    block.offsetIntoPhys +
                    block.subAlloc->getOffset(buf);
                res.size = create.size;
                res.dedicatedContextHandle = create.dedicatedContextHandle;
                res.hostmemId = create.hostmemId;
                return res;
            } else {
                // block full
            }
        }

        Block newBlock;
        fillBlockLocked(newBlock, create);

        auto buf = newBlock.subAlloc->alloc(create.size);

        if (!buf) {
            crashhandler_die(
                "failed to allocate size 0x%llx "
                "(no free slots or out of host memory)",
                (unsigned long long)create.size);
        }

        existingBlocks.push_back(newBlock);

        res.buffer = (char*)buf;
        res.blockIndex = index;
        res.offsetIntoPhys =
            newBlock.offsetIntoPhys +
            newBlock.subAlloc->getOffset(buf);
        res.size = create.size;
        res.dedicatedContextHandle = create.dedicatedContextHandle;
        res.hostmemId = create.hostmemId;

        return res;
    }

    void deleteAllocation(const Allocation& alloc, std::vector<Block>& existingBlocks) {
        if (!alloc.buffer) return;

        AutoLock lock(mLock);

        if (existingBlocks.size() <= alloc.blockIndex) {
            crashhandler_die(
                "should be a block at index %zu "
                "but it is not found", alloc.blockIndex);
        }

        auto& block = existingBlocks[alloc.blockIndex];

        if (block.external) {
            destroyBlockLocked(block);
            return;
        }

        if (!block.subAlloc->free(alloc.buffer)) {
            crashhandler_die(
                "failed to free %p (block start: %p)",
                alloc.buffer,
                block.buffer);
        }

        if (shouldDestroyBlockLocked(block)) {
            destroyBlockLocked(block);
        }
    }

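    // Allocation entry points. The default path draws ring storage and
    // transfer buffers from separate block lists; the virtio-gpu path uses
    // a single dedicated "combined" allocation (ring followed by buffer)
    // and hands out non-owning views into it.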
    Allocation allocRingStorage() {
        struct AllocationCreateInfo create = {0};
        create.size = sizeof(struct asg_ring_storage);
        return newAllocation(create, mRingBlocks);
    }

    void freeRingStorage(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mRingBlocks);
    }

    Allocation allocBuffer() {
        struct AllocationCreateInfo create = {0};
        create.size = mPerContextBufferSize;
        return newAllocation(create, mBufferBlocks);
    }

    void freeBuffer(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mBufferBlocks);
    }

    Allocation allocRingAndBufferStorageDedicated(const struct AddressSpaceCreateInfo& asgCreate) {
        if (!asgCreate.handle) {
            crashhandler_die("Dedicated ASG allocation requested without dedicated handle.\n");
        }

        struct AllocationCreateInfo create = {0};
        create.size = sizeof(struct asg_ring_storage) + mPerContextBufferSize;
        create.dedicatedContextHandle = asgCreate.handle;
        create.virtioGpu = true;
        if (asgCreate.externalAddr) {
            create.externalAddr = asgCreate.externalAddr;
            if (asgCreate.externalAddrSize < static_cast<uint64_t>(create.size)) {
                crashhandler_die("External address size too small\n");
            }
            create.size = asgCreate.externalAddrSize;
        }

        return newAllocation(create, mCombinedBlocks);
    }

    Allocation allocRingViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer;
        res.size = sizeof(struct asg_ring_storage);
        res.isView = true;
        return res;
    }

    Allocation allocBufferViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer + sizeof(asg_ring_storage);
        res.size = mPerContextBufferSize;
        res.isView = true;
        return res;
    }

    void freeRingAndBuffer(const Allocation& alloc) {
        deleteAllocation(alloc, mCombinedBlocks);
    }

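    // Global snapshot support: save() writes the three block-list sizes
    // followed by each block (see saveBlockLocked for the per-block
    // layout); load() clears live state and reads everything back.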
    void preSave() {
        // mConsumerInterface.globalPreSave();
    }

    void save(base::Stream* stream) {
        stream->putBe64(mRingBlocks.size());
        stream->putBe64(mBufferBlocks.size());
        stream->putBe64(mCombinedBlocks.size());

        for (const auto& block: mRingBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block: mBufferBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block: mCombinedBlocks) {
            saveBlockLocked(stream, block);
        }
    }

    void postSave() {
        // mConsumerInterface.globalPostSave();
    }

    bool load(base::Stream* stream,
              const std::optional<AddressSpaceDeviceLoadResources>& resources) {
        clear();
        mConsumerInterface.globalPreLoad();

        uint64_t ringBlockCount = stream->getBe64();
        uint64_t bufferBlockCount = stream->getBe64();
        uint64_t combinedBlockCount = stream->getBe64();

        mRingBlocks.resize(ringBlockCount);
        mBufferBlocks.resize(bufferBlockCount);
        mCombinedBlocks.resize(combinedBlockCount);

        for (auto& block: mRingBlocks) {
            loadBlockLocked(stream, resources, block);
        }

        for (auto& block: mBufferBlocks) {
            loadBlockLocked(stream, resources, block);
        }

        for (auto& block: mCombinedBlocks) {
            loadBlockLocked(stream, resources, block);
        }

        return true;
    }

    // Assumes that blocks have been loaded,
    // and that alloc has its blockIndex/offsetIntoPhys fields filled already
    void fillAllocFromLoad(Allocation& alloc, AddressSpaceGraphicsContext::AllocType allocType) {
        switch (allocType) {
            case AddressSpaceGraphicsContext::AllocType::AllocTypeRing:
                if (mRingBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mRingBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeBuffer:
                if (mBufferBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mBufferBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeCombined:
                if (mCombinedBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mCombinedBlocks[alloc.blockIndex], alloc);
                break;
            default:
                GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER));
                break;
        }
    }

private:

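    // Per-block snapshot layout: a 32-bit "filled" flag, then, if filled:
    // buffer size, physical offset, optional dedicated context handle,
    // virtio-gpu flag, hostmem id, the SubAllocator state, and finally the
    // raw buffer contents for blocks that own their memory.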
    void saveBlockLocked(
        base::Stream* stream,
        const Block& block) {

        if (block.isEmpty) {
            stream->putBe32(0);
            return;
        } else {
            stream->putBe32(1);
        }

        stream->putBe64(block.bufferSize);
        stream->putBe64(block.offsetIntoPhys);
        if (block.dedicatedContextHandle) {
            stream->putBe32(1);
            stream->putBe32(*block.dedicatedContextHandle);
        } else {
            stream->putBe32(0);
        }
        stream->putBe32(block.usesVirtioGpuHostmem);
        stream->putBe64(block.hostmemId);
        block.subAlloc->save(stream);
        if (!block.external) {
            stream->write(block.buffer, block.bufferSize);
        }
    }

    void loadBlockLocked(base::Stream* stream,
                         const std::optional<AddressSpaceDeviceLoadResources>& resources,
                         Block& block) {
        uint32_t filled = stream->getBe32();
        struct AllocationCreateInfo create = {0};

        if (!filled) {
            block.isEmpty = true;
            return;
        } else {
            block.isEmpty = false;
        }

        create.size = stream->getBe64(); // `bufferSize`
        block.offsetIntoPhys = stream->getBe64();
        if (stream->getBe32() == 1) {
            create.dedicatedContextHandle = stream->getBe32();
        }
        create.virtioGpu = stream->getBe32();

        if (create.virtioGpu) {
            if (!create.dedicatedContextHandle) {
                crashhandler_die(
                    "Failed to load ASG context global block: "
                    "Virtio GPU backed blocks are expected to have a dedicated context.\n");
            }

            // Blocks whose memory is backed by a Virtio GPU resource do not own the
            // external memory. The external memory must be re-loaded outside of ASG
            // and provided via `resources`.
            if (!resources) {
                crashhandler_die(
                    "Failed to load ASG context global block: "
                    "Virtio GPU backed blocks need external memory resources for loading.\n");
            }

            const auto externalMemoryIt =
                resources->contextExternalMemoryMap.find(*create.dedicatedContextHandle);
            if (externalMemoryIt == resources->contextExternalMemoryMap.end()) {
                crashhandler_die(
                    "Failed to load ASG context global block: "
                    "Virtio GPU backed blocks need an external memory replacement.\n");
            }
            const auto& externalMemory = externalMemoryIt->second;
            create.externalAddr = externalMemory.externalAddress;
        }

        create.hostmemRegisterFixed = true;
        create.fromLoad = true;
        create.hostmemId = stream->getBe64();

        fillBlockLocked(block, create);

        block.subAlloc->load(stream);

        if (!block.external) {
            stream->read(block.buffer, block.bufferSize);
        }
    }

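    // Recomputes an allocation's host pointer from its guest-physical
    // offset relative to the (re-created) block that contains it.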
    void fillAllocFromLoad(const Block& block, Allocation& alloc) {
        alloc.buffer = block.buffer + (alloc.offsetIntoPhys - block.offsetIntoPhys);
        alloc.dedicatedContextHandle = block.dedicatedContextHandle;
        alloc.hostmemId = block.hostmemId;
    }

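    // Backs a block with memory. Dedicated (virtio-gpu) blocks wrap the
    // caller-provided external address. All other blocks get a fresh
    // aligned host buffer mapped into a shared guest-physical region,
    // reusing the previously saved physical offset when restoring from a
    // snapshot.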
    void fillBlockLocked(Block& block, struct AllocationCreateInfo& create) {
        if (create.dedicatedContextHandle) {
            if (!create.virtioGpu) {
                crashhandler_die("Cannot use dedicated allocation without virtio-gpu hostmem id");
            }

            if (!create.externalAddr) {
                crashhandler_die(
                    "Cannot use dedicated allocation without external address");
            }

            block.external = true;
            block.buffer = (char*)create.externalAddr;
            block.bufferSize = create.size;
            block.subAlloc =
                new SubAllocator(block.buffer, block.bufferSize, ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
            block.offsetIntoPhys = 0;
            block.isEmpty = false;
            block.usesVirtioGpuHostmem = create.virtioGpu;
            block.hostmemId = create.hostmemId;
            block.dedicatedContextHandle = create.dedicatedContextHandle;
        } else {
            if (create.virtioGpu) {
                crashhandler_die(
                    "Only dedicated allocation allowed in virtio-gpu hostmem id path");
            } else {
                uint64_t offsetIntoPhys;

                if (create.fromLoad) {
                    offsetIntoPhys = block.offsetIntoPhys;
                    int allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionFixedLocked(
                                ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, offsetIntoPhys);
                    if (allocRes) {
                        // Disregard alloc failures for now: when this fails,
                        // we can assume the correct allocation already exists there (tested).
                    }
                } else {
                    int allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, &offsetIntoPhys);

                    if (allocRes) {
                        crashhandler_die(
                            "Failed to allocate physical address graphics backing memory.");
                    }
                }

                void* buf =
                    aligned_buf_alloc(
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE,
                        ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                mControlOps->add_memory_mapping(
                    get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                        offsetIntoPhys, buf,
                    ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                block.buffer = (char*)buf;
                block.bufferSize = ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE;
                block.subAlloc =
                    new SubAllocator(
                        buf, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE,
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
                block.offsetIntoPhys = offsetIntoPhys;
                block.isEmpty = false;
            }
        }
    }

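    // Releases a block's backing: hostmem-registered blocks are
    // unregistered, internally owned blocks are unmapped and their shared
    // physical region freed. External memory itself is left untouched;
    // only the SubAllocator wrapping it is deleted.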
    void destroyBlockLocked(Block& block) {
        if (block.usesVirtioGpuHostmem && !block.external) {
            mControlOps->hostmem_unregister(block.hostmemId);
        } else if (!block.external) {
            mControlOps->remove_memory_mapping(
                get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                    block.offsetIntoPhys,
                block.buffer,
                ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

            get_address_space_device_hw_funcs()->freeSharedHostRegionLocked(
                block.offsetIntoPhys);
        }

        delete block.subAlloc;
        if (!block.external) {
            aligned_buf_free(block.buffer);
        }

        block.isEmpty = true;
    }

    bool shouldDestroyBlockLocked(const Block& block) const {
        return block.subAlloc->empty();
    }

    Lock mLock;
    uint64_t mPerContextBufferSize;
    bool mInitialized = false;
    const address_space_device_control_ops* mControlOps = 0;
    ConsumerInterface mConsumerInterface;
    std::vector<Block> mRingBlocks;
    std::vector<Block> mBufferBlocks;
    std::vector<Block> mCombinedBlocks;
};

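// Process-wide singleton; heap-allocated and never freed so that it stays
// valid for callers late in shutdown.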
static Globals* sGlobals() {
    static Globals* g = new Globals;
    return g;
}

// static
void AddressSpaceGraphicsContext::init(const address_space_device_control_ops* ops) {
    sGlobals()->initialize(ops);
}

// static
void AddressSpaceGraphicsContext::clear() {
    sGlobals()->clear();
}

// static
void AddressSpaceGraphicsContext::setConsumer(
    ConsumerInterface iface) {
    sGlobals()->setConsumer(iface);
}

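// Sets up a context's ring and transfer buffer. On the virtio-gpu path,
// both live in one dedicated combined allocation and the ring/buffer
// allocations are views into it; otherwise each comes from the shared
// block pools. Snapshot restores skip this and initialize via load().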
AddressSpaceGraphicsContext::AddressSpaceGraphicsContext(
    const struct AddressSpaceCreateInfo& create)
    : mConsumerCallbacks((ConsumerCallbacks){
          [this] { return onUnavailableRead(); },
          [](uint64_t physAddr) { return (char*)sGlobals()->controlOps()->get_host_ptr(physAddr); },
      }),
      mConsumerInterface(sGlobals()->getConsumerInterface()) {
    if (create.fromSnapshot) {
        // Use load() instead to initialize
        return;
    }

    const bool isVirtio = (create.type == AddressSpaceDeviceType::VirtioGpuGraphics);
    if (isVirtio) {
        VirtioGpuInfo& info = mVirtioGpuInfo.emplace();
        info.contextId = create.virtioGpuContextId;
        info.capsetId = create.virtioGpuCapsetId;
        if (create.contextNameSize) {
            info.name = std::string(create.contextName, create.contextNameSize);
        }

        mCombinedAllocation = sGlobals()->allocRingAndBufferStorageDedicated(create);
        mRingAllocation = sGlobals()->allocRingViewIntoCombined(mCombinedAllocation);
        mBufferAllocation = sGlobals()->allocBufferViewIntoCombined(mCombinedAllocation);
    } else {
        mRingAllocation = sGlobals()->allocRingStorage();
        mBufferAllocation = sGlobals()->allocBuffer();
    }

    if (!mRingAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate ring for ASG context");
    }

    if (!mBufferAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate buffer for ASG context");
    }

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;
    mHostContext.ring_config->host_consumed_pos = 0;
    mHostContext.ring_config->guest_write_pos = 0;
    mHostContext.ring_config->transfer_mode = 1;
    mHostContext.ring_config->transfer_size = 0;
    mHostContext.ring_config->in_error = 0;

    mSavedConfig = *mHostContext.ring_config;

    if (create.createRenderThread) {
        mCurrentConsumer =
            mConsumerInterface.create(mHostContext, nullptr, mConsumerCallbacks,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->contextId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->capsetId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->name : std::nullopt);
    }
}

AddressSpaceGraphicsContext::~AddressSpaceGraphicsContext() {
    if (mCurrentConsumer) {
        mExiting = 1;
        *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
        mConsumerMessages.send(ConsumerCommand::Exit);
        mConsumerInterface.destroy(mCurrentConsumer);
    }

    sGlobals()->freeBuffer(mBufferAllocation);
    sGlobals()->freeRingStorage(mRingAllocation);
    sGlobals()->freeRingAndBuffer(mCombinedAllocation);
}

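// Handles guest pings. The guest drives setup with these commands:
// ASG_GET_RING / ASG_GET_BUFFER report the guest-physical offsets to map,
// ASG_SET_VERSION negotiates min(guest, host) and creates the consumer,
// ASG_NOTIFY_AVAILABLE wakes a sleeping consumer, and ASG_GET_CONFIG
// republishes the saved ring config to shared memory.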
void AddressSpaceGraphicsContext::perform(AddressSpaceDevicePingInfo* info) {
    switch (static_cast<asg_command>(info->metadata)) {
    case ASG_GET_RING:
        info->metadata = mRingAllocation.offsetIntoPhys;
        info->size = mRingAllocation.size;
        break;
    case ASG_GET_BUFFER:
        info->metadata = mBufferAllocation.offsetIntoPhys;
        info->size = mBufferAllocation.size;
        break;
    case ASG_SET_VERSION: {
        auto guestVersion = (uint32_t)info->size;
        info->size = (uint64_t)(mVersion > guestVersion ? guestVersion : mVersion);
        mVersion = (uint32_t)info->size;
        mCurrentConsumer = mConsumerInterface.create(
            mHostContext, nullptr /* no load stream */, mConsumerCallbacks, 0, 0,
            std::nullopt);

        if (mVirtioGpuInfo) {
            info->metadata = mCombinedAllocation.hostmemId;
        }
        break;
    }
    case ASG_NOTIFY_AVAILABLE:
        mConsumerMessages.trySend(ConsumerCommand::Wakeup);
        info->metadata = 0;
        break;
    case ASG_GET_CONFIG:
        *mHostContext.ring_config = mSavedConfig;
        info->metadata = 0;
        break;
    }
}

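// Consumer-side backoff when the ring is empty: spin (yielding) for up to
// kMaxUnavailableReads attempts, then publish NEED_NOTIFY and block on the
// message channel until woken. Returns 0 to keep spinning, 1 after a
// wakeup, and a negative value to exit (-1) or to pause/resume around
// snapshots (-2/-3).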
int AddressSpaceGraphicsContext::onUnavailableRead() {
    static const uint32_t kMaxUnavailableReads = 8;

    ++mUnavailableReadCount;
    ring_buffer_yield();

    ConsumerCommand cmd;

    if (mExiting) {
        mUnavailableReadCount = kMaxUnavailableReads;
    }

    if (mUnavailableReadCount >= kMaxUnavailableReads) {
        mUnavailableReadCount = 0;

sleep:
        *(mHostContext.host_state) = ASG_HOST_STATE_NEED_NOTIFY;
        mConsumerMessages.receive(&cmd);

        switch (cmd) {
            case ConsumerCommand::Wakeup:
                *(mHostContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;
                break;
            case ConsumerCommand::Exit:
                *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
                return -1;
            case ConsumerCommand::Sleep:
                goto sleep;
            case ConsumerCommand::PausePreSnapshot:
                return -2;
            case ConsumerCommand::ResumePostSnapshot:
                return -3;
            default:
                crashhandler_die(
                    "AddressSpaceGraphicsContext::onUnavailableRead: "
                    "Unknown command: 0x%x\n",
                    (uint32_t)cmd);
        }

        return 1;
    }
    return 0;
}

AddressSpaceDeviceType AddressSpaceGraphicsContext::getDeviceType() const {
    return AddressSpaceDeviceType::Graphics;
}

void AddressSpaceGraphicsContext::preSave() const {
    if (mCurrentConsumer) {
        mConsumerInterface.preSave(mCurrentConsumer);
        mConsumerMessages.send(ConsumerCommand::PausePreSnapshot);
    }
}

void AddressSpaceGraphicsContext::save(base::Stream* stream) const {
    if (mVirtioGpuInfo) {
        const VirtioGpuInfo& info = *mVirtioGpuInfo;
        stream->putBe32(1);
        stream->putBe32(info.contextId);
        stream->putBe32(info.capsetId);
        if (info.name) {
            stream->putBe32(1);
            stream->putString(*info.name);
        } else {
            stream->putBe32(0);
        }
    } else {
        stream->putBe32(0);
    }

    stream->putBe32(mVersion);
    stream->putBe32(mExiting);
    stream->putBe32(mUnavailableReadCount);

    saveAllocation(stream, mRingAllocation);
    saveAllocation(stream, mBufferAllocation);
    saveAllocation(stream, mCombinedAllocation);

    saveRingConfig(stream, mSavedConfig);

    if (mCurrentConsumer) {
        stream->putBe32(1);
        mConsumerInterface.save(mCurrentConsumer, stream);
    } else {
        stream->putBe32(0);
    }
}

void AddressSpaceGraphicsContext::postSave() const {
    if (mCurrentConsumer) {
        mConsumerMessages.send(ConsumerCommand::ResumePostSnapshot);
        mConsumerInterface.postSave(mCurrentConsumer);
    }
}

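// Mirrors save(): reads back the virtio-gpu info, version/exiting/read
// counters and the three allocations, re-derives host pointers from the
// already-loaded global blocks, then re-creates the consumer from the
// stream.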
bool AddressSpaceGraphicsContext::load(base::Stream* stream) {
    const bool hasVirtioGpuInfo = (stream->getBe32() == 1);
    if (hasVirtioGpuInfo) {
        VirtioGpuInfo& info = mVirtioGpuInfo.emplace();
        info.contextId = stream->getBe32();
        info.capsetId = stream->getBe32();
        const bool hasName = (stream->getBe32() == 1);
        if (hasName) {
            info.name = stream->getString();
        }
    }

    mVersion = stream->getBe32();
    mExiting = stream->getBe32();
    mUnavailableReadCount = stream->getBe32();

    loadAllocation(stream, mRingAllocation);
    loadAllocation(stream, mBufferAllocation);
    loadAllocation(stream, mCombinedAllocation);

    if (mVirtioGpuInfo) {
        sGlobals()->fillAllocFromLoad(mCombinedAllocation, AllocType::AllocTypeCombined);
        mRingAllocation = sGlobals()->allocRingViewIntoCombined(mCombinedAllocation);
        mBufferAllocation = sGlobals()->allocBufferViewIntoCombined(mCombinedAllocation);
    } else {
        sGlobals()->fillAllocFromLoad(mRingAllocation, AllocType::AllocTypeRing);
        sGlobals()->fillAllocFromLoad(mBufferAllocation, AllocType::AllocTypeBuffer);
    }

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;

    // In load, the live ring config state is in shared host/guest ram.
    //
    // mHostContext.ring_config->host_consumed_pos = 0;
    // mHostContext.ring_config->transfer_mode = 1;
    // mHostContext.ring_config->transfer_size = 0;
    // mHostContext.ring_config->in_error = 0;

    loadRingConfig(stream, mSavedConfig);

    const bool hasConsumer = stream->getBe32() == 1;
    if (hasConsumer) {
        mCurrentConsumer =
            mConsumerInterface.create(mHostContext, stream, mConsumerCallbacks,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->contextId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->capsetId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->name : std::nullopt);
        mConsumerInterface.postLoad(mCurrentConsumer);
    }

    return true;
}


void AddressSpaceGraphicsContext::globalStatePreSave() {
    sGlobals()->preSave();
}

void AddressSpaceGraphicsContext::globalStateSave(base::Stream* stream) {
    sGlobals()->save(stream);
}

void AddressSpaceGraphicsContext::globalStatePostSave() {
    sGlobals()->postSave();
}

bool AddressSpaceGraphicsContext::globalStateLoad(
    base::Stream* stream, const std::optional<AddressSpaceDeviceLoadResources>& resources) {
    return sGlobals()->load(stream, resources);
}

void AddressSpaceGraphicsContext::saveRingConfig(base::Stream* stream, const struct asg_ring_config& config) const {
    stream->putBe32(config.buffer_size);
    stream->putBe32(config.flush_interval);
    stream->putBe32(config.host_consumed_pos);
    stream->putBe32(config.guest_write_pos);
    stream->putBe32(config.transfer_mode);
    stream->putBe32(config.transfer_size);
    stream->putBe32(config.in_error);
}

void AddressSpaceGraphicsContext::saveAllocation(base::Stream* stream, const Allocation& alloc) const {
    stream->putBe64(alloc.blockIndex);
    stream->putBe64(alloc.offsetIntoPhys);
    stream->putBe64(alloc.size);
    stream->putBe32(alloc.isView);
}

void AddressSpaceGraphicsContext::loadRingConfig(base::Stream* stream, struct asg_ring_config& config) {
    config.buffer_size = stream->getBe32();
    config.flush_interval = stream->getBe32();
    config.host_consumed_pos = stream->getBe32();
    config.guest_write_pos = stream->getBe32();
    config.transfer_mode = stream->getBe32();
    config.transfer_size = stream->getBe32();
    config.in_error = stream->getBe32();
}

void AddressSpaceGraphicsContext::loadAllocation(base::Stream* stream, Allocation& alloc) {
    alloc.blockIndex = stream->getBe64();
    alloc.offsetIntoPhys = stream->getBe64();
    alloc.size = stream->getBe64();
    alloc.isView = stream->getBe32();
}

}  // namespace asg
}  // namespace emulation
}  // namespace android