// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_graphics.h"

#include "host-common/address_space_device.hpp"
#include "host-common/address_space_device.h"
#include "host-common/vm_operations.h"
#include "host-common/crash_reporter.h"
#include "host-common/globals.h"
#include "base/AlignedBuf.h"
#include "base/SubAllocator.h"
#include "base/Lock.h"

#include <memory>

#define ASGFX_DEBUG 0

#if ASGFX_DEBUG
#define ASGFX_LOG(fmt,...) printf("%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
#else
#define ASGFX_LOG(fmt,...)
#endif

using android::base::AutoLock;
using android::base::Lock;
using android::base::SubAllocator;

namespace android {
namespace emulation {
namespace asg {

struct Block {
    char* buffer = nullptr;
    SubAllocator* subAlloc = nullptr;
    uint64_t offsetIntoPhys = 0; // guest claimShared/mmap uses this
    // size: implicitly ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE
    bool isEmpty = true;
    bool dedicated = false;
    size_t dedicatedSize = 0;
    bool usesVirtioGpuHostmem = false;
    uint64_t hostmemId = 0;
};
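
// Each non-dedicated Block is one host allocation of
// ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE bytes, mapped into guest physical memory
// at (physAddrStart + offsetIntoPhys) and carved into per-context pieces by
// |subAlloc| (ring storage or write buffers, depending on which block list it
// belongs to). Dedicated blocks back exactly one allocation and are exported
// through a virtio-gpu hostmem id rather than a fixed physical offset.
// Illustrative layout (hypothetical contexts A and B, for orientation only):
//
//   host buffer:    [ piece for ctx A | piece for ctx B | free ............ ]
//   guest physical: the same bytes, visible at physAddrStart + offsetIntoPhys
//                   plus the SubAllocator offset of each piece.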

class Globals {
public:
    Globals() :
        mPerContextBufferSize(
            android_hw->hw_gltransport_asg_writeBufferSize) { }

    ~Globals() { clear(); }

    void initialize(const address_space_device_control_ops* ops) {
        AutoLock lock(mLock);

        if (mInitialized) return;

        mControlOps = ops;
        mInitialized = true;
    }

    void setConsumer(ConsumerInterface iface) {
        mConsumerInterface = iface;
    }

    ConsumerInterface getConsumerInterface() {
        if (!mConsumerInterface.create ||
            !mConsumerInterface.destroy ||
            !mConsumerInterface.preSave ||
            !mConsumerInterface.globalPreSave ||
            !mConsumerInterface.save ||
            !mConsumerInterface.globalPostSave ||
            !mConsumerInterface.postSave) {
            crashhandler_die("Consumer interface has not been set\n");
        }
        return mConsumerInterface;
    }

    const address_space_device_control_ops* controlOps() {
        return mControlOps;
    }

    void clear() {
        for (auto& block: mRingBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block: mBufferBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block: mCombinedBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        mRingBlocks.clear();
        mBufferBlocks.clear();
        mCombinedBlocks.clear();
    }

    uint64_t perContextBufferSize() const {
        return mPerContextBufferSize;
    }

    Allocation newAllocation(
        uint64_t wantedSize,
        std::vector<Block>& existingBlocks,
        bool dedicated = false,
        bool usesVirtioGpuHostmem = false) {

        AutoLock lock(mLock);

        if (wantedSize > ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE) {
            crashhandler_die(
                "wanted size 0x%llx which is "
                "greater than block size 0x%llx",
                (unsigned long long)wantedSize,
                (unsigned long long)ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
        }

        size_t index = 0;

        Allocation res;

        for (auto& block : existingBlocks) {

            // Initialize to 0 so the id stays well defined when the block is
            // reused or does not use virtio-gpu hostmem.
            uint64_t hostmemId = 0;
            if (block.isEmpty) {
                fillBlockLocked(
                    block,
                    dedicated,
                    wantedSize,
                    usesVirtioGpuHostmem,
                    &hostmemId);
            }

            auto buf = block.subAlloc->alloc(wantedSize);

            if (buf) {
                res.buffer = (char*)buf;
                res.blockIndex = index;
                res.offsetIntoPhys =
                    block.offsetIntoPhys +
                    block.subAlloc->getOffset(buf);
                res.size = wantedSize;
                res.dedicated = dedicated;
                res.hostmemId = hostmemId;
                return res;
            } else {
                // block full
            }

            ++index;
        }

        uint64_t hostmemId = 0;
        Block newBlock;
        fillBlockLocked(
            newBlock,
            dedicated,
            wantedSize,
            usesVirtioGpuHostmem,
            &hostmemId);

        auto buf = newBlock.subAlloc->alloc(wantedSize);

        if (!buf) {
            crashhandler_die(
                "failed to allocate size 0x%llx "
                "(no free slots or out of host memory)",
                (unsigned long long)wantedSize);
        }

        existingBlocks.push_back(newBlock);

        res.buffer = (char*)buf;
        res.blockIndex = index;
        res.offsetIntoPhys =
            newBlock.offsetIntoPhys +
            newBlock.subAlloc->getOffset(buf);
        res.size = wantedSize;
        res.dedicated = dedicated;
        res.hostmemId = hostmemId;

        return res;
    }
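
    // First-fit behavior sketch (illustrative only; nothing in this file calls
    // the allocators exactly like this): allocations land in the first block
    // whose SubAllocator still has room, otherwise a new block is created and
    // appended to |existingBlocks|.
    //
    //   Allocation r0 = allocRingStorage();  // fills mRingBlocks[0]
    //   Allocation r1 = allocRingStorage();  // same block, next sub-offset
    //   freeRingStorage(r0);                 // frees the sub-allocation; the
    //                                        // block itself is destroyed only
    //                                        // once its SubAllocator is empty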

    void deleteAllocation(const Allocation& alloc, std::vector<Block>& existingBlocks) {
        if (!alloc.buffer) return;

        AutoLock lock(mLock);

        if (existingBlocks.size() <= alloc.blockIndex) {
            crashhandler_die(
                "should be a block at index %zu "
                "but it is not found", alloc.blockIndex);
        }

        auto& block = existingBlocks[alloc.blockIndex];

        if (block.dedicated) {
            destroyBlockLocked(block);
            return;
        }

        if (!block.subAlloc->free(alloc.buffer)) {
            crashhandler_die(
                "failed to free %p (block start: %p)",
                alloc.buffer,
                block.buffer);
        }

        if (shouldDestroyBlockLocked(block)) {
            destroyBlockLocked(block);
        }
    }

    Allocation allocRingStorage() {
        return newAllocation(
            sizeof(struct asg_ring_storage), mRingBlocks);
    }

    void freeRingStorage(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mRingBlocks);
    }

    Allocation allocBuffer() {
        return newAllocation(
            mPerContextBufferSize, mBufferBlocks);
    }

    void freeBuffer(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mBufferBlocks);
    }

    Allocation allocRingAndBufferStorageDedicated() {
        return newAllocation(
            sizeof(struct asg_ring_storage) + mPerContextBufferSize,
            mCombinedBlocks,
            true /* dedicated */,
            true /* virtio-gpu hostmem id mode */);
    }

    Allocation allocRingViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer;
        res.size = sizeof(struct asg_ring_storage);
        res.isView = true;
        return res;
    }

    Allocation allocBufferViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer + sizeof(asg_ring_storage);
        res.size = mPerContextBufferSize;
        res.isView = true;
        return res;
    }
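
    // For the virtio-gpu path a single dedicated block holds both pieces back
    // to back, and the two views above are just offsets into it:
    //
    //   combined allocation: [ asg_ring_storage | per-context buffer ]
    //                          ^ ring view        ^ buffer view, starting at
    //                                               sizeof(asg_ring_storage)
    //
    // Views carry isView = true, so freeRingStorage()/freeBuffer() skip them
    // and only freeRingAndBuffer() releases the underlying block.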

    void freeRingAndBuffer(const Allocation& alloc) {
        deleteAllocation(alloc, mCombinedBlocks);
    }

    void preSave() {
        // mConsumerInterface.globalPreSave();
    }

    void save(base::Stream* stream) {
        stream->putBe64(mRingBlocks.size());
        stream->putBe64(mBufferBlocks.size());
        stream->putBe64(mCombinedBlocks.size());

        for (const auto& block: mRingBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block: mBufferBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block: mCombinedBlocks) {
            saveBlockLocked(stream, block);
        }
    }

    void postSave() {
        // mConsumerInterface.globalPostSave();
    }

    bool load(base::Stream* stream) {
        clear();
        mConsumerInterface.globalPreLoad();

        uint64_t ringBlockCount = stream->getBe64();
        uint64_t bufferBlockCount = stream->getBe64();
        uint64_t combinedBlockCount = stream->getBe64();

        mRingBlocks.resize(ringBlockCount);
        mBufferBlocks.resize(bufferBlockCount);
        mCombinedBlocks.resize(combinedBlockCount);

        for (auto& block: mRingBlocks) {
            loadBlockLocked(stream, block);
        }

        for (auto& block: mBufferBlocks) {
            loadBlockLocked(stream, block);
        }

        for (auto& block: mCombinedBlocks) {
            loadBlockLocked(stream, block);
        }

        return true;
    }

    // Assumes that blocks have been loaded,
    // and that alloc has its blockIndex/offsetIntoPhys fields filled already
    void fillAllocFromLoad(Allocation& alloc, AddressSpaceGraphicsContext::AllocType allocType) {
        switch (allocType) {
            case AddressSpaceGraphicsContext::AllocType::AllocTypeRing:
                if (mRingBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mRingBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeBuffer:
                if (mBufferBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mBufferBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeCombined:
                if (mCombinedBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mCombinedBlocks[alloc.blockIndex], alloc);
                break;
            default:
                abort();
                break;
        }
    }

private:

    void saveBlockLocked(
        base::Stream* stream,
        const Block& block) {

        if (block.isEmpty) {
            stream->putBe32(0);
            return;
        } else {
            stream->putBe32(1);
        }

        stream->putBe64(block.offsetIntoPhys);
        stream->putBe32(block.dedicated);
        stream->putBe64(block.dedicatedSize);
        stream->putBe32(block.usesVirtioGpuHostmem);
        stream->putBe64(block.hostmemId);

        block.subAlloc->save(stream);

        stream->putBe64(ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
        stream->write(block.buffer, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
    }
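
    // Snapshot layout per block, as written above and consumed by
    // loadBlockLocked() below (integers big-endian via the Stream API):
    //
    //   u32  filled (0 = empty block, nothing else follows)
    //   u64  offsetIntoPhys
    //   u32  dedicated
    //   u64  dedicatedSize
    //   u32  usesVirtioGpuHostmem
    //   u64  hostmemId
    //   ...  SubAllocator state
    //   u64  block size (always ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE)
    //   raw  block contents (ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE bytes)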

    void loadBlockLocked(
        base::Stream* stream,
        Block& block) {

        uint32_t filled = stream->getBe32();

        if (!filled) {
            block.isEmpty = true;
            return;
        } else {
            block.isEmpty = false;
        }

        block.offsetIntoPhys = stream->getBe64();
        block.dedicated = stream->getBe32();
        block.dedicatedSize = stream->getBe64();
        block.usesVirtioGpuHostmem = stream->getBe32();
        block.hostmemId = stream->getBe64();

        fillBlockLocked(
            block,
            block.dedicated,
            block.dedicatedSize,
            block.usesVirtioGpuHostmem,
            &block.hostmemId,
            true /* register fixed */,
            true /* from snapshot load */);

        block.subAlloc->load(stream);

        stream->getBe64(); // block size; always ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE
        stream->read(block.buffer, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
    }

    void fillAllocFromLoad(const Block& block, Allocation& alloc) {
        alloc.buffer = block.buffer + (alloc.offsetIntoPhys - block.offsetIntoPhys);
        alloc.dedicated = block.dedicated;
        alloc.hostmemId = block.hostmemId;
    }
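
    // The saved offset is relative to the start of the whole shared region, so
    // the host pointer is recovered by rebasing it onto the owning block. With
    // hypothetical values offsetIntoPhys(block) = 0x200000 and
    // offsetIntoPhys(alloc) = 0x201000, the allocation starts at
    // block.buffer + 0x1000.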

    void fillBlockLocked(
        Block& block,
        bool dedicated = false,
        size_t dedicatedSize = 0,
        bool usesVirtioGpuHostmem = false,
        uint64_t* hostmemIdOut = nullptr,
        bool hostmemRegisterFixed = false,
        bool fromLoad = false) {

        if (dedicated) {
            if (usesVirtioGpuHostmem) {
                void* buf =
                    aligned_buf_alloc(
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE,
                        dedicatedSize);

                uint64_t hostmemId =
                    mControlOps->hostmem_register(
                        (uint64_t)(uintptr_t)buf,
                        dedicatedSize,
                        hostmemRegisterFixed,
                        hostmemIdOut ? *hostmemIdOut : 0);

                if (hostmemIdOut) *hostmemIdOut = hostmemId;

                block.buffer = (char*)buf;
                block.subAlloc =
                    new SubAllocator(
                        buf, dedicatedSize,
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
                block.offsetIntoPhys = 0;

                block.isEmpty = false;
                block.usesVirtioGpuHostmem = usesVirtioGpuHostmem;
                block.hostmemId = hostmemId;
                block.dedicated = true;
                block.dedicatedSize = dedicatedSize;

            } else {
                crashhandler_die(
                    "Cannot use dedicated allocation without virtio-gpu hostmem id");
            }
        } else {
            if (usesVirtioGpuHostmem) {
                crashhandler_die(
                    "Only dedicated allocation allowed in virtio-gpu hostmem id path");
            } else {
                uint64_t offsetIntoPhys;
                int allocRes = 0;

                if (fromLoad) {
                    offsetIntoPhys = block.offsetIntoPhys;
                    allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionFixedLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, offsetIntoPhys);
                    if (allocRes) {
                        // Disregard alloc failures for now. This is because when it fails,
                        // we can assume the correct allocation already exists there (tested)
                    }
                } else {
                    allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, &offsetIntoPhys);

                    if (allocRes) {
                        crashhandler_die(
                            "Failed to allocate physical address graphics backing memory.");
                    }
                }

                void* buf =
                    aligned_buf_alloc(
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE,
                        ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                mControlOps->add_memory_mapping(
                    get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                    offsetIntoPhys, buf,
                    ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                block.buffer = (char*)buf;
                block.subAlloc =
                    new SubAllocator(
                        buf, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE,
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
                block.offsetIntoPhys = offsetIntoPhys;

                block.isEmpty = false;
            }
        }
    }
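
    // In short, a block is backed in one of two ways: the virtio-gpu path
    // registers a host allocation with hostmem_register() and hands the
    // resulting hostmemId to the guest, while the legacy path reserves a range
    // in the device's shared physical region and maps the host allocation
    // there with add_memory_mapping(). destroyBlockLocked() below undoes
    // whichever of the two was used.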

    void destroyBlockLocked(Block& block) {

        if (block.usesVirtioGpuHostmem) {
            mControlOps->hostmem_unregister(block.hostmemId);
        } else {
            mControlOps->remove_memory_mapping(
                get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                block.offsetIntoPhys,
                block.buffer,
                ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

            get_address_space_device_hw_funcs()->freeSharedHostRegionLocked(
                block.offsetIntoPhys);
        }

        delete block.subAlloc;

        aligned_buf_free(block.buffer);

        block.isEmpty = true;
    }

    bool shouldDestroyBlockLocked(const Block& block) const {
        return block.subAlloc->empty();
    }

    Lock mLock;
    uint64_t mPerContextBufferSize;
    bool mInitialized = false;
    const address_space_device_control_ops* mControlOps = nullptr;
    ConsumerInterface mConsumerInterface;
    std::vector<Block> mRingBlocks;
    std::vector<Block> mBufferBlocks;
    std::vector<Block> mCombinedBlocks;
};

static Globals* sGlobals() {
    static Globals* g = new Globals;
    return g;
}
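
// sGlobals() deliberately never deletes the Globals instance; constructing it
// lazily and leaking it presumably keeps it valid for the whole process
// lifetime regardless of static destruction order.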

// static
void AddressSpaceGraphicsContext::init(const address_space_device_control_ops* ops) {
    sGlobals()->initialize(ops);
}

// static
void AddressSpaceGraphicsContext::clear() {
    sGlobals()->clear();
}

// static
void AddressSpaceGraphicsContext::setConsumer(
    ConsumerInterface iface) {
    sGlobals()->setConsumer(iface);
}

AddressSpaceGraphicsContext::AddressSpaceGraphicsContext(bool isVirtio, bool fromSnapshot) :
    mConsumerCallbacks((ConsumerCallbacks){
        [this] { return onUnavailableRead(); },
        [](uint64_t physAddr) {
            return (char*)sGlobals()->controlOps()->get_host_ptr(physAddr);
        },
    }),
    mConsumerInterface(sGlobals()->getConsumerInterface()),
    mIsVirtio(isVirtio) {

    if (fromSnapshot) {
        // Use load() instead to initialize
        return;
    }

    if (mIsVirtio) {
        mCombinedAllocation = sGlobals()->allocRingAndBufferStorageDedicated();
        mRingAllocation = sGlobals()->allocRingViewIntoCombined(mCombinedAllocation);
        mBufferAllocation = sGlobals()->allocBufferViewIntoCombined(mCombinedAllocation);
    } else {
        mRingAllocation = sGlobals()->allocRingStorage();
        mBufferAllocation = sGlobals()->allocBuffer();
    }

    if (!mRingAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate ring for ASG context");
    }

    if (!mBufferAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate buffer for ASG context");
    }

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        android_hw->hw_gltransport_asg_writeStepSize;
    mHostContext.ring_config->host_consumed_pos = 0;
    mHostContext.ring_config->guest_write_pos = 0;
    mHostContext.ring_config->transfer_mode = 1;
    mHostContext.ring_config->transfer_size = 0;
    mHostContext.ring_config->in_error = 0;

    mSavedConfig = *mHostContext.ring_config;
}
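
// A pristine copy of the just-initialized ring config is kept in mSavedConfig
// so that an ASG_GET_CONFIG ping (see perform() below) can restore the
// guest-visible config to a known-good state, and so that the config can be
// serialized independently of the live copy that sits in shared memory.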

AddressSpaceGraphicsContext::~AddressSpaceGraphicsContext() {
    if (mCurrentConsumer) {
        mExiting = 1;
        *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
        mConsumerMessages.send(ConsumerCommand::Exit);
        mConsumerInterface.destroy(mCurrentConsumer);
    }

    sGlobals()->freeBuffer(mBufferAllocation);
    sGlobals()->freeRingStorage(mRingAllocation);
    sGlobals()->freeRingAndBuffer(mCombinedAllocation);
}

void AddressSpaceGraphicsContext::perform(AddressSpaceDevicePingInfo* info) {
    switch (static_cast<asg_command>(info->metadata)) {
        case ASG_GET_RING:
            info->metadata = mRingAllocation.offsetIntoPhys;
            info->size = mRingAllocation.size;
            break;
        case ASG_GET_BUFFER:
            info->metadata = mBufferAllocation.offsetIntoPhys;
            info->size = mBufferAllocation.size;
            break;
        case ASG_SET_VERSION: {
            auto guestVersion = (uint32_t)info->size;
            info->size = (uint64_t)(mVersion > guestVersion ? guestVersion : mVersion);
            mVersion = (uint32_t)info->size;
            mCurrentConsumer = mConsumerInterface.create(
                mHostContext, nullptr /* no load stream */, mConsumerCallbacks);

            if (mIsVirtio) {
                info->metadata = mCombinedAllocation.hostmemId;
            }
            break;
        }
        case ASG_NOTIFY_AVAILABLE:
            mConsumerMessages.trySend(ConsumerCommand::Wakeup);
            info->metadata = 0;
            break;
        case ASG_GET_CONFIG:
            *mHostContext.ring_config = mSavedConfig;
            info->metadata = 0;
            break;
    }
}
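
// The ping protocol above is request/response over AddressSpaceDevicePingInfo:
// the guest places an asg_command in |metadata| (and, for ASG_SET_VERSION, its
// proposed version in |size|), pings the device, and reads the reply back out
// of the same struct. A hypothetical guest-side sequence, sketched only for
// illustration (GUEST_ASG_VERSION and ping() are placeholders):
//
//   info->metadata = ASG_GET_RING;    ping();  // -> ring offset + size
//   info->metadata = ASG_GET_BUFFER;  ping();  // -> buffer offset + size
//   info->size     = GUEST_ASG_VERSION;
//   info->metadata = ASG_SET_VERSION; ping();  // -> min(host, guest) version;
//                                              //    also creates the consumer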

int AddressSpaceGraphicsContext::onUnavailableRead() {
    static const uint32_t kMaxUnavailableReads = 8;

    ++mUnavailableReadCount;
    ring_buffer_yield();

    ConsumerCommand cmd;

    if (mExiting) {
        mUnavailableReadCount = kMaxUnavailableReads;
    }

    if (mUnavailableReadCount >= kMaxUnavailableReads) {
        mUnavailableReadCount = 0;

    sleep:
        *(mHostContext.host_state) = ASG_HOST_STATE_NEED_NOTIFY;
        mConsumerMessages.receive(&cmd);

        switch (cmd) {
            case ConsumerCommand::Wakeup:
                *(mHostContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;
                break;
            case ConsumerCommand::Exit:
                *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
                return -1;
            case ConsumerCommand::Sleep:
                goto sleep;
            case ConsumerCommand::PausePreSnapshot:
                return -2;
            case ConsumerCommand::ResumePostSnapshot:
                return -3;
            default:
                crashhandler_die(
                    "AddressSpaceGraphicsContext::onUnavailableRead: "
                    "Unknown command: 0x%x\n",
                    (uint32_t)cmd);
        }

        return 1;
    }
    return 0;
}
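
// Return values of onUnavailableRead(), as inferred from the code above:
// 0 means keep spinning, 1 means the host went to sleep and was woken by a
// guest notification, -1 means the context is exiting, and -2/-3 pause and
// resume the consumer around a snapshot.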

AddressSpaceDeviceType AddressSpaceGraphicsContext::getDeviceType() const {
    return AddressSpaceDeviceType::Graphics;
}

void AddressSpaceGraphicsContext::preSave() const {
    if (mCurrentConsumer) {
        mConsumerInterface.preSave(mCurrentConsumer);
        mConsumerMessages.send(ConsumerCommand::PausePreSnapshot);
    }
}

void AddressSpaceGraphicsContext::save(base::Stream* stream) const {
    stream->putBe32(mIsVirtio);
    stream->putBe32(mVersion);
    stream->putBe32(mExiting);
    stream->putBe32(mUnavailableReadCount);

    saveAllocation(stream, mRingAllocation);
    saveAllocation(stream, mBufferAllocation);
    saveAllocation(stream, mCombinedAllocation);

    saveRingConfig(stream, mSavedConfig);

    if (mCurrentConsumer) {
        stream->putBe32(1);
        mConsumerInterface.save(mCurrentConsumer, stream);
    } else {
        stream->putBe32(0);
    }
}

void AddressSpaceGraphicsContext::postSave() const {
    if (mCurrentConsumer) {
        mConsumerMessages.send(ConsumerCommand::ResumePostSnapshot);
        mConsumerInterface.postSave(mCurrentConsumer);
    }
}

bool AddressSpaceGraphicsContext::load(base::Stream* stream) {
    mIsVirtio = stream->getBe32();
    mVersion = stream->getBe32();
    mExiting = stream->getBe32();
    mUnavailableReadCount = stream->getBe32();

    loadAllocation(stream, mRingAllocation, AllocType::AllocTypeRing);
    loadAllocation(stream, mBufferAllocation, AllocType::AllocTypeBuffer);
    loadAllocation(stream, mCombinedAllocation, AllocType::AllocTypeCombined);

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        android_hw->hw_gltransport_asg_writeStepSize;

    // In load, the live ring config state is in shared host/guest ram.
    //
    // mHostContext.ring_config->host_consumed_pos = 0;
    // mHostContext.ring_config->transfer_mode = 1;
    // mHostContext.ring_config->transfer_size = 0;
    // mHostContext.ring_config->in_error = 0;

    loadRingConfig(stream, mSavedConfig);

    uint32_t consumerExists = stream->getBe32();

    if (consumerExists) {
        mCurrentConsumer = mConsumerInterface.create(
            mHostContext, stream, mConsumerCallbacks);
        mConsumerInterface.postLoad(mCurrentConsumer);
    }

    return true;
}

void AddressSpaceGraphicsContext::globalStatePreSave() {
    sGlobals()->preSave();
}

void AddressSpaceGraphicsContext::globalStateSave(base::Stream* stream) {
    sGlobals()->save(stream);
}

void AddressSpaceGraphicsContext::globalStatePostSave() {
    sGlobals()->postSave();
}

bool AddressSpaceGraphicsContext::globalStateLoad(base::Stream* stream) {
    return sGlobals()->load(stream);
}

void AddressSpaceGraphicsContext::saveRingConfig(base::Stream* stream, const struct asg_ring_config& config) const {
    stream->putBe32(config.buffer_size);
    stream->putBe32(config.flush_interval);
    stream->putBe32(config.host_consumed_pos);
    stream->putBe32(config.guest_write_pos);
    stream->putBe32(config.transfer_mode);
    stream->putBe32(config.transfer_size);
    stream->putBe32(config.in_error);
}

void AddressSpaceGraphicsContext::saveAllocation(base::Stream* stream, const Allocation& alloc) const {
    stream->putBe64(alloc.blockIndex);
    stream->putBe64(alloc.offsetIntoPhys);
    stream->putBe64(alloc.size);
    stream->putBe32(alloc.isView);
}

void AddressSpaceGraphicsContext::loadRingConfig(base::Stream* stream, struct asg_ring_config& config) {
    config.buffer_size = stream->getBe32();
    config.flush_interval = stream->getBe32();
    config.host_consumed_pos = stream->getBe32();
    config.guest_write_pos = stream->getBe32();
    config.transfer_mode = stream->getBe32();
    config.transfer_size = stream->getBe32();
    config.in_error = stream->getBe32();
}

void AddressSpaceGraphicsContext::loadAllocation(base::Stream* stream, Allocation& alloc, AddressSpaceGraphicsContext::AllocType type) {
    alloc.blockIndex = stream->getBe64();
    alloc.offsetIntoPhys = stream->getBe64();
    alloc.size = stream->getBe64();
    alloc.isView = stream->getBe32();

    sGlobals()->fillAllocFromLoad(alloc, type);
}

} // namespace asg
} // namespace emulation
} // namespace android