/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "AddressSpaceStream.h"

#include "VirtGpu.h"
#include "aemu/base/Tracing.h"
#include "virtgpu_gfxstream_protocol.h"

#if PLATFORM_SDK_VERSION < 26
#include <cutils/log.h>
#else
#include <log/log.h>
#endif
#include <cutils/properties.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

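// The guest stages reads from the host in chunks of kReadSize bytes;
// kWriteOffset positions a write region immediately after that read region.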
static const size_t kReadSize = 512 * 1024;
static const size_t kWriteOffset = kReadSize;

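// Goldfish (emulator) path: opens the address space device, asks the host for
// the ring and transfer-buffer offsets, claims and maps both shared regions,
// then negotiates protocol version 1 via ASG_SET_VERSION.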
AddressSpaceStream* createAddressSpaceStream(size_t ignored_bufSize,
                                             HealthMonitor<>* healthMonitor) {
    // Ignore incoming ignored_bufSize
    (void)ignored_bufSize;

    auto handle = goldfish_address_space_open();
    address_space_handle_t child_device_handle;

    if (!goldfish_address_space_set_subdevice_type(handle, GoldfishAddressSpaceSubdeviceType::Graphics, &child_device_handle)) {
        ALOGE("AddressSpaceStream::create failed (initial device create)\n");
        goldfish_address_space_close(handle);
        return nullptr;
    }

    struct address_space_ping request;
    request.metadata = ASG_GET_RING;
    if (!goldfish_address_space_ping(child_device_handle, &request)) {
        ALOGE("AddressSpaceStream::create failed (get ring)\n");
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    uint64_t ringOffset = request.metadata;

    request.metadata = ASG_GET_BUFFER;
    if (!goldfish_address_space_ping(child_device_handle, &request)) {
        ALOGE("AddressSpaceStream::create failed (get buffer)\n");
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    uint64_t bufferOffset = request.metadata;
    uint64_t bufferSize = request.size;

    if (!goldfish_address_space_claim_shared(
            child_device_handle, ringOffset, sizeof(asg_ring_storage))) {
        ALOGE("AddressSpaceStream::create failed (claim ring storage)\n");
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    if (!goldfish_address_space_claim_shared(
            child_device_handle, bufferOffset, bufferSize)) {
        ALOGE("AddressSpaceStream::create failed (claim buffer storage)\n");
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    char* ringPtr = (char*)goldfish_address_space_map(
        child_device_handle, ringOffset, sizeof(struct asg_ring_storage));

    if (!ringPtr) {
        ALOGE("AddressSpaceStream::create failed (map ring storage)\n");
        goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    char* bufferPtr = (char*)goldfish_address_space_map(
        child_device_handle, bufferOffset, bufferSize);

    if (!bufferPtr) {
        ALOGE("AddressSpaceStream::create failed (map buffer storage)\n");
        goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
        goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    struct asg_context context =
        asg_context_create(
            ringPtr, bufferPtr, bufferSize);

    request.metadata = ASG_SET_VERSION;
    request.size = 1; // version 1

    if (!goldfish_address_space_ping(child_device_handle, &request)) {
        ALOGE("AddressSpaceStream::create failed (set version)\n");
        goldfish_address_space_unmap(bufferPtr, bufferSize);
        goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
        goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    uint32_t version = request.size;

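    // Start in type-1 (flush-interval) transfer mode, with the host consumed
    // position and guest write position reset to the beginning of the buffer.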
    context.ring_config->transfer_mode = 1;
    context.ring_config->host_consumed_pos = 0;
    context.ring_config->guest_write_pos = 0;

    struct address_space_ops ops = {
        .open = goldfish_address_space_open,
        .close = goldfish_address_space_close,
        .claim_shared = goldfish_address_space_claim_shared,
        .unclaim_shared = goldfish_address_space_unclaim_shared,
        .map = goldfish_address_space_map,
        .unmap = goldfish_address_space_unmap,
        .set_subdevice_type = goldfish_address_space_set_subdevice_type,
        .ping = goldfish_address_space_ping,
    };

    AddressSpaceStream* res =
        new AddressSpaceStream(
            child_device_handle, version, context,
            ringOffset, bufferOffset, ops, healthMonitor);

    return res;
}

address_space_handle_t virtgpu_address_space_open() {
    return (address_space_handle_t)(-EINVAL);
}

void virtgpu_address_space_close(address_space_handle_t fd) {
    // Handle opened by VirtioGpuDevice wrapper
}

bool virtgpu_address_space_ping(address_space_handle_t fd, struct address_space_ping* info) {
    int ret;
    struct VirtGpuExecBuffer exec = {};
    VirtGpuDevice& instance = VirtGpuDevice::getInstance();
    struct gfxstreamContextPing ping = {};

    ping.hdr.opCode = GFXSTREAM_CONTEXT_PING;
    ping.resourceId = info->resourceId;

    exec.command = static_cast<void*>(&ping);
    exec.command_size = sizeof(ping);

    ret = instance.execBuffer(exec, nullptr);
    if (ret)
        return false;

    return true;
}

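// virtio-gpu path: a single mappable host-backed blob holds both the ring
// storage and the transfer buffer; a GFXSTREAM_CONTEXT_CREATE execbuffer
// binds the blob's resource to a new context on the host.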
AddressSpaceStream* createVirtioGpuAddressSpaceStream(HealthMonitor<>* healthMonitor) {
    VirtGpuBlobPtr pipe, blob;
    VirtGpuBlobMappingPtr pipeMapping, blobMapping;
    struct VirtGpuExecBuffer exec = {};
    struct VirtGpuCreateBlob blobCreate = {};
    struct gfxstreamContextCreate contextCreate = {};

    char* blobAddr, *bufferPtr;
    int ret;

    // HACK: hardcoded constants for now. The ideal solution would use
    // virtio-gpu capabilities to report both ringSize and bufferSize.
    uint32_t ringSize = 12288;
    uint32_t bufferSize = 1048576;

    VirtGpuDevice& instance = VirtGpuDevice::getInstance();

    blobCreate.blobId = 0;
    blobCreate.blobMem = kBlobMemHost3d;
    blobCreate.flags = kBlobFlagMappable;
    blobCreate.size = ringSize + bufferSize;
    blob = instance.createBlob(blobCreate);
    if (!blob)
        return nullptr;

    // Context creation command
    contextCreate.hdr.opCode = GFXSTREAM_CONTEXT_CREATE;
    contextCreate.resourceId = blob->getResourceHandle();

    exec.command = static_cast<void*>(&contextCreate);
    exec.command_size = sizeof(contextCreate);

    ret = instance.execBuffer(exec, blob);
    if (ret)
        return nullptr;

    // Wait occurs on the global timeline -- should we use a context-specific one?
    ret = blob->wait();
    if (ret)
        return nullptr;

    blobMapping = blob->createMapping();
    if (!blobMapping)
        return nullptr;

    blobAddr = reinterpret_cast<char*>(blobMapping->asRawPtr());

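    // Layout within the blob: asg_ring_storage sits at the start, and the
    // transfer buffer begins immediately after it.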
    bufferPtr = blobAddr + sizeof(struct asg_ring_storage);
    struct asg_context context =
        asg_context_create(blobAddr, bufferPtr, bufferSize);

    context.ring_config->transfer_mode = 1;
    context.ring_config->host_consumed_pos = 0;
    context.ring_config->guest_write_pos = 0;

    struct address_space_ops ops = {
        .open = virtgpu_address_space_open,
        .close = virtgpu_address_space_close,
        .ping = virtgpu_address_space_ping,
    };

    AddressSpaceStream* res =
        new AddressSpaceStream((address_space_handle_t)(-1), 1, context, 0, 0, ops, healthMonitor);

    res->setMapping(blobMapping);
    res->setResourceId(contextCreate.resourceId);
    return res;
}

AddressSpaceStream::AddressSpaceStream(
    address_space_handle_t handle,
    uint32_t version,
    struct asg_context context,
    uint64_t ringOffset,
    uint64_t writeBufferOffset,
    struct address_space_ops ops,
    HealthMonitor<>* healthMonitor) :
    IOStream(context.ring_config->flush_interval),
    m_ops(ops),
    m_tmpBuf(0),
    m_tmpBufSize(0),
    m_tmpBufXferSize(0),
    m_usingTmpBuf(0),
    m_readBuf(0),
    m_read(0),
    m_readLeft(0),
    m_handle(handle),
    m_version(version),
    m_context(context),
    m_ringOffset(ringOffset),
    m_writeBufferOffset(writeBufferOffset),
    m_writeBufferSize(context.ring_config->buffer_size),
    m_writeBufferMask(m_writeBufferSize - 1),
    m_buf((unsigned char*)context.buffer),
    m_writeStart(m_buf),
    m_writeStep(context.ring_config->flush_interval),
    m_notifs(0),
    m_written(0),
    m_backoffIters(0),
    m_backoffFactor(1),
    m_ringStorageSize(sizeof(struct asg_ring_storage) + m_writeBufferSize),
    m_healthMonitor(healthMonitor) {
    // Reserved for future use; referenced here so the unused-member
    // warning does not break -Werror builds.
    (void)m_ringStorageSize;
    (void)m_version;
}

AddressSpaceStream::~AddressSpaceStream() {
    flush();
    ensureType3Finished();
    ensureType1Finished();

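    // The virtio-gpu path owns its memory through the blob mapping set via
    // setMapping(); only the goldfish path needs explicit unmap/unclaim here.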
    if (!m_mapping) {
        m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
        m_ops.unmap(m_context.buffer, m_writeBufferSize);
        m_ops.unclaim_shared(m_handle, m_ringOffset);
        m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
    }

    m_ops.close(m_handle);
    if (m_readBuf) free(m_readBuf);
    if (m_tmpBuf) free(m_tmpBuf);
}

size_t AddressSpaceStream::idealAllocSize(size_t len) {
    if (len > m_writeStep) return len;
    return m_writeStep;
}

void *AddressSpaceStream::allocBuffer(size_t minSize) {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("allocBuffer");
    ensureType3Finished();

    if (!m_readBuf) {
        m_readBuf = (unsigned char*)malloc(kReadSize);
    }

    size_t allocSize =
        (m_writeStep < minSize ? minSize : m_writeStep);

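    // Requests larger than one flush interval cannot go through the ring's
    // write window directly; stage them in a temporary buffer and send them
    // as a type-3 (large) transfer on commit.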
    if (m_writeStep < allocSize) {
        if (!m_tmpBuf) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)malloc(m_tmpBufSize);
        }

        if (m_tmpBufSize < allocSize) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)realloc(m_tmpBuf, m_tmpBufSize);
        }

        if (!m_usingTmpBuf) {
            flush();
        }

        m_usingTmpBuf = true;
        m_tmpBufXferSize = allocSize;
        return m_tmpBuf;
    } else {
        if (m_usingTmpBuf) {
            writeFully(m_tmpBuf, m_tmpBufXferSize);
            m_usingTmpBuf = false;
            m_tmpBufXferSize = 0;
        }

        return m_writeStart;
    }
}

int AddressSpaceStream::commitBuffer(size_t size)
{
    if (size == 0) return 0;

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, size);
        m_tmpBufXferSize = 0;
        m_usingTmpBuf = false;
        return 0;
    } else {
        int res = type1Write(m_writeStart - m_buf, size);
        advanceWrite();
        return res;
    }
}

const unsigned char *AddressSpaceStream::readFully(void *ptr, size_t totalReadSize)
{
    unsigned char* userReadBuf = static_cast<unsigned char*>(ptr);

    if (!userReadBuf) {
        if (totalReadSize > 0) {
            ALOGE("AddressSpaceStream::readFully failed, userReadBuf=NULL, totalReadSize %zu, lethal"
                  " error, exiting.", totalReadSize);
            abort();
        }
        return nullptr;
    }

    // Advance buffered read if not yet consumed.
    size_t remaining = totalReadSize;
    size_t bufferedReadSize =
        m_readLeft < remaining ? m_readLeft : remaining;

    if (bufferedReadSize) {
        memcpy(userReadBuf,
               m_readBuf + (m_read - m_readLeft),
               bufferedReadSize);
        remaining -= bufferedReadSize;
        m_readLeft -= bufferedReadSize;
    }

    if (!remaining) return userReadBuf;

    // Read up to kReadSize bytes if all buffered read has been consumed.
    size_t maxRead = m_readLeft ? 0 : kReadSize;
    ssize_t actual = 0;

    if (maxRead) {
        actual = speculativeRead(m_readBuf, maxRead);

        // Update the buffered read size.
        if (actual > 0) {
            m_read = m_readLeft = actual;
        }

        if (actual == 0) {
            ALOGD("%s: end of pipe", __FUNCTION__);
            return NULL;
        }
    }

    // Consume buffered read and read more if necessary.
    while (remaining) {
        bufferedReadSize = m_readLeft < remaining ? m_readLeft : remaining;
        if (bufferedReadSize) {
            memcpy(userReadBuf + (totalReadSize - remaining),
                   m_readBuf + (m_read - m_readLeft),
                   bufferedReadSize);
            remaining -= bufferedReadSize;
            m_readLeft -= bufferedReadSize;
            continue;
        }

        actual = speculativeRead(m_readBuf, kReadSize);

        if (actual == 0) {
            ALOGD("%s: Failed reading from pipe: %d", __FUNCTION__, errno);
            return NULL;
        }

        if (actual > 0) {
            m_read = m_readLeft = actual;
            continue;
        }
    }

    resetBackoff();
    return userReadBuf;
}

const unsigned char *AddressSpaceStream::read(void *buf, size_t *inout_len) {
    unsigned char* dst = (unsigned char*)buf;
    size_t wanted = *inout_len;
    ssize_t actual = speculativeRead(dst, wanted);

    if (actual >= 0) {
        *inout_len = actual;
    } else {
        return nullptr;
    }

    return (const unsigned char*)dst;
}

int AddressSpaceStream::writeFully(const void *buf, size_t size)
{
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("writeFully");
    ensureType3Finished();
    ensureType1Finished();

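    // Publish the transfer size and switch the ring into type-3 (large
    // transfer) mode so the host consumes from the large-xfer ring.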
    m_context.ring_config->transfer_size = size;
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 4;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool hostPinged = false;
    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        if (!hostPinged && *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    ensureType3Finished();

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

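// Variant of writeFully() that publishes transfer_size with release
// semantics and streams in larger (half write buffer) chunks, without
// waiting for the host to drain the large-xfer ring before returning.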
int AddressSpaceStream::writeFullyAsync(const void *buf, size_t size)
{
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("writeFullyAsync");
    ensureType3Finished();
    ensureType1Finished();

    __atomic_store_n(&m_context.ring_config->transfer_size, size, __ATOMIC_RELEASE);
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 2;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool pingedHost = false;

    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        uint32_t hostState = __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

        if (!pingedHost &&
            hostState != ASG_HOST_STATE_CAN_CONSUME &&
            hostState != ASG_HOST_STATE_RENDERING) {
            pingedHost = true;
            notifyAvailable();
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

const unsigned char *AddressSpaceStream::commitBufferAndReadFully(
    size_t writeSize, void *userReadBufPtr, size_t totalReadSize) {

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, writeSize);
        m_usingTmpBuf = false;
        m_tmpBufXferSize = 0;
        return readFully(userReadBufPtr, totalReadSize);
    } else {
        commitBuffer(writeSize);
        return readFully(userReadBufPtr, totalReadSize);
    }
}

bool AddressSpaceStream::isInError() const {
    return 1 == m_context.ring_config->in_error;
}

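// Blocks until at least some data can be read from the host's large-xfer
// ring; returns the number of bytes read into readBuffer, or -1 on error.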
ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
    ensureType3Finished();
    ensureType1Finished();

    size_t actuallyRead = 0;

    while (!actuallyRead) {

        uint32_t readAvail =
            ring_buffer_available_read(
                m_context.from_host_large_xfer.ring,
                &m_context.from_host_large_xfer.view);

        if (!readAvail) {
            ring_buffer_yield();
            backoff();
            continue;
        }

        uint32_t toRead = readAvail > trySize ? trySize : readAvail;

        long stepsRead = ring_buffer_view_read(
            m_context.from_host_large_xfer.ring,
            &m_context.from_host_large_xfer.view,
            readBuffer, toRead, 1);

        actuallyRead += stepsRead * toRead;

        if (isInError()) {
            return -1;
        }
    }

    return actuallyRead;
}

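// Pings the host through the device so it starts (or resumes) consuming.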
void AddressSpaceStream::notifyAvailable() {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("PING");
    struct address_space_ping request;
    request.metadata = ASG_NOTIFY_AVAILABLE;
    request.resourceId = m_resourceId;
    m_ops.ping(m_handle, &request);
    ++m_notifs;
}

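// Assumes the write buffer size is a power of two, so masking with
// (size - 1) wraps the position into the buffer.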
uint32_t AddressSpaceStream::getRelativeBufferPos(uint32_t pos) {
    return pos & m_writeBufferMask;
}

void AddressSpaceStream::advanceWrite() {
    m_writeStart += m_context.ring_config->flush_interval;

    if (m_writeStart == m_buf + m_context.ring_config->buffer_size) {
        m_writeStart = m_buf;
    }
}

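// If the host appears stalled while to_host data is pending, ping it once;
// bail as soon as the available-read count changes (host is making progress).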
void AddressSpaceStream::ensureConsumerFinishing() {
    uint32_t currAvailRead = ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        ring_buffer_yield();
        uint32_t nextAvailRead = ring_buffer_available_read(m_context.to_host, 0);

        if (nextAvailRead != currAvailRead) {
            break;
        }

        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            break;
        }

        backoff();
    }
}

void AddressSpaceStream::ensureType1Finished() {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("ensureType1Finished");

    uint32_t currAvailRead =
        ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        backoff();
        ring_buffer_yield();
        currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
        if (isInError()) {
            return;
        }
    }
}

void AddressSpaceStream::ensureType3Finished() {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("ensureType3Finished");
    uint32_t availReadLarge =
        ring_buffer_available_read(
            m_context.to_host_large_xfer.ring,
            &m_context.to_host_large_xfer.view);
    while (availReadLarge) {
        ring_buffer_yield();
        backoff();
        availReadLarge =
            ring_buffer_available_read(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view);
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
        }
        if (isInError()) {
            return;
        }
    }
}

int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {
    auto watchdog = WATCHDOG_BUILDER(m_healthMonitor, "ASG watchdog").build();
    AEMU_SCOPED_TRACE("type1Write");

    ensureType3Finished();

    size_t sent = 0;
    size_t sizeForRing = sizeof(struct asg_type1_xfer);

    struct asg_type1_xfer xfer = {
        bufferOffset,
        (uint32_t)size,
    };

    uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

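    // Throttle: allow at most (buffer_size / flush_interval - 1) type-1
    // descriptors to be outstanding in the to_host ring, then spin until
    // the host consumes below that bound.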
    uint32_t maxOutstanding = 1;
    uint32_t maxSteps = m_context.ring_config->buffer_size /
        m_context.ring_config->flush_interval;

    if (maxSteps > 1) maxOutstanding = maxSteps - 1;

    uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);

    while (ringAvailReadNow >= maxOutstanding * sizeForRing) {
        ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
    }

    bool hostPinged = false;
    while (sent < sizeForRing) {
        long sentChunks = ring_buffer_write(
            m_context.to_host,
            writeBufferBytes + sent,
            sizeForRing - sent, 1);

        if (!hostPinged &&
            *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * (sizeForRing - sent);

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }

    resetBackoff();
    return 0;
}

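// Spin for kBackoffItersThreshold iterations, then start sleeping. The sleep
// length doubles after each further kBackoffFactorDoublingIncrement
// iterations, capped at 1 ms (1000 microseconds).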
void AddressSpaceStream::backoff() {
#if defined(HOST_BUILD) || defined(__APPLE__) || defined(__MACOSX) || defined(__Fuchsia__) || defined(__linux__)
    static const uint32_t kBackoffItersThreshold = 50000000;
    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
#else
    static const uint32_t kBackoffItersThreshold = property_get_int32("ro.boot.asg.backoffiters", 50000000);
    static const uint32_t kBackoffFactorDoublingIncrement = property_get_int32("ro.boot.asg.backoffincrement", 50000000);
#endif
    ++m_backoffIters;

    if (m_backoffIters > kBackoffItersThreshold) {
        usleep(m_backoffFactor);
        uint32_t itersSoFarAfterThreshold = m_backoffIters - kBackoffItersThreshold;
        if (itersSoFarAfterThreshold > kBackoffFactorDoublingIncrement) {
            m_backoffFactor = m_backoffFactor << 1;
            if (m_backoffFactor > 1000) m_backoffFactor = 1000;
            m_backoffIters = kBackoffItersThreshold;
        }
    }
}

void AddressSpaceStream::resetBackoff() {
    m_backoffIters = 0;
    m_backoffFactor = 1;
}