/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "AddressSpaceStream.h"

#include "android/base/Tracing.h"

#if PLATFORM_SDK_VERSION < 26
#include <cutils/log.h>
#else
#include <log/log.h>
#endif
#include <cutils/properties.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>

#include <sys/mman.h>

static const size_t kReadSize = 512 * 1024;
static const size_t kWriteOffset = kReadSize;

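// Creates an AddressSpaceStream over the goldfish address space device.
// The sequence below: open the device, select the graphics subdevice,
// query the ring and write-buffer offsets via the ASG_GET_RING /
// ASG_GET_BUFFER pings, claim and map both shared regions, then negotiate
// protocol version 1 with ASG_SET_VERSION. Any failure unwinds whatever
// was acquired so far and returns nullptr.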
AddressSpaceStream* createAddressSpaceStream(size_t ignored_bufSize) {
    // Ignore incoming ignored_bufSize
    (void)ignored_bufSize;

    auto handle = goldfish_address_space_open();
    address_space_handle_t child_device_handle;

    if (!goldfish_address_space_set_subdevice_type(handle, GoldfishAddressSpaceSubdeviceType::Graphics, &child_device_handle)) {
        ALOGE("AddressSpaceStream::create failed (initial device create)\n");
        goldfish_address_space_close(handle);
        return nullptr;
    }

    struct address_space_ping request;
    request.metadata = ASG_GET_RING;
    if (!goldfish_address_space_ping(child_device_handle, &request)) {
        ALOGE("AddressSpaceStream::create failed (get ring)\n");
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    uint64_t ringOffset = request.metadata;

    request.metadata = ASG_GET_BUFFER;
    if (!goldfish_address_space_ping(child_device_handle, &request)) {
        ALOGE("AddressSpaceStream::create failed (get buffer)\n");
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    uint64_t bufferOffset = request.metadata;
    uint64_t bufferSize = request.size;

    if (!goldfish_address_space_claim_shared(
        child_device_handle, ringOffset, sizeof(asg_ring_storage))) {
        ALOGE("AddressSpaceStream::create failed (claim ring storage)\n");
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    if (!goldfish_address_space_claim_shared(
        child_device_handle, bufferOffset, bufferSize)) {
        ALOGE("AddressSpaceStream::create failed (claim buffer storage)\n");
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    char* ringPtr = (char*)goldfish_address_space_map(
        child_device_handle, ringOffset, sizeof(struct asg_ring_storage));

    if (!ringPtr) {
        ALOGE("AddressSpaceStream::create failed (map ring storage)\n");
        goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    char* bufferPtr = (char*)goldfish_address_space_map(
        child_device_handle, bufferOffset, bufferSize);

    if (!bufferPtr) {
        ALOGE("AddressSpaceStream::create failed (map buffer storage)\n");
        goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
        goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    struct asg_context context =
        asg_context_create(
            ringPtr, bufferPtr, bufferSize);

    request.metadata = ASG_SET_VERSION;
    request.size = 1; // version 1

    if (!goldfish_address_space_ping(child_device_handle, &request)) {
        ALOGE("AddressSpaceStream::create failed (set version)\n");
        goldfish_address_space_unmap(bufferPtr, bufferSize);
        goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
        goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
        goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
        goldfish_address_space_close(child_device_handle);
        return nullptr;
    }

    uint32_t version = request.size;

    context.ring_config->transfer_mode = 1;
    context.ring_config->host_consumed_pos = 0;
    context.ring_config->guest_write_pos = 0;

    struct address_space_ops ops = {
        .open = goldfish_address_space_open,
        .close = goldfish_address_space_close,
        .claim_shared = goldfish_address_space_claim_shared,
        .unclaim_shared = goldfish_address_space_unclaim_shared,
        .map = goldfish_address_space_map,
        .unmap = goldfish_address_space_unmap,
        .set_subdevice_type = goldfish_address_space_set_subdevice_type,
        .ping = goldfish_address_space_ping,
    };

    AddressSpaceStream* res =
        new AddressSpaceStream(
            child_device_handle, version, context,
            ringOffset, bufferOffset, false /* not virtio */, ops);

    return res;
}

#if defined(VIRTIO_GPU) && !defined(HOST_BUILD)
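// Virtio-gpu variant. Instead of claiming offsets into the address space
// device, the ring storage and write buffer share a single hostmem blob:
// sizes are queried over ping-with-response, the blob is allocated with
// virtgpu_address_space_allocate_hostmem, and the write buffer starts
// immediately after the asg_ring_storage header at the front of the blob.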
AddressSpaceStream* createVirtioGpuAddressSpaceStream(const struct StreamCreate &streamCreate) {
    auto handle = reinterpret_cast<address_space_handle_t>(streamCreate.streamHandle);
    struct address_space_virtgpu_info virtgpu_info;

    ALOGD("%s: create subdevice and get resp\n", __func__);
    if (!virtgpu_address_space_create_context_with_subdevice(
            handle, GoldfishAddressSpaceSubdeviceType::VirtioGpuGraphics,
            &virtgpu_info)) {
        ALOGE("AddressSpaceStream::create failed (create subdevice)\n");
        if (virtgpu_info.resp_mapped_ptr) {
            munmap(virtgpu_info.resp_mapped_ptr, 4096);
        }
        virtgpu_address_space_close(handle);
        return nullptr;
    }
    ALOGD("%s: create subdevice and get resp (done)\n", __func__);

    struct address_space_ping request;
    uint32_t ringSize = 0;
    uint32_t bufferSize = 0;

    request.metadata = ASG_GET_RING;
    if (!virtgpu_address_space_ping_with_response(
        &virtgpu_info, &request)) {
        ALOGE("AddressSpaceStream::create failed (get ring)\n");
        if (virtgpu_info.resp_mapped_ptr) {
            munmap(virtgpu_info.resp_mapped_ptr, 4096);
        }
        virtgpu_address_space_close(handle);
        return nullptr;
    }
    ringSize = request.size;

    request.metadata = ASG_GET_BUFFER;
    if (!virtgpu_address_space_ping_with_response(
        &virtgpu_info, &request)) {
        ALOGE("AddressSpaceStream::create failed (get buffer)\n");
        if (virtgpu_info.resp_mapped_ptr) {
            munmap(virtgpu_info.resp_mapped_ptr, 4096);
        }
        virtgpu_address_space_close(handle);
        return nullptr;
    }
    bufferSize = request.size;

    request.metadata = ASG_SET_VERSION;
    request.size = 1; // version 1

    if (!virtgpu_address_space_ping_with_response(
        &virtgpu_info, &request)) {
        ALOGE("AddressSpaceStream::create failed (set version)\n");
        if (virtgpu_info.resp_mapped_ptr) {
            munmap(virtgpu_info.resp_mapped_ptr, 4096);
        }
        virtgpu_address_space_close(handle);
        return nullptr;
    }

    ALOGD("%s: ping returned. context ring and buffer sizes %u %u\n", __func__,
            ringSize, bufferSize);

    uint64_t hostmem_id = request.metadata;
    uint32_t version = request.size;
    size_t hostmem_alloc_size =
        (size_t)(ringSize + bufferSize);

    ALOGD("%s: hostmem size: %zu\n", __func__, hostmem_alloc_size);

    struct address_space_virtgpu_hostmem_info hostmem_info;
    if (!virtgpu_address_space_allocate_hostmem(
            handle,
            hostmem_alloc_size,
            hostmem_id,
            &hostmem_info)) {
        ALOGE("AddressSpaceStream::create failed (alloc hostmem)\n");
        if (virtgpu_info.resp_mapped_ptr) {
            munmap(virtgpu_info.resp_mapped_ptr, 4096);
        }
        virtgpu_address_space_close(handle);
        return nullptr;
    }

    request.metadata = ASG_GET_CONFIG;
    if (!virtgpu_address_space_ping_with_response(
        &virtgpu_info, &request)) {
        ALOGE("AddressSpaceStream::create failed (get config)\n");
        if (virtgpu_info.resp_mapped_ptr) {
            munmap(virtgpu_info.resp_mapped_ptr, 4096);
        }
        virtgpu_address_space_close(handle);
        return nullptr;
    }

    char* ringPtr = (char*)hostmem_info.ptr;
    char* bufferPtr = ((char*)hostmem_info.ptr) + sizeof(struct asg_ring_storage);

    struct asg_context context =
        asg_context_create(
            (char*)ringPtr, (char*)bufferPtr, bufferSize);

    context.ring_config->transfer_mode = 1;
    context.ring_config->host_consumed_pos = 0;
    context.ring_config->guest_write_pos = 0;

    struct address_space_ops ops = {
        .open = virtgpu_address_space_open,
        .close = virtgpu_address_space_close,
        .ping = virtgpu_address_space_ping,
        .allocate_hostmem = virtgpu_address_space_allocate_hostmem,
        .ping_with_response = virtgpu_address_space_ping_with_response,
    };

    if (virtgpu_info.resp_mapped_ptr) {
        munmap(virtgpu_info.resp_mapped_ptr, 4096);
    }

    AddressSpaceStream* res =
        new AddressSpaceStream(
            handle, version, context,
            0, 0, true /* is virtio */, ops);

    return res;
}
#endif // VIRTIO_GPU && !HOST_BUILD


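// The stream proper. Alloc/commit granularity is the ring_config's
// flush_interval: the shared write buffer is consumed in
// flush_interval-sized steps (see advanceWrite), and the pos & mask
// arithmetic in getRelativeBufferPos assumes buffer_size is a power of
// two.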
AddressSpaceStream::AddressSpaceStream(
    address_space_handle_t handle,
    uint32_t version,
    struct asg_context context,
    uint64_t ringOffset,
    uint64_t writeBufferOffset,
    bool virtioMode,
    struct address_space_ops ops) :
    IOStream(context.ring_config->flush_interval),
    m_virtioMode(virtioMode),
    m_ops(ops),
    m_tmpBuf(0),
    m_tmpBufSize(0),
    m_tmpBufXferSize(0),
    m_usingTmpBuf(0),
    m_readBuf(0),
    m_read(0),
    m_readLeft(0),
    m_handle(handle),
    m_version(version),
    m_context(context),
    m_ringOffset(ringOffset),
    m_writeBufferOffset(writeBufferOffset),
    m_writeBufferSize(context.ring_config->buffer_size),
    m_writeBufferMask(m_writeBufferSize - 1),
    m_buf((unsigned char*)context.buffer),
    m_writeStart(m_buf),
    m_writeStep(context.ring_config->flush_interval),
    m_notifs(0),
    m_written(0),
    m_backoffIters(0),
    m_backoffFactor(1),
    m_ringStorageSize(sizeof(struct asg_ring_storage) + m_writeBufferSize) {
    // We'll use this in the future, but at the moment,
    // it's a potential compile Werror.
    (void)m_version;
}

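// Tear-down mirrors creation: flush pending writes, wait for the host to
// drain both rings, then release the shared memory. In virtio mode the
// ring and buffer are one mmapped hostmem blob; otherwise each region is
// unmapped and unclaimed separately before the device handle is closed.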
AddressSpaceStream::~AddressSpaceStream() {
    flush();
    ensureType3Finished();
    ensureType1Finished();
    if (m_virtioMode) {
        if (m_context.to_host) {
            munmap(m_context.to_host, m_ringStorageSize);
        }
    } else {
        m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
        m_ops.unmap(m_context.buffer, m_writeBufferSize);
        m_ops.unclaim_shared(m_handle, m_ringOffset);
        m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
    }
    m_ops.close(m_handle);
    if (m_readBuf) free(m_readBuf);
    if (m_tmpBuf) free(m_tmpBuf);
}

size_t AddressSpaceStream::idealAllocSize(size_t len) {
    if (len > m_writeStep) return len;
    return m_writeStep;
}

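// Hands the encoder a buffer to write into. Requests that fit in one
// write step are served directly from the shared write buffer; larger
// requests fall back to a malloc'd temporary buffer whose contents go
// through the type-3 (large transfer) path on commit.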
void *AddressSpaceStream::allocBuffer(size_t minSize) {
    AEMU_SCOPED_TRACE("allocBuffer");
    ensureType3Finished();

    if (!m_readBuf) {
        m_readBuf = (unsigned char*)malloc(kReadSize);
    }

    size_t allocSize =
        (m_writeStep < minSize ? minSize : m_writeStep);

    if (m_writeStep < allocSize) {
        if (!m_tmpBuf) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)malloc(m_tmpBufSize);
        }

        if (m_tmpBufSize < allocSize) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)realloc(m_tmpBuf, m_tmpBufSize);
        }

        if (!m_usingTmpBuf) {
            flush();
        }

        m_usingTmpBuf = true;
        m_tmpBufXferSize = allocSize;
        return m_tmpBuf;
    } else {
        if (m_usingTmpBuf) {
            writeFully(m_tmpBuf, m_tmpBufXferSize);
            m_usingTmpBuf = false;
            m_tmpBufXferSize = 0;
        }

        return m_writeStart;
    }
}

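// Commits |size| bytes written into the buffer returned by allocBuffer:
// either a type-3 writeFully of the temporary buffer, or a type-1
// descriptor referencing the current step of the shared write buffer.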
int AddressSpaceStream::commitBuffer(size_t size)
{
    if (size == 0) return 0;

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, size);
        m_tmpBufXferSize = 0;
        m_usingTmpBuf = false;
        return 0;
    } else {
        int res = type1Write(m_writeStart - m_buf, size);
        advanceWrite();
        return res;
    }
}

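// Blocking read of exactly |totalReadSize| bytes: drains whatever is left
// in m_readBuf first, then refills it via speculativeRead until the
// caller's buffer is complete.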
const unsigned char *AddressSpaceStream::readFully(void *ptr, size_t totalReadSize)
{

    unsigned char* userReadBuf = static_cast<unsigned char*>(ptr);

    if (!userReadBuf) {
        if (totalReadSize > 0) {
            ALOGE("AddressSpaceStream::readFully failed, userReadBuf=NULL, totalReadSize %zu, lethal"
                    " error, exiting.", totalReadSize);
            abort();
        }
        return nullptr;
    }

    // Advance buffered read if not yet consumed.
    size_t remaining = totalReadSize;
    size_t bufferedReadSize =
        m_readLeft < remaining ? m_readLeft : remaining;

    if (bufferedReadSize) {
        memcpy(userReadBuf,
               m_readBuf + (m_read - m_readLeft),
               bufferedReadSize);
        remaining -= bufferedReadSize;
        m_readLeft -= bufferedReadSize;
    }

    if (!remaining) return userReadBuf;

    // Read up to kReadSize bytes if all buffered read has been consumed.
    size_t maxRead = m_readLeft ? 0 : kReadSize;
    ssize_t actual = 0;

    if (maxRead) {
        actual = speculativeRead(m_readBuf, maxRead);

        // Update buffered read size.
        if (actual > 0) {
            m_read = m_readLeft = actual;
        }

        if (actual == 0) {
            ALOGD("%s: end of pipe", __FUNCTION__);
            return NULL;
        }
    }

    // Consume buffered read and read more if necessary.
    while (remaining) {
        bufferedReadSize = m_readLeft < remaining ? m_readLeft : remaining;
        if (bufferedReadSize) {
            memcpy(userReadBuf + (totalReadSize - remaining),
                   m_readBuf + (m_read - m_readLeft),
                   bufferedReadSize);
            remaining -= bufferedReadSize;
            m_readLeft -= bufferedReadSize;
            continue;
        }

        actual = speculativeRead(m_readBuf, kReadSize);

        if (actual == 0) {
            ALOGD("%s: Failed reading from pipe: %d", __FUNCTION__, errno);
            return NULL;
        }

        if (actual > 0) {
            m_read = m_readLeft = actual;
            continue;
        }
    }

    resetBackoff();
    return userReadBuf;
}

const unsigned char *AddressSpaceStream::read(void *buf, size_t *inout_len) {
    unsigned char* dst = (unsigned char*)buf;
    size_t wanted = *inout_len;
    ssize_t actual = speculativeRead(dst, wanted);

    if (actual >= 0) {
        *inout_len = actual;
    } else {
        return nullptr;
    }

    return (const unsigned char*)dst;
}

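// Type-3 (large) transfer: publish the total size in ring_config, stream
// the payload through the to_host_large_xfer ring in chunks of up to a
// quarter of the write buffer, ping the host once if it is neither
// consuming nor rendering, and wait for the ring to drain before dropping
// back to type-1 mode.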
int AddressSpaceStream::writeFully(const void *buf, size_t size)
{
    AEMU_SCOPED_TRACE("writeFully");
    ensureType3Finished();
    ensureType1Finished();

    m_context.ring_config->transfer_size = size;
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 4;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool hostPinged = false;
    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        if (!hostPinged && *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    ensureType3Finished();

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

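// Same protocol as writeFully, but with larger chunks (half the write
// buffer) and no ensureType3Finished barrier at the end, so the host may
// still be consuming the payload when this returns.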
int AddressSpaceStream::writeFullyAsync(const void *buf, size_t size)
{
    AEMU_SCOPED_TRACE("writeFullyAsync");
    ensureType3Finished();
    ensureType1Finished();

    __atomic_store_n(&m_context.ring_config->transfer_size, size, __ATOMIC_RELEASE);
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 2;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool pingedHost = false;

    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        uint32_t hostState = __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

        if (!pingedHost &&
            hostState != ASG_HOST_STATE_CAN_CONSUME &&
            hostState != ASG_HOST_STATE_RENDERING) {
            pingedHost = true;
            notifyAvailable();
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

const unsigned char *AddressSpaceStream::commitBufferAndReadFully(
    size_t writeSize, void *userReadBufPtr, size_t totalReadSize) {

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, writeSize);
        m_usingTmpBuf = false;
        m_tmpBufXferSize = 0;
        return readFully(userReadBufPtr, totalReadSize);
    } else {
        commitBuffer(writeSize);
        return readFully(userReadBufPtr, totalReadSize);
    }
}

bool AddressSpaceStream::isInError() const {
    return 1 == m_context.ring_config->in_error;
}

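// Reads whatever the host has made available on the from_host
// large-transfer ring, up to |trySize| bytes, spinning (with backoff)
// until at least one byte arrives. Returns -1 if the ring is in error.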
ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
    ensureType3Finished();
    ensureType1Finished();

    size_t actuallyRead = 0;
    size_t readIters = 0;

    while (!actuallyRead) {
        ++readIters;

        uint32_t readAvail =
            ring_buffer_available_read(
                m_context.from_host_large_xfer.ring,
                &m_context.from_host_large_xfer.view);

        if (!readAvail) {
            ring_buffer_yield();
            backoff();
            continue;
        }

        uint32_t toRead = readAvail > trySize ? trySize : readAvail;

        long stepsRead = ring_buffer_view_read(
            m_context.from_host_large_xfer.ring,
            &m_context.from_host_large_xfer.view,
            readBuffer, toRead, 1);

        actuallyRead += stepsRead * toRead;

        if (isInError()) {
            return -1;
        }
    }

    return actuallyRead;
}

void AddressSpaceStream::notifyAvailable() {
    AEMU_SCOPED_TRACE("PING");
    struct address_space_ping request;
    request.metadata = ASG_NOTIFY_AVAILABLE;
    m_ops.ping(m_handle, &request);
    ++m_notifs;
}

uint32_t AddressSpaceStream::getRelativeBufferPos(uint32_t pos) {
    return pos & m_writeBufferMask;
}

void AddressSpaceStream::advanceWrite() {
    m_writeStart += m_context.ring_config->flush_interval;

    if (m_writeStart == m_buf + m_context.ring_config->buffer_size) {
        m_writeStart = m_buf;
    }
}

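// Waits for the host to make progress on the to_host ring, pinging it
// once if it looks idle (neither consuming nor rendering) instead of
// spinning indefinitely.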
void AddressSpaceStream::ensureConsumerFinishing() {
    uint32_t currAvailRead = ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        ring_buffer_yield();
        uint32_t nextAvailRead = ring_buffer_available_read(m_context.to_host, 0);

        if (nextAvailRead != currAvailRead) {
            break;
        }

        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            break;
        }

        backoff();
    }
}

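// Spins (with backoff) until the host has consumed every outstanding
// type-1 descriptor on the to_host ring.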
void AddressSpaceStream::ensureType1Finished() {
    AEMU_SCOPED_TRACE("ensureType1Finished");

    uint32_t currAvailRead =
        ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        backoff();
        ring_buffer_yield();
        currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
        if (isInError()) {
            return;
        }
    }
}

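// Spins until the host has drained the large-transfer ring, re-pinging it
// if it stops consuming mid-transfer.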
void AddressSpaceStream::ensureType3Finished() {
    AEMU_SCOPED_TRACE("ensureType3Finished");
    uint32_t availReadLarge =
        ring_buffer_available_read(
            m_context.to_host_large_xfer.ring,
            &m_context.to_host_large_xfer.view);
    while (availReadLarge) {
        ring_buffer_yield();
        backoff();
        availReadLarge =
            ring_buffer_available_read(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view);
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
        }
        if (isInError()) {
            return;
        }
    }
}

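// Type-1 transfer: post a small (offset, size) descriptor to the to_host
// ring referencing data already in the shared write buffer. Throttles so
// that at most buffer_size / flush_interval - 1 descriptors are
// outstanding before a new one is written.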
int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {

    AEMU_SCOPED_TRACE("type1Write");

    ensureType3Finished();

    size_t sent = 0;
    size_t sizeForRing = sizeof(struct asg_type1_xfer);

    struct asg_type1_xfer xfer = {
        bufferOffset,
        (uint32_t)size,
    };

    uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

    uint32_t maxOutstanding = 1;
    uint32_t maxSteps = m_context.ring_config->buffer_size /
            m_context.ring_config->flush_interval;

    if (maxSteps > 1) maxOutstanding = maxSteps - 1;

    uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);

    while (ringAvailReadNow >= maxOutstanding * sizeForRing) {
        ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
    }

    bool hostPinged = false;
    while (sent < sizeForRing) {

        long sentChunks = ring_buffer_write(
            m_context.to_host,
            writeBufferBytes + sent,
            sizeForRing - sent, 1);

        if (!hostPinged &&
            *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * (sizeForRing - sent);

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }

    resetBackoff();
    return 0;
}

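// Busy-wait throttle shared by the spin loops above: after a threshold
// number of iterations, sleep for an exponentially growing number of
// microseconds, capped at 1000. On Android the threshold and doubling
// increment can be tuned via the ro.boot.asg.* properties below.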
void AddressSpaceStream::backoff() {
#if defined(HOST_BUILD) || defined(__APPLE__) || defined(__MACOSX) || defined(__Fuchsia__) || defined(__linux__)
    static const uint32_t kBackoffItersThreshold = 50000000;
    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
#else
    static const uint32_t kBackoffItersThreshold = property_get_int32("ro.boot.asg.backoffiters", 50000000);
    static const uint32_t kBackoffFactorDoublingIncrement = property_get_int32("ro.boot.asg.backoffincrement", 50000000);
#endif
    ++m_backoffIters;

    if (m_backoffIters > kBackoffItersThreshold) {
        usleep(m_backoffFactor);
        uint32_t itersSoFarAfterThreshold = m_backoffIters - kBackoffItersThreshold;
        if (itersSoFarAfterThreshold > kBackoffFactorDoublingIncrement) {
            m_backoffFactor = m_backoffFactor << 1;
            if (m_backoffFactor > 1000) m_backoffFactor = 1000;
            m_backoffIters = kBackoffItersThreshold;
        }
    }
}

void AddressSpaceStream::resetBackoff() {
    m_backoffIters = 0;
    m_backoffFactor = 1;
}
854