• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include "AddressSpaceStream.h"
17 
18 #include "android/base/Tracing.h"
19 
20 #if PLATFORM_SDK_VERSION < 26
21 #include <cutils/log.h>
22 #else
23 #include <log/log.h>
24 #endif
25 #include <cutils/properties.h>
26 #include <errno.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <unistd.h>
30 #include <string.h>
31 
32 static const size_t kReadSize = 512 * 1024;
33 static const size_t kWriteOffset = kReadSize;
34 
createAddressSpaceStream(size_t ignored_bufSize)35 AddressSpaceStream* createAddressSpaceStream(size_t ignored_bufSize) {
36     // Ignore incoming ignored_bufSize
37     (void)ignored_bufSize;
38 
39     auto handle = goldfish_address_space_open();
40     address_space_handle_t child_device_handle;
41 
42     if (!goldfish_address_space_set_subdevice_type(handle, GoldfishAddressSpaceSubdeviceType::Graphics, &child_device_handle)) {
43         ALOGE("AddressSpaceStream::create failed (initial device create)\n");
44         goldfish_address_space_close(handle);
45         return nullptr;
46     }
47 
48     struct address_space_ping request;
49     request.metadata = ASG_GET_RING;
50     if (!goldfish_address_space_ping(child_device_handle, &request)) {
51         ALOGE("AddressSpaceStream::create failed (get ring)\n");
52         goldfish_address_space_close(child_device_handle);
53         return nullptr;
54     }
55 
56     uint64_t ringOffset = request.metadata;
57 
58     request.metadata = ASG_GET_BUFFER;
59     if (!goldfish_address_space_ping(child_device_handle, &request)) {
60         ALOGE("AddressSpaceStream::create failed (get buffer)\n");
61         goldfish_address_space_close(child_device_handle);
62         return nullptr;
63     }
64 
65     uint64_t bufferOffset = request.metadata;
66     uint64_t bufferSize = request.size;
67 
68     if (!goldfish_address_space_claim_shared(
69         child_device_handle, ringOffset, sizeof(asg_ring_storage))) {
70         ALOGE("AddressSpaceStream::create failed (claim ring storage)\n");
71         goldfish_address_space_close(child_device_handle);
72         return nullptr;
73     }
74 
75     if (!goldfish_address_space_claim_shared(
76         child_device_handle, bufferOffset, bufferSize)) {
77         ALOGE("AddressSpaceStream::create failed (claim buffer storage)\n");
78         goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
79         goldfish_address_space_close(child_device_handle);
80         return nullptr;
81     }
82 
83     char* ringPtr = (char*)goldfish_address_space_map(
84         child_device_handle, ringOffset, sizeof(struct asg_ring_storage));
85 
86     if (!ringPtr) {
87         ALOGE("AddressSpaceStream::create failed (map ring storage)\n");
88         goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
89         goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
90         goldfish_address_space_close(child_device_handle);
91         return nullptr;
92     }
93 
94     char* bufferPtr = (char*)goldfish_address_space_map(
95         child_device_handle, bufferOffset, bufferSize);
96 
97     if (!bufferPtr) {
98         ALOGE("AddressSpaceStream::create failed (map buffer storage)\n");
99         goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
100         goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
101         goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
102         goldfish_address_space_close(child_device_handle);
103         return nullptr;
104     }
105 
106     struct asg_context context =
107         asg_context_create(
108             ringPtr, bufferPtr, bufferSize);
109 
110     request.metadata = ASG_SET_VERSION;
111     request.size = 1; // version 1
112 
113     if (!goldfish_address_space_ping(child_device_handle, &request)) {
114         ALOGE("AddressSpaceStream::create failed (get buffer)\n");
115         goldfish_address_space_unmap(bufferPtr, bufferSize);
116         goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
117         goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
118         goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
119         goldfish_address_space_close(child_device_handle);
120         return nullptr;
121     }
122 
123     uint32_t version = request.size;
124 
125     context.ring_config->transfer_mode = 1;
126     context.ring_config->host_consumed_pos = 0;
127     context.ring_config->guest_write_pos = 0;
128 
129     struct address_space_ops ops = {
130         .open = goldfish_address_space_open,
131         .close = goldfish_address_space_close,
132         .claim_shared = goldfish_address_space_claim_shared,
133         .unclaim_shared = goldfish_address_space_unclaim_shared,
134         .map = goldfish_address_space_map,
135         .unmap = goldfish_address_space_unmap,
136         .set_subdevice_type = goldfish_address_space_set_subdevice_type,
137         .ping = goldfish_address_space_ping,
138     };
139 
140     AddressSpaceStream* res =
141         new AddressSpaceStream(
142             child_device_handle, version, context,
143             ringOffset, bufferOffset, false /* not virtio */, ops);
144 
145     return res;
146 }
147 
148 #if defined(HOST_BUILD) || defined(__Fuchsia__)
createVirtioGpuAddressSpaceStream(size_t ignored_bufSize)149 AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t ignored_bufSize) {
150     // Ignore incoming ignored_bufSize
151     (void)ignored_bufSize;
152     return nullptr;
153 }
154 #else
openVirtGpuAddressSpace()155 static address_space_handle_t openVirtGpuAddressSpace() {
156     address_space_handle_t ret;
157     uint8_t retryCount = 64;
158     do {
159         ret = virtgpu_address_space_open();
160     } while(ret < 0 && retryCount-- > 0 && errno == EINTR);
161     return ret;
162 }
163 
createVirtioGpuAddressSpaceStream(size_t ignored_bufSize)164 AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t ignored_bufSize) {
165     // Ignore incoming ignored_bufSize
166     (void)ignored_bufSize;
167 
168     auto handle = openVirtGpuAddressSpace();
169     if (handle <= reinterpret_cast<address_space_handle_t>(-1)) {
170         ALOGE("AddressSpaceStream::create failed (open device) %d (%s)\n", errno, strerror(errno));
171         return nullptr;
172     }
173 
174     struct address_space_virtgpu_info virtgpu_info;
175 
176     ALOGD("%s: create subdevice and get resp\n", __func__);
177     if (!virtgpu_address_space_create_context_with_subdevice(
178             handle, GoldfishAddressSpaceSubdeviceType::VirtioGpuGraphics,
179             &virtgpu_info)) {
180         ALOGE("AddressSpaceStream::create failed (create subdevice)\n");
181         virtgpu_address_space_close(handle);
182         return nullptr;
183     }
184     ALOGD("%s: create subdevice and get resp (done)\n", __func__);
185 
186     struct address_space_ping request;
187     uint32_t ringSize = 0;
188     uint32_t bufferSize = 0;
189 
190     request.metadata = ASG_GET_RING;
191     if (!virtgpu_address_space_ping_with_response(
192         &virtgpu_info, &request)) {
193         ALOGE("AddressSpaceStream::create failed (get ring version)\n");
194         virtgpu_address_space_close(handle);
195         return nullptr;
196     }
197     ringSize = request.size;
198 
199     request.metadata = ASG_GET_BUFFER;
200     if (!virtgpu_address_space_ping_with_response(
201         &virtgpu_info, &request)) {
202         ALOGE("AddressSpaceStream::create failed (get ring version)\n");
203         virtgpu_address_space_close(handle);
204         return nullptr;
205     }
206     bufferSize = request.size;
207 
208     request.metadata = ASG_SET_VERSION;
209     request.size = 1; // version 1
210 
211     if (!virtgpu_address_space_ping_with_response(
212         &virtgpu_info, &request)) {
213         ALOGE("AddressSpaceStream::create failed (set version)\n");
214         virtgpu_address_space_close(handle);
215         return nullptr;
216     }
217 
218     ALOGD("%s: ping returned. context ring and buffer sizes %u %u\n", __func__,
219             ringSize, bufferSize);
220 
221     uint64_t hostmem_id = request.metadata;
222     uint32_t version = request.size;
223     size_t hostmem_alloc_size =
224         (size_t)(ringSize + bufferSize);
225 
226     ALOGD("%s: hostmem size: %zu\n", __func__, hostmem_alloc_size);
227 
228     struct address_space_virtgpu_hostmem_info hostmem_info;
229     if (!virtgpu_address_space_allocate_hostmem(
230             handle,
231             hostmem_alloc_size,
232             hostmem_id,
233             &hostmem_info)) {
234         ALOGE("AddressSpaceStream::create failed (alloc hostmem)\n");
235         virtgpu_address_space_close(handle);
236         return nullptr;
237     }
238 
239     request.metadata = ASG_GET_CONFIG;
240     if (!virtgpu_address_space_ping_with_response(
241         &virtgpu_info, &request)) {
242         ALOGE("AddressSpaceStream::create failed (get config)\n");
243         virtgpu_address_space_close(handle);
244         return nullptr;
245     }
246 
247     char* ringPtr = (char*)hostmem_info.ptr;
248     char* bufferPtr = ((char*)hostmem_info.ptr) + sizeof(struct asg_ring_storage);
249 
250     struct asg_context context =
251         asg_context_create(
252             (char*)ringPtr, (char*)bufferPtr, bufferSize);
253 
254     context.ring_config->transfer_mode = 1;
255     context.ring_config->host_consumed_pos = 0;
256     context.ring_config->guest_write_pos = 0;
257 
258     struct address_space_ops ops = {
259         .open = virtgpu_address_space_open,
260         .close = virtgpu_address_space_close,
261         .ping = virtgpu_address_space_ping,
262         .allocate_hostmem = virtgpu_address_space_allocate_hostmem,
263         .ping_with_response = virtgpu_address_space_ping_with_response,
264     };
265 
266     AddressSpaceStream* res =
267         new AddressSpaceStream(
268             handle, version, context,
269             0, 0, true /* is virtio */, ops);
270 
271     return res;
272 }
273 #endif // HOST_BUILD || __Fuchsia__
274 
275 
// Binds the stream to an already-initialized ASG context.
//   handle            - device handle; owned by the stream (closed in dtor).
//   version           - negotiated ASG protocol version (currently unused).
//   context           - ring/buffer pointers produced by asg_context_create().
//   ringOffset        - shared-memory offset of the ring storage; used for
//                       unclaiming on teardown (only when !virtioMode).
//   writeBufferOffset - shared-memory offset of the write buffer (same use).
//   virtioMode        - true when backed by virtio-gpu hostmem, in which
//                       case the dtor skips unmap/unclaim.
//   ops               - backend function table (goldfish or virtgpu flavor).
AddressSpaceStream::AddressSpaceStream(
    address_space_handle_t handle,
    uint32_t version,
    struct asg_context context,
    uint64_t ringOffset,
    uint64_t writeBufferOffset,
    bool virtioMode,
    struct address_space_ops ops) :
    IOStream(context.ring_config->flush_interval),
    m_virtioMode(virtioMode),
    m_ops(ops),
    m_tmpBuf(0),
    m_tmpBufSize(0),
    m_tmpBufXferSize(0),
    m_usingTmpBuf(0),
    m_readBuf(0),
    m_read(0),
    m_readLeft(0),
    m_handle(handle),
    m_version(version),
    m_context(context),
    m_ringOffset(ringOffset),
    m_writeBufferOffset(writeBufferOffset),
    m_writeBufferSize(context.ring_config->buffer_size),
    // Mask arithmetic assumes buffer_size is a power of two -- TODO confirm
    // against the host-side ring configuration.
    m_writeBufferMask(m_writeBufferSize - 1),
    m_buf((unsigned char*)context.buffer),
    m_writeStart(m_buf),
    m_writeStep(context.ring_config->flush_interval),
    m_notifs(0),
    m_written(0),
    m_backoffIters(0),
    m_backoffFactor(1) {
    // We'll use this in the future, but at the moment,
    // it's a potential compile Werror.
    (void)m_version;
}
312 
// Flushes pending work, waits for the host to drain both rings, then tears
// down shared memory (only when not in virtio mode, where the guest
// claimed/mapped it explicitly at creation) and closes the device handle.
AddressSpaceStream::~AddressSpaceStream() {
    flush();
    ensureType3Finished();  // drain the large-transfer ring
    ensureType1Finished();  // drain the descriptor ring
    if (!m_virtioMode) {
        m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
        m_ops.unmap(m_context.buffer, m_writeBufferSize);
        m_ops.unclaim_shared(m_handle, m_ringOffset);
        m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
    }
    m_ops.close(m_handle);
    // Heap-side staging buffers are lazily allocated; free if present.
    if (m_readBuf) free(m_readBuf);
    if (m_tmpBuf) free(m_tmpBuf);
}
327 
idealAllocSize(size_t len)328 size_t AddressSpaceStream::idealAllocSize(size_t len) {
329     if (len > m_writeStep) return len;
330     return m_writeStep;
331 }
332 
// Returns a staging area of at least |minSize| bytes for the next command
// batch. Requests that fit in one flush interval are written in place in
// the shared write buffer; larger requests are staged in a heap-side temp
// buffer that will be sent via writeFully() on commit.
void *AddressSpaceStream::allocBuffer(size_t minSize) {
    AEMU_SCOPED_TRACE("allocBuffer");
    ensureType3Finished();

    // Lazily allocate the read-side buffer used by readFully().
    if (!m_readBuf) {
        m_readBuf = (unsigned char*)malloc(kReadSize);
    }

    size_t allocSize =
        (m_writeStep < minSize ? minSize : m_writeStep);

    if (m_writeStep < allocSize) {
        // Request exceeds one flush interval: use (and grow) the temp
        // buffer, doubling to amortize future growth.
        // NOTE(review): malloc/realloc results are unchecked; OOM would
        // crash on first use of m_tmpBuf.
        if (!m_tmpBuf) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)malloc(m_tmpBufSize);
        }

        if (m_tmpBufSize < allocSize) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)realloc(m_tmpBuf, m_tmpBufSize);
        }

        // Entering temp-buffer mode: flush any in-place data first.
        if (!m_usingTmpBuf) {
            flush();
        }

        m_usingTmpBuf = true;
        m_tmpBufXferSize = allocSize;
        return m_tmpBuf;
    } else {
        // Leaving temp-buffer mode: send the staged payload first.
        if (m_usingTmpBuf) {
            writeFully(m_tmpBuf, m_tmpBufXferSize);
            m_usingTmpBuf = false;
            m_tmpBufXferSize = 0;
        }

        return m_writeStart;
    }
};
372 
commitBuffer(size_t size)373 int AddressSpaceStream::commitBuffer(size_t size)
374 {
375     if (size == 0) return 0;
376 
377     if (m_usingTmpBuf) {
378         writeFully(m_tmpBuf, size);
379         m_tmpBufXferSize = 0;
380         m_usingTmpBuf = false;
381         return 0;
382     } else {
383         int res = type1Write(m_writeStart - m_buf, size);
384         advanceWrite();
385         return res;
386     }
387 }
388 
readFully(void * ptr,size_t totalReadSize)389 const unsigned char *AddressSpaceStream::readFully(void *ptr, size_t totalReadSize)
390 {
391 
392     unsigned char* userReadBuf = static_cast<unsigned char*>(ptr);
393 
394     if (!userReadBuf) {
395         if (totalReadSize > 0) {
396             ALOGE("AddressSpaceStream::commitBufferAndReadFully failed, userReadBuf=NULL, totalReadSize %zu, lethal"
397                     " error, exiting.", totalReadSize);
398             abort();
399         }
400         return nullptr;
401     }
402 
403     // Advance buffered read if not yet consumed.
404     size_t remaining = totalReadSize;
405     size_t bufferedReadSize =
406         m_readLeft < remaining ? m_readLeft : remaining;
407 
408     if (bufferedReadSize) {
409         memcpy(userReadBuf,
410                m_readBuf + (m_read - m_readLeft),
411                bufferedReadSize);
412         remaining -= bufferedReadSize;
413         m_readLeft -= bufferedReadSize;
414     }
415 
416     if (!remaining) return userReadBuf;
417 
418     // Read up to kReadSize bytes if all buffered read has been consumed.
419     size_t maxRead = m_readLeft ? 0 : kReadSize;
420     ssize_t actual = 0;
421 
422     if (maxRead) {
423         actual = speculativeRead(m_readBuf, maxRead);
424 
425         // Updated buffered read size.
426         if (actual > 0) {
427             m_read = m_readLeft = actual;
428         }
429 
430         if (actual == 0) {
431             ALOGD("%s: end of pipe", __FUNCTION__);
432             return NULL;
433         }
434     }
435 
436     // Consume buffered read and read more if necessary.
437     while (remaining) {
438         bufferedReadSize = m_readLeft < remaining ? m_readLeft : remaining;
439         if (bufferedReadSize) {
440             memcpy(userReadBuf + (totalReadSize - remaining),
441                    m_readBuf + (m_read - m_readLeft),
442                    bufferedReadSize);
443             remaining -= bufferedReadSize;
444             m_readLeft -= bufferedReadSize;
445             continue;
446         }
447 
448         actual = speculativeRead(m_readBuf, kReadSize);
449 
450         if (actual == 0) {
451             ALOGD("%s: Failed reading from pipe: %d", __FUNCTION__,  errno);
452             return NULL;
453         }
454 
455         if (actual > 0) {
456             m_read = m_readLeft = actual;
457             continue;
458         }
459     }
460 
461     resetBackoff();
462     return userReadBuf;
463 }
464 
read(void * buf,size_t * inout_len)465 const unsigned char *AddressSpaceStream::read(void *buf, size_t *inout_len) {
466     unsigned char* dst = (unsigned char*)buf;
467     size_t wanted = *inout_len;
468     ssize_t actual = speculativeRead(dst, wanted);
469 
470     if (actual >= 0) {
471         *inout_len = actual;
472     } else {
473         return nullptr;
474     }
475 
476     return (const unsigned char*)dst;
477 }
478 
// Writes |size| bytes from |buf| to the host using the type-3 (large
// transfer) ring, blocking until the host has consumed the whole payload.
// Returns 0 on success, -1 if the ring enters the error state.
int AddressSpaceStream::writeFully(const void *buf, size_t size)
{
    AEMU_SCOPED_TRACE("writeFully");
    // Drain any outstanding transfers before switching transfer modes.
    ensureType3Finished();
    ensureType1Finished();

    m_context.ring_config->transfer_size = size;
    m_context.ring_config->transfer_mode = 3;  // type-3: large transfer

    size_t sent = 0;
    // Chunk at a quarter of the buffer so the host can consume concurrently
    // while we keep filling.
    size_t preferredChunkSize = m_writeBufferSize / 4;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool hostPinged = false;
    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        // Wake an idle host once so it starts draining the ring.
        if (!hostPinged && *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            // Ring full: yield and back off before retrying.
            ring_buffer_yield();
            backoff();
        }

        // sentChunks is presumably 0 or 1 (all-or-nothing write of
        // sendThisTime bytes) -- TODO confirm against the ring_buffer API.
        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    // Wait for full drain before returning to type-1 mode.
    ensureType3Finished();

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    // Periodic throughput logging (every ~100 MB written).
    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}
543 
// Like writeFully(), but does not wait for the host to finish draining the
// ring before returning (fire-and-forget type-3 transfer). Returns 0 on
// success, -1 if the ring enters the error state.
int AddressSpaceStream::writeFullyAsync(const void *buf, size_t size)
{
    AEMU_SCOPED_TRACE("writeFullyAsync");
    ensureType3Finished();
    ensureType1Finished();

    // Publish the transfer size with release semantics so the host observes
    // it before any subsequently written ring data.
    __atomic_store_n(&m_context.ring_config->transfer_size, size, __ATOMIC_RELEASE);
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    // Larger chunks than writeFully (half the buffer) since we do not wait
    // for a full drain afterwards.
    size_t preferredChunkSize = m_writeBufferSize / 2;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool pingedHost = false;

    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        uint32_t hostState = __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

        // Wake an idle host once so it starts consuming.
        if (!pingedHost &&
            hostState != ASG_HOST_STATE_CAN_CONSUME &&
            hostState != ASG_HOST_STATE_RENDERING) {
            pingedHost = true;
            notifyAvailable();
        }

        if (sentChunks == 0) {
            // Ring full: yield and back off before retrying.
            ring_buffer_yield();
            backoff();
        }

        // sentChunks is presumably 0 or 1 (all-or-nothing) -- TODO confirm
        // against the ring_buffer API.
        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }


    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    // Periodic throughput logging (every ~100 MB written).
    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}
611 
commitBufferAndReadFully(size_t writeSize,void * userReadBufPtr,size_t totalReadSize)612 const unsigned char *AddressSpaceStream::commitBufferAndReadFully(
613     size_t writeSize, void *userReadBufPtr, size_t totalReadSize) {
614 
615     if (m_usingTmpBuf) {
616         writeFully(m_tmpBuf, writeSize);
617         m_usingTmpBuf = false;
618         m_tmpBufXferSize = 0;
619         return readFully(userReadBufPtr, totalReadSize);
620     } else {
621         commitBuffer(writeSize);
622         return readFully(userReadBufPtr, totalReadSize);
623     }
624 }
625 
isInError() const626 bool AddressSpaceStream::isInError() const {
627     return 1 == m_context.ring_config->in_error;
628 }
629 
// Blocking read from the host's large-transfer (from_host) ring. Spins
// (yield + backoff) until at least one byte is available, then reads up to
// |trySize| bytes into |readBuffer|. Returns the byte count read, or -1 if
// the ring enters the error state while waiting.
ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
    // Drain all outbound traffic before blocking on a reply.
    ensureType3Finished();
    ensureType1Finished();

    size_t actuallyRead = 0;
    size_t readIters = 0;

    while (!actuallyRead) {
        ++readIters;

        uint32_t readAvail =
            ring_buffer_available_read(
                m_context.from_host_large_xfer.ring,
                &m_context.from_host_large_xfer.view);

        if (!readAvail) {
            // Nothing available yet: spin with yield/backoff.
            ring_buffer_yield();
            backoff();
            continue;
        }

        uint32_t toRead = readAvail > trySize ?  trySize : readAvail;

        // stepsRead is presumably 0 or 1 (single-step view read) -- TODO
        // confirm against the ring_buffer API.
        long stepsRead = ring_buffer_view_read(
            m_context.from_host_large_xfer.ring,
            &m_context.from_host_large_xfer.view,
            readBuffer, toRead, 1);

        actuallyRead += stepsRead * toRead;

        if (isInError()) {
            return -1;
        }
    }

    return actuallyRead;
}
667 
notifyAvailable()668 void AddressSpaceStream::notifyAvailable() {
669     AEMU_SCOPED_TRACE("PING");
670     struct address_space_ping request;
671     request.metadata = ASG_NOTIFY_AVAILABLE;
672     m_ops.ping(m_handle, &request);
673     ++m_notifs;
674 }
675 
getRelativeBufferPos(uint32_t pos)676 uint32_t AddressSpaceStream::getRelativeBufferPos(uint32_t pos) {
677     return pos & m_writeBufferMask;
678 }
679 
advanceWrite()680 void AddressSpaceStream::advanceWrite() {
681     m_writeStart += m_context.ring_config->flush_interval;
682 
683     if (m_writeStart == m_buf + m_context.ring_config->buffer_size) {
684         m_writeStart = m_buf;
685     }
686 }
687 
// Waits for the host to make progress on the type-1 (to_host) ring. Exits
// when the ring is empty, when the host's available-read count changes
// (i.e. it consumed something), or after pinging a host that is neither
// consuming nor rendering.
void AddressSpaceStream::ensureConsumerFinishing() {
    uint32_t currAvailRead = ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        ring_buffer_yield();
        uint32_t nextAvailRead = ring_buffer_available_read(m_context.to_host, 0);

        // The host consumed something since the last check; let it run.
        if (nextAvailRead != currAvailRead) {
            break;
        }

        // Host looks idle: wake it once and stop waiting.
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            break;
        }

        backoff();
    }
}
708 
// Blocks (with yield/backoff) until the host has consumed everything in
// the type-1 (to_host) ring, or the stream enters the error state.
void AddressSpaceStream::ensureType1Finished() {
    AEMU_SCOPED_TRACE("ensureType1Finished");

    uint32_t currAvailRead =
        ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        backoff();
        ring_buffer_yield();
        currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
        if (isInError()) {
            // Host flagged an error; stop waiting on a dead ring.
            return;
        }
    }
}
724 
// Blocks until the host drains the large-transfer (type-3) ring, pinging
// the host every iteration in which it appears neither consuming nor
// rendering. Returns early if the stream enters the error state.
void AddressSpaceStream::ensureType3Finished() {
    AEMU_SCOPED_TRACE("ensureType3Finished");
    uint32_t availReadLarge =
        ring_buffer_available_read(
            m_context.to_host_large_xfer.ring,
            &m_context.to_host_large_xfer.view);
    while (availReadLarge) {
        ring_buffer_yield();
        backoff();
        availReadLarge =
            ring_buffer_available_read(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view);
        // Host looks idle: wake it so it keeps consuming.
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
        }
        if (isInError()) {
            return;
        }
    }
}
747 
// Publishes a type-1 transfer descriptor (write-buffer offset + size) to
// the to_host ring, signalling that one flush-interval window of the
// shared write buffer is ready for the host. Returns 0 on success, -1 if
// the stream enters the error state.
int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {

    AEMU_SCOPED_TRACE("type1Write");

    // Large (type-3) transfers must fully drain before type-1 descriptors.
    ensureType3Finished();

    size_t sent = 0;
    size_t sizeForRing = sizeof(struct asg_type1_xfer);

    struct asg_type1_xfer xfer = {
        bufferOffset,
        (uint32_t)size,
    };

    uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

    // Cap outstanding descriptors at the number of flush-interval windows
    // minus one, so the host never reads a window still being written.
    uint32_t maxOutstanding = 1;
    uint32_t maxSteps = m_context.ring_config->buffer_size /
            m_context.ring_config->flush_interval;

    if (maxSteps > 1) maxOutstanding = maxSteps - 1;

    // Spin until there is room for another descriptor.
    // NOTE(review): this wait has no yield/backoff, unlike the loop below.
    uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);

    while (ringAvailReadNow >= maxOutstanding * sizeForRing) {
        ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
    }

    bool hostPinged = false;
    while (sent < sizeForRing) {

        long sentChunks = ring_buffer_write(
            m_context.to_host,
            writeBufferBytes + sent,
            sizeForRing - sent, 1);

        // Wake an idle host once so it starts consuming.
        if (!hostPinged &&
            *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            // Ring full: yield and back off before retrying.
            ring_buffer_yield();
            backoff();
        }

        // sentChunks is presumably 0 or 1 (all-or-nothing) -- TODO confirm
        // against the ring_buffer API.
        sent += sentChunks * (sizeForRing - sent);

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    m_written += size;

    // Periodic throughput logging (every ~100 MB written).
    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }

    resetBackoff();
    return 0;
}
822 
// Progressive backoff for the spin loops above. The first
// kBackoffItersThreshold iterations cost nothing; after that each call
// sleeps m_backoffFactor microseconds, and the sleep doubles (clamped at
// 1000 us) every additional kBackoffFactorDoublingIncrement iterations.
void AddressSpaceStream::backoff() {
#if defined(HOST_BUILD) || defined(__APPLE__) || defined(__MACOSX) || defined(__Fuchsia__)
    static const uint32_t kBackoffItersThreshold = 50000000;
    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
#else
    // Read once on first call (thread-safe local-static init); tunable via
    // boot properties on Android.
    static const uint32_t kBackoffItersThreshold = property_get_int32("ro.boot.asg.backoffiters", 50000000);
    static const uint32_t kBackoffFactorDoublingIncrement = property_get_int32("ro.boot.asg.backoffincrement", 50000000);
#endif
    ++m_backoffIters;

    if (m_backoffIters > kBackoffItersThreshold) {
        usleep(m_backoffFactor);
        uint32_t itersSoFarAfterThreshold = m_backoffIters - kBackoffItersThreshold;
        if (itersSoFarAfterThreshold > kBackoffFactorDoublingIncrement) {
            m_backoffFactor = m_backoffFactor << 1;
            if (m_backoffFactor > 1000) m_backoffFactor = 1000;
            // Reset so the next doubling takes another full increment.
            m_backoffIters = kBackoffItersThreshold;
        }
    }
}
843 
resetBackoff()844 void AddressSpaceStream::resetBackoff() {
845     m_backoffIters = 0;
846     m_backoffFactor = 1;
847 }
848