• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include "RingStream.h"
15 
16 #include "aemu/base/system/System.h"
17 
18 #define EMUGL_DEBUG_LEVEL  0
19 
20 #include "host-common/crash_reporter.h"
21 #include "host-common/debug.h"
22 #include "host-common/dma_device.h"
23 #include "host-common/GfxstreamFatalError.h"
24 
25 #include <assert.h>
26 #include <memory.h>
27 
28 using emugl::ABORT_REASON_OTHER;
29 using emugl::FatalError;
30 
31 namespace gfxstream {
32 
// Constructs a RingStream over an address-space-graphics (asg) context.
// |context| carries the shared rings, transfer buffer, and ring_config used
// by all read/write paths below; |callbacks| supplies host-side hooks (e.g.
// onUnavailableRead, getPtr); |bufsize| is forwarded to the IOStream base as
// the stream's buffer size. The context and callbacks are copied by value.
RingStream::RingStream(
    struct asg_context context,
    android::emulation::asg::ConsumerCallbacks callbacks,
    size_t bufsize) :
    IOStream(bufsize),
    mContext(context),
    mCallbacks(callbacks) { }
RingStream::~RingStream() = default;
41 
// Returns how many bytes must remain free at the ring tail, mirroring the
// guest-configured flush interval from the shared ring_config.
// NOTE(review): the exact contract of flush_interval comes from the asg
// protocol — confirm against the asg_context/ring_config definitions.
int RingStream::getNeededFreeTailSize() const {
    return mContext.ring_config->flush_interval;
}
45 
allocBuffer(size_t minSize)46 void* RingStream::allocBuffer(size_t minSize) {
47     if (mWriteBuffer.size() < minSize) {
48         mWriteBuffer.resize_noinit(minSize);
49     }
50     return mWriteBuffer.data();
51 }
52 
// Pushes |size| bytes of mWriteBuffer into the host->guest large-transfer
// ring, spinning until the guest drains enough space. Returns the number of
// bytes actually written — equal to |size| unless the guest signalled exit
// mid-transfer, in which case the partial count is returned.
int RingStream::commitBuffer(size_t size) {
    size_t sent = 0;
    auto data = mWriteBuffer.data();

    size_t iters = 0;
    size_t backedOffIters = 0;
    // After this many spin iterations with no progress, start sleeping
    // between retries so a stalled guest does not burn a whole host core.
    const size_t kBackoffIters = 10000000ULL;
    while (sent < size) {
        ++iters;
        auto avail = ring_buffer_available_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view);

        // Check if the guest process crashed.
        if (!avail) {
            if (*(mContext.host_state) == ASG_HOST_STATE_EXIT) {
                // Guest is gone; report the partial byte count.
                return sent;
            } else {
                ring_buffer_yield();
                if (iters > kBackoffIters) {
                    android::base::sleepUs(10);
                    ++backedOffIters;
                }
            }
            continue;
        }

        // Write only what fits right now; loop for the remainder.
        auto remaining = size - sent;
        auto todo = remaining < avail ? remaining : avail;

        ring_buffer_view_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view,
            data + sent, todo, 1);

        sent += todo;
    }

    if (backedOffIters > 0) {
        WARN("Backed off %zu times to avoid overloading the guest system. This "
             "may indicate resource constraints or performance issues.",
             backedOffIters);
    }
    return sent;
}
98 
// Reads up to *inout_len bytes from the guest into |buf|, blocking (with a
// spin/yield/callback fallback) until at least one byte is available.
// On return, *inout_len holds the byte count actually read and |buf| is
// returned; returns nullptr when the stream should exit (mShouldExit, or a
// snapshot-triggered exit).
const unsigned char* RingStream::readRaw(void* buf, size_t* inout_len) {
    size_t wanted = *inout_len;
    size_t count = 0U;
    auto dst = static_cast<char*>(buf);

    uint32_t ringAvailable = 0;
    uint32_t ringLargeXferAvailable = 0;

    // After this many empty polls, stop yielding and consult
    // mCallbacks.onUnavailableRead() for exit/snapshot decisions.
    const uint32_t maxSpins = 30;
    uint32_t spins = 0;
    bool inLargeXfer = true;

    *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

    while (count < wanted) {

        // First drain bytes stashed by a previous oversized type-1 transfer
        // (see type1Read's temp-buffer path).
        if (mReadBufferLeft) {
            size_t avail = std::min<size_t>(wanted - count, mReadBufferLeft);
            memcpy(dst + count,
                    mReadBuffer.data() + (mReadBuffer.size() - mReadBufferLeft),
                    avail);
            count += avail;
            mReadBufferLeft -= avail;
            continue;
        }

        mReadBuffer.clear();

        // no read buffer left...
        if (count > 0) {  // There is some data to return.
            break;
        }

        *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

        // if (mInSnapshotOperation) {
        //     fprintf(stderr, "%s: %p in snapshot operation, exit\n", __func__, mRenderThreadPtr);
        //     // In a snapshot operation, exit
        //     return nullptr;
        // }

        if (mShouldExit) {
            return nullptr;
        }

        // Poll both guest->host rings: the small-transfer ring and the
        // large-transfer ring.
        ringAvailable =
            ring_buffer_available_read(mContext.to_host, 0);
        ringLargeXferAvailable =
            ring_buffer_available_read(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view);

        auto current = dst + count;
        auto ptrEnd = dst + wanted;

        if (ringAvailable) {
            // Small-transfer ring has data; dispatch on the guest-selected
            // transfer mode from the shared ring config.
            inLargeXfer = false;
            uint32_t transferMode =
                mContext.ring_config->transfer_mode;
            switch (transferMode) {
                case 1:
                    type1Read(ringAvailable, dst, &count, &current, ptrEnd);
                    break;
                case 2:
                    type2Read(ringAvailable, &count, &current, ptrEnd);
                    break;
                case 3:
                    // emugl::emugl_crash_reporter(
                    //     "Guest should never set to "
                    //     "transfer mode 3 with ringAvailable != 0\n");
                default:
                    // emugl::emugl_crash_reporter(
                    //     "Unknown transfer mode %u\n",
                    //     transferMode);
                    break;
            }
        } else if (ringLargeXferAvailable) {
            type3Read(ringLargeXferAvailable,
                      &count, &current, ptrEnd);
            inLargeXfer = true;
            // transfer_size reaching 0 marks the end of the large transfer.
            if (0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }
        } else {
            // Nothing available. Mid-large-transfer, keep polling tightly.
            if (inLargeXfer && 0 != __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                continue;
            }

            if (inLargeXfer && 0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }

            if (++spins < maxSpins) {
                ring_buffer_yield();
                continue;
            } else {
                spins = 0;
            }

            if (mShouldExit) {
                return nullptr;
            }

            if (mShouldExitForSnapshot && mInSnapshotOperation) {
                return nullptr;
            }

            // Let the consumer callback decide how to proceed when the ring
            // stays empty; sentinel return values are interpreted below.
            int unavailReadResult = mCallbacks.onUnavailableRead();

            if (-1 == unavailReadResult) {
                mShouldExit = true;
            }

            // pause pre snapshot
            if (-2 == unavailReadResult) {
                mShouldExitForSnapshot = true;
            }

            // resume post snapshot
            if (-3 == unavailReadResult) {
                mShouldExitForSnapshot = false;
            }

            continue;
        }
    }

    *inout_len = count;
    ++mXmits;
    mTotalRecv += count;
    D("read %d bytes", (int)count);

    *(mContext.host_state) = ASG_HOST_STATE_RENDERING;
    return (const unsigned char*)buf;
}
234 
// Consumes type-1 transfer descriptors from the to_host ring. Each
// asg_type1_xfer names an (offset, size) span inside mContext.buffer that
// the guest has filled; the payload is copied into *current (or stashed in
// mReadBuffer when it does not fit). |available| is the byte count readable
// from the ring; |begin| is the start of the caller's destination buffer;
// |count|/|current| are advanced by the bytes copied; |ptrEnd| bounds the
// destination.
void RingStream::type1Read(
    uint32_t available,
    char* begin,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = available / sizeof(struct asg_type1_xfer);

    if (mType1Xfers.size() < xferTotal) {
        // Grow with headroom to reduce future reallocations.
        mType1Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType1Xfers.data();

    // Peek all descriptors without consuming them; each one is consumed
    // individually below only after its payload has been copied out.
    ring_buffer_copy_contents(
        mContext.to_host, 0, xferTotal * sizeof(struct asg_type1_xfer), (uint8_t*)xfersPtr);

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code-loop-increment"
#endif // __clang__
    for (uint32_t i = 0; i < xferTotal; ++i) {
        if (*current + xfersPtr[i].size > ptrEnd) {
            // Save in a temp buffer or we'll get stuck
            // (only when nothing was copied yet — otherwise readRaw will be
            // called again with a fresh destination).
            if (begin == *current && i == 0) {
                const char* src = mContext.buffer + xfersPtr[i].offset;
                mReadBuffer.resize_noinit(xfersPtr[i].size);
                memcpy(mReadBuffer.data(), src, xfersPtr[i].size);
                mReadBufferLeft = xfersPtr[i].size;
                ring_buffer_advance_read(
                        mContext.to_host, sizeof(struct asg_type1_xfer), 1);
                // Presumably tells the guest this buffer span is free for
                // reuse — confirm against the asg protocol.
                __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
            }
            return;
        }
        const char* src = mContext.buffer + xfersPtr[i].offset;
        memcpy(*current, src, xfersPtr[i].size);
        ring_buffer_advance_read(
                mContext.to_host, sizeof(struct asg_type1_xfer), 1);
        __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;

        // TODO: Figure out why running multiple xfers here can result in data
        // corruption and remove clang diagnostic block.
        return;
    }
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
}
285 
type2Read(uint32_t available,size_t * count,char ** current,const char * ptrEnd)286 void RingStream::type2Read(
287     uint32_t available,
288     size_t* count, char** current,const char* ptrEnd) {
289 
290     GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "nyi. abort";
291 
292     uint32_t xferTotal = available / sizeof(struct asg_type2_xfer);
293 
294     if (mType2Xfers.size() < xferTotal) {
295         mType2Xfers.resize(xferTotal * 2);
296     }
297 
298     auto xfersPtr = mType2Xfers.data();
299 
300     ring_buffer_copy_contents(
301         mContext.to_host, 0, available, (uint8_t*)xfersPtr);
302 
303     for (uint32_t i = 0; i < xferTotal; ++i) {
304 
305         if (*current + xfersPtr[i].size > ptrEnd) return;
306 
307         const char* src =
308             mCallbacks.getPtr(xfersPtr[i].physAddr);
309 
310         memcpy(*current, src, xfersPtr[i].size);
311 
312         ring_buffer_advance_read(
313             mContext.to_host, sizeof(struct asg_type1_xfer), 1);
314 
315         *current += xfersPtr[i].size;
316         *count += xfersPtr[i].size;
317     }
318 }
319 
// Consumes a chunk of a large (type-3) transfer: the guest publishes the
// total transfer size in ring_config->transfer_size and streams the payload
// through the to_host_large_xfer ring. |available| is the byte count
// currently readable from that ring; |count|/|current| are advanced by the
// bytes copied; |ptrEnd| bounds the destination buffer.
void RingStream::type3Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE);
    // Clamp to: destination space left, the announced transfer size, and
    // what the ring currently holds.
    uint32_t maxCanRead = ptrEnd - *current;
    uint32_t ringAvail = available;
    uint32_t actuallyRead = std::min(ringAvail, std::min(xferTotal, maxCanRead));

    // Decrement transfer_size before letting the guest proceed in ring_buffer funcs or we will race
    // to the next time the guest sets transfer_size
    __atomic_fetch_sub(&mContext.ring_config->transfer_size, actuallyRead, __ATOMIC_RELEASE);

    // Blocking read of exactly |actuallyRead| bytes; aborts via the shared
    // in_error flag if the ring enters an error state.
    ring_buffer_read_fully_with_abort(
            mContext.to_host_large_xfer.ring,
            &mContext.to_host_large_xfer.view,
            *current, actuallyRead,
            1, &mContext.ring_config->in_error);

    *current += actuallyRead;
    *count += actuallyRead;
}
342 
// Resolves a guest physical address to a host pointer for DMA reads via the
// global emugl DMA device. Pair with unlockDma() when done.
void* RingStream::getDmaForReading(uint64_t guest_paddr) {
    return emugl::g_emugl_dma_get_host_addr(guest_paddr);
}
346 
// Releases the DMA mapping previously obtained via getDmaForReading().
void RingStream::unlockDma(uint64_t guest_paddr) { emugl::g_emugl_dma_unlock(guest_paddr); }
348 
writeFully(const void * buf,size_t len)349 int RingStream::writeFully(const void* buf, size_t len) {
350     void* dstBuf = alloc(len);
351     memcpy(dstBuf, buf, len);
352     flush();
353     return 0;
354 }
355 
readFully(void * buf,size_t len)356 const unsigned char *RingStream::readFully( void *buf, size_t len) {
357     GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "not intended for use with RingStream";
358 }
359 
// Snapshot save: persists the unconsumed tail of the read buffer and the
// pending write buffer. Must mirror onLoad() exactly.
void RingStream::onSave(android::base::Stream* stream) {
    stream->putBe32(mReadBufferLeft);
    // Only the last |mReadBufferLeft| bytes of mReadBuffer are still
    // unconsumed; earlier bytes were already handed out by readRaw().
    stream->write(mReadBuffer.data() + mReadBuffer.size() - mReadBufferLeft,
                  mReadBufferLeft);
    android::base::saveBuffer(stream, mWriteBuffer);
}
366 
// Snapshot load: restores the read/write buffers saved by onSave(). Since
// only the unconsumed tail was saved, the whole loaded read buffer is
// unconsumed, so mReadBufferLeft is its full size. Returns the write-buffer
// data pointer for the IOStream base.
unsigned char* RingStream::onLoad(android::base::Stream* stream) {
    android::base::loadBuffer(stream, &mReadBuffer);
    mReadBufferLeft = mReadBuffer.size();
    android::base::loadBuffer(stream, &mWriteBuffer);
    return reinterpret_cast<unsigned char*>(mWriteBuffer.data());
}
373 
374 }  // namespace gfxstream
375