• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include "RingStream.h"
15 
16 #include "base/System.h"
17 
18 #define EMUGL_DEBUG_LEVEL  0
19 
20 #include "host-common/crash_reporter.h"
21 #include "host-common/debug.h"
22 #include "host-common/dma_device.h"
23 
24 #include <assert.h>
25 #include <memory.h>
26 
27 namespace emugl {
28 
// Constructs a RingStream over an address-space-graphics (ASG) ring context.
// |context| carries the shared rings and ring_config used to exchange data
// with the guest; |callbacks| supplies host-side hooks used by this class
// (onUnavailableRead, getPtr); |bufsize| is forwarded to the IOStream base
// as its buffer size.
RingStream::RingStream(
    struct asg_context context,
    android::emulation::asg::ConsumerCallbacks callbacks,
    size_t bufsize) :
    IOStream(bufsize),
    mContext(context),
    mCallbacks(callbacks) { }
RingStream::~RingStream() = default;
37 
getNeededFreeTailSize() const38 int RingStream::getNeededFreeTailSize() const {
39     return mContext.ring_config->flush_interval;
40 }
41 
allocBuffer(size_t minSize)42 void* RingStream::allocBuffer(size_t minSize) {
43     if (mWriteBuffer.size() < minSize) {
44         mWriteBuffer.resize_noinit(minSize);
45     }
46     return mWriteBuffer.data();
47 }
48 
// Pushes |size| bytes from mWriteBuffer into the host->guest large-transfer
// ring, spinning until everything is written or the guest signals exit.
// Returns the number of bytes actually sent (== size unless the guest died
// mid-transfer).
int RingStream::commitBuffer(size_t size) {
    size_t sent = 0;
    auto data = mWriteBuffer.data();

    size_t iters = 0;
    size_t backedOffIters = 0;
    // After this many fruitless spins, start sleeping between retries so a
    // stalled guest doesn't peg a host core.
    const size_t kBackoffIters = 10000000ULL;
    while (sent < size) {
        ++iters;
        auto avail = ring_buffer_available_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view);

        // Check if the guest process crashed.
        if (!avail) {
            if (*(mContext.host_state) == ASG_HOST_STATE_EXIT) {
                // Guest is gone; report the partial byte count.
                return sent;
            } else {
                ring_buffer_yield();
                if (iters > kBackoffIters) {
                    android::base::sleepUs(10);
                    ++backedOffIters;
                }
            }
            continue;
        }

        // Write as much as the ring currently has room for.
        auto remaining = size - sent;
        auto todo = remaining < avail ? remaining : avail;

        ring_buffer_view_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view,
            data + sent, todo, 1);

        sent += todo;
    }

    if (backedOffIters > 0) {
        fprintf(stderr, "%s: warning: backed off %zu times due to guest slowness.\n",
                __func__,
                backedOffIters);
    }
    return sent;
}
94 
// Reads up to |*inout_len| bytes of guest->host traffic into |buf|.
// Drains any leftover bytes from a previous oversized type-1 transfer first,
// then services the to_host rings according to the guest-selected transfer
// mode. On return, |*inout_len| holds the byte count actually read.
// Returns |buf| on success, or nullptr when the stream must shut down
// (guest exit, or a snapshot operation in progress).
const unsigned char* RingStream::readRaw(void* buf, size_t* inout_len) {
    size_t wanted = *inout_len;
    size_t count = 0U;
    auto dst = static_cast<char*>(buf);

    uint32_t ringAvailable = 0;
    uint32_t ringLargeXferAvailable = 0;

    // Spin this many times on an empty ring before invoking the
    // onUnavailableRead callback (which may sleep or request exit).
    const uint32_t maxSpins = 30;
    uint32_t spins = 0;
    bool inLargeXfer = true;

    // Advertise to the guest that the host is ready to consume.
    *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

    while (count < wanted) {

        // First, hand out bytes buffered from an earlier transfer that did
        // not fit in the caller's buffer (see type1Read's spill path).
        if (mReadBufferLeft) {
            size_t avail = std::min<size_t>(wanted - count, mReadBufferLeft);
            memcpy(dst + count,
                    mReadBuffer.data() + (mReadBuffer.size() - mReadBufferLeft),
                    avail);
            count += avail;
            mReadBufferLeft -= avail;
            continue;
        }

        mReadBuffer.clear();

        // no read buffer left...
        if (count > 0) {  // There is some data to return.
            break;
        }

        *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

        // if (mInSnapshotOperation) {
        //     fprintf(stderr, "%s: %p in snapshot operation, exit\n", __func__, mRenderThreadPtr);
        //     // In a snapshot operation, exit
        //     return nullptr;
        // }

        if (mShouldExit) {
            return nullptr;
        }

        // Poll both the small-xfer and large-xfer rings.
        ringAvailable =
            ring_buffer_available_read(mContext.to_host, 0);
        ringLargeXferAvailable =
            ring_buffer_available_read(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view);

        auto current = dst + count;
        auto ptrEnd = dst + wanted;

        if (ringAvailable) {
            inLargeXfer = false;
            // Dispatch on the transfer mode the guest selected in ring_config.
            uint32_t transferMode =
                mContext.ring_config->transfer_mode;
            switch (transferMode) {
                case 1:
                    type1Read(ringAvailable, dst, &count, &current, ptrEnd);
                    break;
                case 2:
                    type2Read(ringAvailable, &count, &current, ptrEnd);
                    break;
                case 3:
                    // emugl::emugl_crash_reporter(
                    //     "Guest should never set to "
                    //     "transfer mode 3 with ringAvailable != 0\n");
                default:
                    // emugl::emugl_crash_reporter(
                    //     "Unknown transfer mode %u\n",
                    //     transferMode);
                    break;
            }
        } else if (ringLargeXferAvailable) {
            type3Read(ringLargeXferAvailable,
                      &count, &current, ptrEnd);
            inLargeXfer = true;
            // transfer_size hitting zero marks the end of the large transfer.
            if (0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }
        } else {
            // Mid-large-transfer with nothing available yet: keep spinning,
            // the guest is still producing the remainder.
            if (inLargeXfer && 0 != __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                continue;
            }

            if (inLargeXfer && 0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }

            if (++spins < maxSpins) {
                ring_buffer_yield();
                continue;
            } else {
                spins = 0;
            }

            if (mShouldExit) {
                return nullptr;
            }

            if (mShouldExitForSnapshot && mInSnapshotOperation) {
                return nullptr;
            }

            // Ask the consumer callbacks what to do about a persistently
            // empty ring; negative results encode control requests.
            int unavailReadResult = mCallbacks.onUnavailableRead();

            if (-1 == unavailReadResult) {
                mShouldExit = true;
            }

            // pause pre snapshot
            if (-2 == unavailReadResult) {
                mShouldExitForSnapshot = true;
            }

            // resume post snapshot
            if (-3 == unavailReadResult) {
                mShouldExitForSnapshot = false;
            }

            continue;
        }
    }

    *inout_len = count;
    ++mXmits;
    mTotalRecv += count;
    D("read %d bytes", (int)count);

    // Tell the guest the host has moved on to processing what it read.
    *(mContext.host_state) = ASG_HOST_STATE_RENDERING;
    return (const unsigned char*)buf;
}
230 
// Transfer mode 1: the to_host ring carries fixed-size (offset, size)
// descriptors pointing into the shared mContext.buffer. Copies descriptor
// payloads into |*current|, advancing |*count| accordingly, and stops at
// the caller's buffer end |ptrEnd|. |begin| is the start of the caller's
// buffer, used to detect the "first xfer is already too big" case.
void RingStream::type1Read(
    uint32_t available,
    char* begin,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = available / sizeof(struct asg_type1_xfer);

    // Grow the descriptor scratch array geometrically to amortize resizes.
    if (mType1Xfers.size() < xferTotal) {
        mType1Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType1Xfers.data();

    // Peek at the descriptors without consuming them yet; each one is only
    // consumed (advance_read) after its payload has been copied out.
    ring_buffer_copy_contents(
        mContext.to_host, 0, xferTotal * sizeof(struct asg_type1_xfer), (uint8_t*)xfersPtr);

    for (uint32_t i = 0; i < xferTotal; ++i) {
        if (*current + xfersPtr[i].size > ptrEnd) {
            // Save in a temp buffer or we'll get stuck
            // (only when the very first xfer doesn't fit an empty buffer;
            // readRaw drains mReadBuffer/mReadBufferLeft on the next call).
            if (begin == *current && i == 0) {
                const char* src = mContext.buffer + xfersPtr[i].offset;
                mReadBuffer.resize_noinit(xfersPtr[i].size);
                memcpy(mReadBuffer.data(), src, xfersPtr[i].size);
                mReadBufferLeft = xfersPtr[i].size;
                ring_buffer_advance_read(
                        mContext.to_host, sizeof(struct asg_type1_xfer), 1);
                // Release the consumed bytes back to the guest producer.
                __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
            }
            return;
        }
        const char* src = mContext.buffer + xfersPtr[i].offset;
        memcpy(*current, src, xfersPtr[i].size);
        ring_buffer_advance_read(
                mContext.to_host, sizeof(struct asg_type1_xfer), 1);
        __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;

        // TODO: Figure out why running multiple xfers here can result in data
        // corruption.
        return;
    }
}
274 
type2Read(uint32_t available,size_t * count,char ** current,const char * ptrEnd)275 void RingStream::type2Read(
276     uint32_t available,
277     size_t* count, char** current,const char* ptrEnd) {
278 
279     fprintf(stderr, "%s: nyi. abort\n", __func__);
280     abort();
281 
282     uint32_t xferTotal = available / sizeof(struct asg_type2_xfer);
283 
284     if (mType2Xfers.size() < xferTotal) {
285         mType2Xfers.resize(xferTotal * 2);
286     }
287 
288     auto xfersPtr = mType2Xfers.data();
289 
290     ring_buffer_copy_contents(
291         mContext.to_host, 0, available, (uint8_t*)xfersPtr);
292 
293     for (uint32_t i = 0; i < xferTotal; ++i) {
294 
295         if (*current + xfersPtr[i].size > ptrEnd) return;
296 
297         const char* src =
298             mCallbacks.getPtr(xfersPtr[i].physAddr);
299 
300         memcpy(*current, src, xfersPtr[i].size);
301 
302         ring_buffer_advance_read(
303             mContext.to_host, sizeof(struct asg_type1_xfer), 1);
304 
305         *current += xfersPtr[i].size;
306         *count += xfersPtr[i].size;
307     }
308 }
309 
// Transfer mode 3 (large transfers): payload bytes flow directly through the
// to_host_large_xfer ring; ring_config->transfer_size tracks how many bytes
// of the current transfer remain. Reads the smaller of (ring availability,
// remaining transfer size, caller buffer space).
void RingStream::type3Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE);
    uint32_t maxCanRead = ptrEnd - *current;
    uint32_t ringAvail = available;
    uint32_t actuallyRead = std::min(ringAvail, std::min(xferTotal, maxCanRead));

    // Decrement transfer_size before letting the guest proceed in ring_buffer funcs or we will race
    // to the next time the guest sets transfer_size
    __atomic_fetch_sub(&mContext.ring_config->transfer_size, actuallyRead, __ATOMIC_RELEASE);

    // Abortable full read: bails out if the guest flags in_error.
    ring_buffer_read_fully_with_abort(
            mContext.to_host_large_xfer.ring,
            &mContext.to_host_large_xfer.view,
            *current, actuallyRead,
            1, &mContext.ring_config->in_error);

    *current += actuallyRead;
    *count += actuallyRead;
}
332 
getDmaForReading(uint64_t guest_paddr)333 void* RingStream::getDmaForReading(uint64_t guest_paddr) {
334     return g_emugl_dma_get_host_addr(guest_paddr);
335 }
336 
// Releases the DMA mapping previously obtained for |guest_paddr| via
// getDmaForReading.
void RingStream::unlockDma(uint64_t guest_paddr) {
    g_emugl_dma_unlock(guest_paddr);
}
340 
writeFully(const void * buf,size_t len)341 int RingStream::writeFully(const void* buf, size_t len) {
342     void* dstBuf = alloc(len);
343     memcpy(dstBuf, buf, len);
344     flush();
345     return 0;
346 }
347 
// Unsupported for RingStream: callers must use readRaw instead.
// Logs and aborts unconditionally; abort() is noreturn, so the missing
// return value is fine.
const unsigned char *RingStream::readFully( void *buf, size_t len) {
    fprintf(stderr, "%s: FATAL: not intended for use with RingStream\n", __func__);
    abort();
}
352 
// Snapshot save: persists only the unconsumed tail of mReadBuffer (the
// mReadBufferLeft bytes readRaw has yet to hand out) plus the write buffer.
// Must stay symmetric with onLoad below.
void RingStream::onSave(android::base::Stream* stream) {
    stream->putBe32(mReadBufferLeft);
    stream->write(mReadBuffer.data() + mReadBuffer.size() - mReadBufferLeft,
                  mReadBufferLeft);
    android::base::saveBuffer(stream, mWriteBuffer);
}
359 
// Snapshot load: restores the buffers saved by onSave. Since onSave wrote
// only the unconsumed tail, the reloaded mReadBuffer is entirely pending,
// hence mReadBufferLeft == mReadBuffer.size(). Returns the write buffer
// pointer for the caller (IOStream contract).
unsigned char* RingStream::onLoad(android::base::Stream* stream) {
    android::base::loadBuffer(stream, &mReadBuffer);
    mReadBufferLeft = mReadBuffer.size();
    android::base::loadBuffer(stream, &mWriteBuffer);
    return reinterpret_cast<unsigned char*>(mWriteBuffer.data());
}
366 
367 }  // namespace emugl
368