1 // Copyright 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include <vulkan/vulkan.h>
15 
16 #include <cstdarg>
17 #include <cstdio>
18 #include <deque>
19 #include <type_traits>
20 #include <unordered_map>
21 
22 #include "BlobManager.h"
23 #include "FrameBuffer.h"
24 #include "GfxStreamAgents.h"
25 #include "VirtioGpuTimelines.h"
26 #include "VkCommonOperations.h"
27 #include "aemu/base/AlignedBuf.h"
28 #include "aemu/base/ManagedDescriptor.hpp"
29 #include "aemu/base/Metrics.h"
30 #include "aemu/base/Tracing.h"
31 #include "aemu/base/memory/SharedMemory.h"
32 #include "aemu/base/synchronization/Lock.h"
33 #include "host-common/AddressSpaceService.h"
34 #include "host-common/GfxstreamFatalError.h"
35 #include "host-common/address_space_device.h"
36 #include "host-common/android_pipe_common.h"
37 #include "host-common/android_pipe_device.h"
38 #include "host-common/feature_control.h"
39 #include "host-common/globals.h"
40 #include "host-common/opengles-pipe.h"
41 #include "host-common/opengles.h"
42 #include "host-common/refcount-pipe.h"
43 #include "host-common/vm_operations.h"
44 #include "virgl_hw.h"
45 #include "virtgpu_gfxstream_protocol.h"
46 #include "vk_util.h"
47 
48 extern "C" {
49 #include "drm_fourcc.h"
50 #include "gfxstream/virtio-gpu-gfxstream-renderer-unstable.h"
51 #include "gfxstream/virtio-gpu-gfxstream-renderer.h"
52 #include "host-common/goldfish_pipe.h"
53 #include "virgl_hw.h"
54 }  // extern "C"
55 
56 #if defined(_WIN32)
57 struct iovec {
58     void* iov_base; /* Starting address */
59     size_t iov_len; /* Length in bytes */
60 };
61 #else
62 #include <unistd.h>
63 #endif  // _WIN32
64 
65 #define MAX_DEBUG_BUFFER_SIZE 512
66 
67 void* globalUserData = nullptr;
68 stream_renderer_debug_callback globalDebugCallback = nullptr;
69 
70 void stream_renderer_debug(uint32_t type, const char* format, ...) {
71     char buf[MAX_DEBUG_BUFFER_SIZE];
72     va_list args;
73     va_start(args, format);
74     vsnprintf(buf, MAX_DEBUG_BUFFER_SIZE, format, args);
75     va_end(args);
76 
77     if (globalUserData && globalDebugCallback) {
78         struct stream_renderer_debug debug = {0};
79         debug.debug_type = type;
80         debug.message = &buf[0];
81 
82         globalDebugCallback(globalUserData, &debug);
83     } else {
84         fprintf(stderr, "%s\n", buf);
85     }
86 }
87 
88 #if STREAM_RENDERER_LOG_LEVEL >= 1
89 #define stream_renderer_error(format, ...)                                                         \
90     do {                                                                                           \
91         stream_renderer_debug(STREAM_RENDERER_DEBUG_ERROR, "[%s(%d)] %s " format,                  \
92                               __FILE__, __LINE__, __PRETTY_FUNCTION__, ##__VA_ARGS__);             \
93     } while (0)
94 #else
95 #define stream_renderer_error(format, ...)
96 #endif
97 
98 #if STREAM_RENDERER_LOG_LEVEL >= 3
99 #define stream_renderer_info(format, ...)                                                         \
100     do {                                                                                          \
101         stream_renderer_debug(STREAM_RENDERER_DEBUG_INFO, "[%s(%d)] %s " format,                  \
102                               __FILE__, __LINE__, __PRETTY_FUNCTION__, ##__VA_ARGS__);            \
103     } while (0)
104 #else
105 #define stream_renderer_info(format, ...)
106 #endif
107 
108 // Virtio Goldfish Pipe: Overview-----------------------------------------------
109 //
110 // Virtio Goldfish Pipe is meant for running goldfish pipe services with a
111 // stock Linux kernel that is already capable of virtio-gpu. It runs DRM
112 // VIRTGPU ioctls on top of a custom implementation of virglrenderer on the
113 // host side that doesn't (directly) do any rendering, but instead talks to
114 // host-side pipe services.
115 //
116 // This is mainly used for graphics at the moment, though it's possible to run
117 // other pipe services over virtio-gpu as well. virtio-gpu is selected over
118 // other devices primarily because an API (virglrenderer) already exists that
119 // is somewhat separate from virtio-gpu, and because this avoids creating a
120 // new virtio device to handle goldfish pipe.
121 //
122 // How it works: existing virglrenderer APIs are remapped to perform pipe
123 // operations. Pipe operations consist of the following:
124 //
125 // - open() / close(): Starts or stops an instance of a pipe service.
126 //
127 // - write(const void* buf, size_t len) / read(const void* buf, size_t len):
128 // Sends or receives data over the pipe. The first write() is the name of the
129 // pipe service. After the pipe service is determined, the host calls
130 // resetPipe() to replace the host-side pipe instance with an instance of the
131 // pipe service.
132 //
133 // - reset(void* initialPipe, void* actualPipe): the operation that replaces an
134 // initial pipe with an instance of a pipe service.
135 //
136 // Next, here's how the pipe operations map to virglrenderer commands:
137 //
138 // - open() -> virgl_renderer_context_create(),
139 //             virgl_renderer_resource_create(),
140 //             virgl_renderer_resource_attach_iov()
141 //
142 // The open() corresponds to a guest-side open of a rendernode, which triggers
143 // context creation. Each pipe corresponds 1:1 with a drm virtgpu context id.
144 // We also associate an R8 resource with each pipe as the backing data for
145 // write/read.
146 //
147 // - close() -> virgl_renderer_resource_unref(),
148 //              virgl_renderer_context_destroy()
149 //
150 // The close() corresponds to undoing the operations of open().
151 //
152 // - write() -> virgl_renderer_transfer_write_iov() OR
153 //              virgl_renderer_submit_cmd()
154 //
155 // Pipe write() operation corresponds to performing a TRANSFER_TO_HOST ioctl on
156 // the resource created alongside open(), OR an EXECBUFFER ioctl.
157 //
158 // - read() -> virgl_renderer_transfer_read_iov()
159 //
160 // Pipe read() operation corresponds to performing a TRANSFER_FROM_HOST ioctl on
161 // the resource created alongside open().
162 //
163 // Details on transfer mechanism: mapping 2D transfer to 1D ones----------------
164 //
165 // Resource objects are typically 2D textures, while we want to transmit
166 // 1D buffers to the pipe services on the host.  DRM VIRTGPU uses the concept
167 // of a 'box' to represent transfers that do not involve an entire resource
168 // object.  Each box has a x, y, width and height parameter to define the
169 // extent of the transfer for a 2D texture.  In our use case, we only use the x
170 // and width parameters. We've also created the resource with R8 format
171 // (byte-by-byte) with width equal to the total size of the transfer buffer we
172 // want (around 1 MB).
173 //
174 // The resource object itself is currently backed via plain guest RAM, which
175 // can be physically not-contiguous from the guest POV, and therefore
176 // corresponds to a possibly-long list of pointers and sizes (iov) on the host
177 // side. The sync_iov helper function converts the list of pointers
178 // to one contiguous buffer on the host (or vice versa), at the cost of a copy.
179 // (TODO: see if we can use host coherent memory to do away with the copy).
180 //
181 // We can see this abstraction in use via the implementation of
182 // transferWriteIov and transferReadIov below, which sync the iovec to/from a
183 // linear buffer if necessary, and then perform a corresponding pipe operation
184 // based on the box parameter's x and width values (see the sketch below).
185 
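// Illustrative sketch (not part of the renderer; the variables below are
// hypothetical): how a guest expresses a 1D transfer of `bytesToSend` bytes at
// byte offset `byteOffset` with a 2D box, assuming an R8-format resource whose
// width equals the size of the transfer buffer. Only x and width matter; y
// stays 0 and height stays 1 so the box describes a single row of bytes:
//
//     stream_renderer_box box = {};
//     box.x = byteOffset;    // starting byte within the linear buffer
//     box.y = 0;
//     box.w = bytesToSend;   // number of bytes to transfer
//     box.h = 1;
//
// The TRANSFER_TO_HOST / TRANSFER_FROM_HOST paths land in transferWriteIov /
// transferReadIov below, which use sync_iov() to copy the byte range
// [x, x + w) between the guest iovecs and the host-side linear buffer.
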
186 using android::AndroidPipe;
187 using android::base::AutoLock;
188 using android::base::DescriptorType;
189 using android::base::Lock;
190 using android::base::ManagedDescriptor;
191 using android::base::MetricsLogger;
192 using android::base::SharedMemory;
193 
194 using emugl::FatalError;
195 using gfxstream::BlobManager;
196 using gfxstream::ManagedDescriptorInfo;
197 
198 using VirtioGpuResId = uint32_t;
199 
200 static constexpr int kPipeTryAgain = -2;
201 
202 struct VirtioGpuCmd {
203     uint32_t op;
204     uint32_t cmdSize;
205     unsigned char buf[0];
206 } __attribute__((packed));
207 
208 struct PipeCtxEntry {
209     std::string name;
210     uint32_t capsetId;
211     VirtioGpuCtxId ctxId;
212     GoldfishHostPipe* hostPipe;
213     int fence;
214     uint32_t addressSpaceHandle;
215     bool hasAddressSpaceHandle;
216     std::unordered_map<VirtioGpuResId, uint32_t> addressSpaceHandles;
217 };
218 
219 enum class ResType {
220     // Used as a communication channel between the guest and the host
221     // which does not need an allocation on the host GPU.
222     PIPE,
223     // Used as a GPU data buffer.
224     BUFFER,
225     // Used as a GPU texture.
226     COLOR_BUFFER,
227 };
228 
229 struct PipeResEntry {
230     stream_renderer_resource_create_args args;
231     iovec* iov;
232     uint32_t numIovs;
233     void* linear;
234     size_t linearSize;
235     GoldfishHostPipe* hostPipe;
236     VirtioGpuCtxId ctxId;
237     void* hva;
238     uint64_t hvaSize;
239     uint64_t blobId;
240     uint32_t blobMem;
241     uint32_t blobFlags;
242     uint32_t caching;
243     ResType type;
244     std::shared_ptr<SharedMemory> ringBlob = nullptr;
245     bool externalAddr = false;
246     std::shared_ptr<ManagedDescriptorInfo> descriptorInfo = nullptr;
247 };
248 
249 static inline uint32_t align_up(uint32_t n, uint32_t a) { return ((n + a - 1) / a) * a; }
250 
251 static inline uint32_t align_up_power_of_2(uint32_t n, uint32_t a) {
252     return (n + (a - 1)) & ~(a - 1);
253 }
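
// For reference: align_up(10, 3) == 12 and align_up_power_of_2(10, 4) == 12.
// align_up works for any non-zero alignment, while align_up_power_of_2
// assumes the alignment `a` is a power of two.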
254 
255 #define VIRGL_FORMAT_NV12 166
256 #define VIRGL_FORMAT_YV12 163
257 #define VIRGL_FORMAT_P010 314
258 
259 const uint32_t kGlBgra = 0x80e1;
260 const uint32_t kGlRgba = 0x1908;
261 const uint32_t kGlRgba16f = 0x881A;
262 const uint32_t kGlRgb565 = 0x8d62;
263 const uint32_t kGlRgba1010102 = 0x8059;
264 const uint32_t kGlR8 = 0x8229;
265 const uint32_t kGlR16 = 0x822A;
266 const uint32_t kGlRg8 = 0x822b;
267 const uint32_t kGlLuminance = 0x1909;
268 const uint32_t kGlLuminanceAlpha = 0x190a;
269 const uint32_t kGlUnsignedByte = 0x1401;
270 const uint32_t kGlUnsignedShort565 = 0x8363;
271 
272 constexpr uint32_t kFwkFormatGlCompat = 0;
273 constexpr uint32_t kFwkFormatYV12 = 1;
274 // constexpr uint32_t kFwkFormatYUV420888 = 2;
275 constexpr uint32_t kFwkFormatNV12 = 3;
276 constexpr uint32_t kFwkFormatP010 = 4;
277 
278 static inline bool virgl_format_is_yuv(uint32_t format) {
279     switch (format) {
280         case VIRGL_FORMAT_B8G8R8X8_UNORM:
281         case VIRGL_FORMAT_B8G8R8A8_UNORM:
282         case VIRGL_FORMAT_R8G8B8X8_UNORM:
283         case VIRGL_FORMAT_R8G8B8A8_UNORM:
284         case VIRGL_FORMAT_B5G6R5_UNORM:
285         case VIRGL_FORMAT_R8_UNORM:
286         case VIRGL_FORMAT_R16_UNORM:
287         case VIRGL_FORMAT_R16G16B16A16_FLOAT:
288         case VIRGL_FORMAT_R8G8_UNORM:
289         case VIRGL_FORMAT_R10G10B10A2_UNORM:
290             return false;
291         case VIRGL_FORMAT_NV12:
292         case VIRGL_FORMAT_P010:
293         case VIRGL_FORMAT_YV12:
294             return true;
295         default:
296             stream_renderer_error("Unknown virgl format 0x%x", format);
297             return false;
298     }
299 }
300 
301 static inline uint32_t virgl_format_to_gl(uint32_t virgl_format) {
302     switch (virgl_format) {
303         case VIRGL_FORMAT_B8G8R8X8_UNORM:
304         case VIRGL_FORMAT_B8G8R8A8_UNORM:
305             return kGlBgra;
306         case VIRGL_FORMAT_R8G8B8X8_UNORM:
307         case VIRGL_FORMAT_R8G8B8A8_UNORM:
308             return kGlRgba;
309         case VIRGL_FORMAT_B5G6R5_UNORM:
310             return kGlRgb565;
311         case VIRGL_FORMAT_R16_UNORM:
312             return kGlR16;
313         case VIRGL_FORMAT_R16G16B16A16_FLOAT:
314             return kGlRgba16f;
315         case VIRGL_FORMAT_R8_UNORM:
316             return kGlR8;
317         case VIRGL_FORMAT_R8G8_UNORM:
318             return kGlRg8;
319         case VIRGL_FORMAT_NV12:
320         case VIRGL_FORMAT_P010:
321         case VIRGL_FORMAT_YV12:
322             // emulated as RGBA8888
323             return kGlRgba;
324         case VIRGL_FORMAT_R10G10B10A2_UNORM:
325             return kGlRgba1010102;
326         default:
327             return kGlRgba;
328     }
329 }
330 
331 static inline uint32_t virgl_format_to_fwk_format(uint32_t virgl_format) {
332     switch (virgl_format) {
333         case VIRGL_FORMAT_NV12:
334             return kFwkFormatNV12;
335         case VIRGL_FORMAT_P010:
336             return kFwkFormatP010;
337         case VIRGL_FORMAT_YV12:
338             return kFwkFormatYV12;
339         case VIRGL_FORMAT_R8_UNORM:
340         case VIRGL_FORMAT_R16_UNORM:
341         case VIRGL_FORMAT_R16G16B16A16_FLOAT:
342         case VIRGL_FORMAT_R8G8_UNORM:
343         case VIRGL_FORMAT_B8G8R8X8_UNORM:
344         case VIRGL_FORMAT_B8G8R8A8_UNORM:
345         case VIRGL_FORMAT_R8G8B8X8_UNORM:
346         case VIRGL_FORMAT_R8G8B8A8_UNORM:
347         case VIRGL_FORMAT_B5G6R5_UNORM:
348         case VIRGL_FORMAT_R10G10B10A2_UNORM:
349         default:  // kFwkFormatGlCompat: No extra conversions needed
350             return kFwkFormatGlCompat;
351     }
352 }
353 
354 static inline uint32_t gl_format_to_natural_type(uint32_t format) {
355     switch (format) {
356         case kGlBgra:
357         case kGlRgba:
358         case kGlLuminance:
359         case kGlLuminanceAlpha:
360             return kGlUnsignedByte;
361         case kGlRgb565:
362             return kGlUnsignedShort565;
363         default:
364             return kGlUnsignedByte;
365     }
366 }
367 
368 static inline size_t virgl_format_to_linear_base(uint32_t format, uint32_t totalWidth,
369                                                  uint32_t totalHeight, uint32_t x, uint32_t y,
370                                                  uint32_t w, uint32_t h) {
371     if (virgl_format_is_yuv(format)) {
372         return 0;
373     } else {
374         uint32_t bpp = 4;
375         switch (format) {
376             case VIRGL_FORMAT_R16G16B16A16_FLOAT:
377                 bpp = 8;
378                 break;
379             case VIRGL_FORMAT_B8G8R8X8_UNORM:
380             case VIRGL_FORMAT_B8G8R8A8_UNORM:
381             case VIRGL_FORMAT_R8G8B8X8_UNORM:
382             case VIRGL_FORMAT_R8G8B8A8_UNORM:
383             case VIRGL_FORMAT_R10G10B10A2_UNORM:
384                 bpp = 4;
385                 break;
386             case VIRGL_FORMAT_B5G6R5_UNORM:
387             case VIRGL_FORMAT_R8G8_UNORM:
388             case VIRGL_FORMAT_R16_UNORM:
389                 bpp = 2;
390                 break;
391             case VIRGL_FORMAT_R8_UNORM:
392                 bpp = 1;
393                 break;
394             default:
395                 stream_renderer_error("Unknown virgl format: 0x%x", format);
396                 return 0;
397         }
398 
399         uint32_t stride = totalWidth * bpp;
400         return y * stride + x * bpp;
401     }
402     return 0;
403 }
404 
405 static inline size_t virgl_format_to_total_xfer_len(uint32_t format, uint32_t totalWidth,
406                                                     uint32_t totalHeight, uint32_t x, uint32_t y,
407                                                     uint32_t w, uint32_t h) {
408     if (virgl_format_is_yuv(format)) {
409         uint32_t bpp = format == VIRGL_FORMAT_P010 ? 2 : 1;
410 
411         uint32_t yWidth = totalWidth;
412         uint32_t yHeight = totalHeight;
413         uint32_t yStridePixels;
414         if (format == VIRGL_FORMAT_NV12) {
415             yStridePixels = yWidth;
416         } else if (format == VIRGL_FORMAT_P010) {
417             yStridePixels = yWidth;
418         } else if (format == VIRGL_FORMAT_YV12) {
419             yStridePixels = align_up_power_of_2(yWidth, 32);
420         } else {
421             stream_renderer_error("Unknown virgl format: 0x%x", format);
422             return 0;
423         }
424         uint32_t yStrideBytes = yStridePixels * bpp;
425         uint32_t ySize = yStrideBytes * yHeight;
426 
427         uint32_t uvStridePixels;
428         uint32_t uvPlaneCount;
429         if (format == VIRGL_FORMAT_NV12) {
430             uvStridePixels = yStridePixels;
431             uvPlaneCount = 1;
432         } else if (format == VIRGL_FORMAT_P010) {
433             uvStridePixels = yStridePixels;
434             uvPlaneCount = 1;
435         } else if (format == VIRGL_FORMAT_YV12) {
436             uvStridePixels = yStridePixels / 2;
437             uvPlaneCount = 2;
438         } else {
439             stream_renderer_error("Unknown virgl yuv format: 0x%x", format);
440             return 0;
441         }
442         uint32_t uvStrideBytes = uvStridePixels * bpp;
443         uint32_t uvHeight = totalHeight / 2;
444         uint32_t uvSize = uvStrideBytes * uvHeight * uvPlaneCount;
445 
446         uint32_t dataSize = ySize + uvSize;
447         return dataSize;
448     } else {
449         uint32_t bpp = 4;
450         switch (format) {
451             case VIRGL_FORMAT_R16G16B16A16_FLOAT:
452                 bpp = 8;
453                 break;
454             case VIRGL_FORMAT_B8G8R8X8_UNORM:
455             case VIRGL_FORMAT_B8G8R8A8_UNORM:
456             case VIRGL_FORMAT_R8G8B8X8_UNORM:
457             case VIRGL_FORMAT_R8G8B8A8_UNORM:
458             case VIRGL_FORMAT_R10G10B10A2_UNORM:
459                 bpp = 4;
460                 break;
461             case VIRGL_FORMAT_B5G6R5_UNORM:
462             case VIRGL_FORMAT_R16_UNORM:
463             case VIRGL_FORMAT_R8G8_UNORM:
464                 bpp = 2;
465                 break;
466             case VIRGL_FORMAT_R8_UNORM:
467                 bpp = 1;
468                 break;
469             default:
470                 stream_renderer_error("Unknown virgl format: 0x%x", format);
471                 return 0;
472         }
473 
474         uint32_t stride = totalWidth * bpp;
475         return (h - 1U) * stride + w * bpp;
476     }
477     return 0;
478 }
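
// Worked example of the YUV branch above: a 640x480 VIRGL_FORMAT_NV12 resource
// has bpp 1 and a 640-byte Y stride, so the Y plane is 640 * 480 = 307200 bytes
// and the single interleaved UV plane is 640 * 240 = 153600 bytes, for a total
// transfer length of 460800 bytes. For RGBA-style formats the length is instead
// (h - 1) * (totalWidth * bpp) + w * bpp, i.e. the final row only contributes
// the bytes actually covered by the box rather than a full stride.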
479 
480 enum IovSyncDir {
481     IOV_TO_LINEAR = 0,
482     LINEAR_TO_IOV = 1,
483 };
484 
485 static int sync_iov(PipeResEntry* res, uint64_t offset, const stream_renderer_box* box,
486                     IovSyncDir dir) {
487     stream_renderer_info("offset: 0x%llx box: %u %u %u %u size %u x %u iovs %u linearSize %zu",
488                          (unsigned long long)offset, box->x, box->y, box->w, box->h,
489                          res->args.width, res->args.height, res->numIovs, res->linearSize);
490 
491     if (box->x > res->args.width || box->y > res->args.height) {
492         stream_renderer_error("Box out of range of resource");
493         return -EINVAL;
494     }
495     if (box->w == 0U || box->h == 0U) {
496         stream_renderer_error("Empty transfer");
497         return -EINVAL;
498     }
499     if (box->x + box->w > res->args.width) {
500         stream_renderer_error("Box overflows resource width");
501         return -EINVAL;
502     }
503 
504     size_t linearBase = virgl_format_to_linear_base(
505         res->args.format, res->args.width, res->args.height, box->x, box->y, box->w, box->h);
506     size_t start = linearBase;
507     // Note: the total length spans (h - 1) full strides plus (w * bpp) for the
508     // final row, since the last row does not need to occupy the full stride.
509     size_t length = virgl_format_to_total_xfer_len(
510         res->args.format, res->args.width, res->args.height, box->x, box->y, box->w, box->h);
511     size_t end = start + length;
512 
513     if (start == end) {
514         stream_renderer_error("nothing to transfer");
515         return -EINVAL;
516     }
517 
518     if (end > res->linearSize) {
519         stream_renderer_error("start + length overflows!");
520         return -EINVAL;
521     }
522 
523     uint32_t iovIndex = 0;
524     size_t iovOffset = 0;
525     size_t written = 0;
526     char* linear = static_cast<char*>(res->linear);
527 
528     while (written < length) {
529         if (iovIndex >= res->numIovs) {
530             stream_renderer_error("write request overflowed numIovs");
531             return -EINVAL;
532         }
533 
534         const char* iovBase_const = static_cast<const char*>(res->iov[iovIndex].iov_base);
535         char* iovBase = static_cast<char*>(res->iov[iovIndex].iov_base);
536         size_t iovLen = res->iov[iovIndex].iov_len;
537         size_t iovOffsetEnd = iovOffset + iovLen;
538 
539         auto lower_intersect = std::max(iovOffset, start);
540         auto upper_intersect = std::min(iovOffsetEnd, end);
541         if (lower_intersect < upper_intersect) {
542             size_t toWrite = upper_intersect - lower_intersect;
543             switch (dir) {
544                 case IOV_TO_LINEAR:
545                     memcpy(linear + lower_intersect, iovBase_const + lower_intersect - iovOffset,
546                            toWrite);
547                     break;
548                 case LINEAR_TO_IOV:
549                     memcpy(iovBase + lower_intersect - iovOffset, linear + lower_intersect,
550                            toWrite);
551                     break;
552                 default:
553                     stream_renderer_error("Invalid synchronization dir");
554                     return -EINVAL;
555             }
556             written += toWrite;
557         }
558         ++iovIndex;
559         iovOffset += iovLen;
560     }
561 
562     return 0;
563 }
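
// Usage sketch for sync_iov() (hypothetical values): copy the first 4096 bytes
// of a resource from its guest iovecs into the host-side linear buffer:
//
//     stream_renderer_box box = {};
//     box.w = 4096;
//     box.h = 1;
//     int rc = sync_iov(&entry, 0 /* offset */, &box, IOV_TO_LINEAR);
//     // rc == 0 on success, or a negative errno value if the box is empty,
//     // out of range, or overflows the attached iovecs.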
564 
565 static uint64_t convert32to64(uint32_t lo, uint32_t hi) {
566     return ((uint64_t)lo) | (((uint64_t)hi) << 32);
567 }
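
// For reference: convert32to64(0x89abcdef, 0x01234567) == 0x0123456789abcdef;
// the first argument supplies the low 32 bits and the second the high 32 bits.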
568 
569 class PipeVirglRenderer {
570    public:
571     PipeVirglRenderer() = default;
572 
573     int init(void* cookie, int flags, stream_renderer_fence_callback fence_callback) {
574         stream_renderer_info("cookie: %p", cookie);
575         mCookie = cookie;
576         mFenceCallback = fence_callback;
577         mVirtioGpuOps = android_getVirtioGpuOps();
578         if (!mVirtioGpuOps) {
579             stream_renderer_error("Could not get virtio gpu ops!");
580             return -EINVAL;
581         }
582         mAddressSpaceDeviceControlOps = get_address_space_device_control_ops();
583         if (!mAddressSpaceDeviceControlOps) {
584             stream_renderer_error("Could not get address space device control ops!");
585             return -EINVAL;
586         }
587         mVirtioGpuTimelines = VirtioGpuTimelines::create(true);
589 
590 #if !defined(_WIN32)
591         mPageSize = getpagesize();
592 #endif
593 
594         return 0;
595     }
596 
597     int resetPipe(GoldfishHwPipe* hwPipe, GoldfishHostPipe* hostPipe) {
598         stream_renderer_info("Want to reset hwpipe %p to hostpipe %p", hwPipe, hostPipe);
599         VirtioGpuCtxId asCtxId = (VirtioGpuCtxId)(uintptr_t)hwPipe;
600         auto it = mContexts.find(asCtxId);
601         if (it == mContexts.end()) {
602             stream_renderer_error("fatal: pipe id %u", asCtxId);
603             return -EINVAL;
604         }
605 
606         auto& entry = it->second;
607         stream_renderer_info("ctxid: %u prev hostpipe: %p", asCtxId, entry.hostPipe);
608         entry.hostPipe = hostPipe;
609         stream_renderer_info("ctxid: %u next hostpipe: %p", asCtxId, entry.hostPipe);
610 
611         // Also update any resources associated with it
612         auto resourcesIt = mContextResources.find(asCtxId);
613 
614         if (resourcesIt == mContextResources.end()) {
615             return 0;
616         }
617 
618         const auto& resIds = resourcesIt->second;
619 
620         for (auto resId : resIds) {
621             auto resEntryIt = mResources.find(resId);
622             if (resEntryIt == mResources.end()) {
623                 stream_renderer_error("entry with res id %u not found", resId);
624                 return -EINVAL;
625             }
626 
627             auto& resEntry = resEntryIt->second;
628             resEntry.hostPipe = hostPipe;
629         }
630 
631         return 0;
632     }
633 
634     int createContext(VirtioGpuCtxId ctx_id, uint32_t nlen, const char* name,
635                       uint32_t context_init) {
636         std::string contextName(name, nlen);
637 
638         stream_renderer_info("ctxid: %u len: %u name: %s", ctx_id, nlen, contextName.c_str());
639         auto ops = ensureAndGetServiceOps();
640         auto hostPipe = ops->guest_open_with_flags(reinterpret_cast<GoldfishHwPipe*>(ctx_id),
641                                                    0x1 /* is virtio */);
642 
643         if (!hostPipe) {
644             stream_renderer_error("failed to create hw pipe!\n");
645             return -EINVAL;
646         }
647         std::unordered_map<uint32_t, uint32_t> map;
648 
649         PipeCtxEntry res = {
650             std::move(contextName),  // contextName
651             context_init,            // capsetId
652             ctx_id,                  // ctxId
653             hostPipe,                // hostPipe
654             0,                       // fence
655             0,                       // AS handle
656             false,                   // does not have an AS handle
657             map,                     // resourceId --> ASG handle map
658         };
659 
660         stream_renderer_info("initial host pipe for ctxid %u: %p", ctx_id, hostPipe);
661         mContexts[ctx_id] = res;
662         android_onGuestGraphicsProcessCreate(ctx_id);
663         return 0;
664     }
665 
666     int destroyContext(VirtioGpuCtxId handle) {
667         stream_renderer_info("ctxid: %u", handle);
668 
669         auto it = mContexts.find(handle);
670         if (it == mContexts.end()) {
671             stream_renderer_error("could not find context handle %u\n", handle);
672             return -EINVAL;
673         }
674 
675         if (it->second.hasAddressSpaceHandle) {
676             for (auto const& [resourceId, handle] : it->second.addressSpaceHandles) {
677                 mAddressSpaceDeviceControlOps->destroy_handle(handle);
678             }
679         }
680 
681         auto hostPipe = it->second.hostPipe;
682         if (!hostPipe) {
683             stream_renderer_error("0 is not a valid hostpipe");
684             return -EINVAL;
685         }
686 
687         auto ops = ensureAndGetServiceOps();
688         ops->guest_close(hostPipe, GOLDFISH_PIPE_CLOSE_GRACEFUL);
689 
690         android_cleanupProcGLObjects(handle);
691         mContexts.erase(it);
692         return 0;
693     }
694 
695     int setContextAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t handle,
696                                            uint32_t resourceId) {
697         auto ctxIt = mContexts.find(ctxId);
698         if (ctxIt == mContexts.end()) {
699             stream_renderer_error("ctx id %u is not found", ctxId);
700             return -EINVAL;
701         }
702 
703         auto& ctxEntry = ctxIt->second;
704         ctxEntry.addressSpaceHandle = handle;
705         ctxEntry.hasAddressSpaceHandle = true;
706         ctxEntry.addressSpaceHandles[resourceId] = handle;
707         return 0;
708     }
709 
710     uint32_t getAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t resourceId) {
711         auto ctxIt = mContexts.find(ctxId);
712         if (ctxIt == mContexts.end()) {
713             stream_renderer_error("ctx id %u is not found", ctxId);
714             return -EINVAL;
715         }
716 
717         auto& ctxEntry = ctxIt->second;
718 
719         if (!ctxEntry.addressSpaceHandles.count(resourceId)) {
720             stream_renderer_error("ASG context with resource id %u not found", resourceId);
721             return -EINVAL;
722         }
723 
724         return ctxEntry.addressSpaceHandles[resourceId];
725     }
726 
727 #define DECODE(variable, type, input) \
728     type variable = {};               \
729     memcpy(&variable, input, sizeof(type));
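
// For reference, DECODE(header, gfxstream::gfxstreamHeader, dwords) expands to
// a zero-initialized local followed by a bounded copy out of the guest buffer:
//
//     gfxstream::gfxstreamHeader header = {};
//     memcpy(&header, dwords, sizeof(gfxstream::gfxstreamHeader));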
730 
731     int addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dwords) {
732         DECODE(header, gfxstream::gfxstreamHeader, dwords)
733 
734         switch (header.opCode) {
735             case GFXSTREAM_CONTEXT_CREATE: {
736                 DECODE(contextCreate, gfxstream::gfxstreamContextCreate, dwords)
737 
738                 auto resEntryIt = mResources.find(contextCreate.resourceId);
739                 if (resEntryIt == mResources.end()) {
740                     stream_renderer_error("ASG coherent resource %u not found",
741                                           contextCreate.resourceId);
742                     return -EINVAL;
743                 }
744 
745                 auto ctxIt = mContexts.find(ctxId);
746                 if (ctxIt == mContexts.end()) {
747                     stream_renderer_error("ctx id %u not found", ctxId);
748                     return -EINVAL;
749                 }
750 
751                 auto& ctxEntry = ctxIt->second;
752                 auto& resEntry = resEntryIt->second;
753 
754                 std::string name = ctxEntry.name + "-" + std::to_string(contextCreate.resourceId);
755                 uint32_t handle = mAddressSpaceDeviceControlOps->gen_handle();
756 
757                 struct AddressSpaceCreateInfo createInfo = {
758                     .handle = handle,
759                     .type = android::emulation::VirtioGpuGraphics,
760                     .createRenderThread = true,
761                     .externalAddr = resEntry.hva,
762                     .externalAddrSize = resEntry.hvaSize,
763                     .virtioGpuContextId = ctxId,
764                     .virtioGpuCapsetId = ctxEntry.capsetId,
765                     .contextName = name.c_str(),
766                     .contextNameSize = static_cast<uint32_t>(ctxEntry.name.size()),
767                 };
768 
769                 mAddressSpaceDeviceControlOps->create_instance(createInfo);
770                 if (setContextAddressSpaceHandleLocked(ctxId, handle, contextCreate.resourceId)) {
771                     return -EINVAL;
772                 }
773                 break;
774             }
775             case GFXSTREAM_CONTEXT_PING: {
776                 DECODE(contextPing, gfxstream::gfxstreamContextPing, dwords)
777 
778                 struct android::emulation::AddressSpaceDevicePingInfo ping = {0};
779                 ping.metadata = ASG_NOTIFY_AVAILABLE;
780 
781                 mAddressSpaceDeviceControlOps->ping_at_hva(
782                     getAddressSpaceHandleLocked(ctxId, contextPing.resourceId), &ping);
783                 break;
784             }
785             default:
786                 break;
787         }
788 
789         return 0;
790     }
791 
792     int submitCmd(struct stream_renderer_command* cmd) {
793         if (!cmd) return -EINVAL;
794 
795         void* buffer = reinterpret_cast<void*>(cmd->cmd);
796 
797         VirtioGpuRing ring = VirtioGpuRingGlobal{};
798         stream_renderer_info("ctx: %u, ring: %s buffer: %p dwords: %d", cmd->ctx_id,
799                              to_string(ring).c_str(), buffer, cmd->cmd_size);
800 
801         if (!buffer) {
802             stream_renderer_error("error: buffer null\n");
803             return -EINVAL;
804         }
805 
806         if (cmd->cmd_size < 4) {
807             stream_renderer_error("error: not enough bytes (got %d)\n", cmd->cmd_size);
808             return -EINVAL;
809         }
810 
811         DECODE(header, gfxstream::gfxstreamHeader, buffer);
812         switch (header.opCode) {
813             case GFXSTREAM_CONTEXT_CREATE:
814             case GFXSTREAM_CONTEXT_PING:
815             case GFXSTREAM_CONTEXT_PING_WITH_RESPONSE:
816                 if (addressSpaceProcessCmd(cmd->ctx_id, (uint32_t*)buffer)) {
817                     return -EINVAL;
818                 }
819                 break;
820             case GFXSTREAM_CREATE_EXPORT_SYNC: {
821                 DECODE(exportSync, gfxstream::gfxstreamCreateExportSync, buffer)
822 
823                 uint64_t sync_handle =
824                     convert32to64(exportSync.syncHandleLo, exportSync.syncHandleHi);
825 
826                 stream_renderer_info("wait for gpu ring %s", to_string(ring).c_str());
827                 auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
828                 mVirtioGpuOps->async_wait_for_gpu_with_cb(sync_handle, [this, taskId] {
829                     mVirtioGpuTimelines->notifyTaskCompletion(taskId);
830                 });
831                 break;
832             }
833             case GFXSTREAM_CREATE_EXPORT_SYNC_VK:
834             case GFXSTREAM_CREATE_IMPORT_SYNC_VK: {
835                 // The guest sync export assumes fence context support and always uses
836                 // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
837                 // the same ring as the fence created for the virtio gpu command or the
838                 // fence may be signaled without properly waiting for the task to complete.
839                 ring = VirtioGpuRingContextSpecific{
840                     .mCtxId = cmd->ctx_id,
841                     .mRingIdx = 0,
842                 };
843 
844                 DECODE(exportSyncVK, gfxstream::gfxstreamCreateExportSyncVK, buffer)
845 
846                 uint64_t device_handle =
847                     convert32to64(exportSyncVK.deviceHandleLo, exportSyncVK.deviceHandleHi);
848 
849                 uint64_t fence_handle =
850                     convert32to64(exportSyncVK.fenceHandleLo, exportSyncVK.fenceHandleHi);
851 
852                 stream_renderer_info("wait for gpu ring %s", to_string(ring).c_str());
853                 auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
854                 mVirtioGpuOps->async_wait_for_gpu_vulkan_with_cb(
855                     device_handle, fence_handle,
856                     [this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
857                 break;
858             }
859             case GFXSTREAM_CREATE_QSRI_EXPORT_VK: {
860                 // The guest QSRI export assumes fence context support and always uses
861                 // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
862                 // the same ring as the fence created for the virtio gpu command or the
863                 // fence may be signaled without properly waiting for the task to complete.
864                 ring = VirtioGpuRingContextSpecific{
865                     .mCtxId = cmd->ctx_id,
866                     .mRingIdx = 0,
867                 };
868 
869                 DECODE(exportQSRI, gfxstream::gfxstreamCreateQSRIExportVK, buffer)
870 
871                 uint64_t image_handle =
872                     convert32to64(exportQSRI.imageHandleLo, exportQSRI.imageHandleHi);
873 
874                 stream_renderer_info("wait for gpu vk qsri ring %s image 0x%llx",
875                                      to_string(ring).c_str(), (unsigned long long)image_handle);
876                 auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
877                 mVirtioGpuOps->async_wait_for_gpu_vulkan_qsri_with_cb(image_handle, [this, taskId] {
878                     mVirtioGpuTimelines->notifyTaskCompletion(taskId);
879                 });
880                 break;
881             }
882             case GFXSTREAM_PLACEHOLDER_COMMAND_VK: {
883                 // Do nothing, this is a placeholder command
884                 break;
885             }
886             default:
887                 return -EINVAL;
888         }
889 
890         return 0;
891     }
892 
893     int createFence(uint64_t fence_id, const VirtioGpuRing& ring) {
894         stream_renderer_info("fenceid: %llu ring: %s", (unsigned long long)fence_id,
895                              to_string(ring).c_str());
896 
897         struct {
898             FenceCompletionCallback operator()(const VirtioGpuRingGlobal&) {
899                 return [renderer = mRenderer, fenceId = mFenceId] {
900                     struct stream_renderer_fence fence = {0};
901                     fence.fence_id = fenceId;
902                     fence.flags = STREAM_RENDERER_FLAG_FENCE;
903                     renderer->mFenceCallback(renderer->mCookie, &fence);
904                 };
905             }
906             FenceCompletionCallback operator()(const VirtioGpuRingContextSpecific& ring) {
907                 return [renderer = mRenderer, fenceId = mFenceId, ring] {
908                     struct stream_renderer_fence fence = {0};
909                     fence.fence_id = fenceId;
910                     fence.flags = STREAM_RENDERER_FLAG_FENCE | STREAM_RENDERER_FLAG_FENCE_RING_IDX;
911                     fence.ctx_id = ring.mCtxId;
912                     fence.ring_idx = ring.mRingIdx;
913                     renderer->mFenceCallback(renderer->mCookie, &fence);
914                 };
915             }
916 
917             PipeVirglRenderer* mRenderer;
918             VirtioGpuTimelines::FenceId mFenceId;
919         } visitor{
920             .mRenderer = this,
921             .mFenceId = fence_id,
922         };
923         FenceCompletionCallback callback = std::visit(visitor, ring);
924         if (!callback) {
925             return -EINVAL;
926         }
927         mVirtioGpuTimelines->enqueueFence(ring, fence_id, std::move(callback));
928 
929         return 0;
930     }
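
    // For reference: createFence(1, VirtioGpuRingGlobal{}) queues a fence on the
    // global timeline whose completion callback sets only STREAM_RENDERER_FLAG_FENCE,
    // while createFence(1, VirtioGpuRingContextSpecific{.mCtxId = 7, .mRingIdx = 0})
    // additionally sets STREAM_RENDERER_FLAG_FENCE_RING_IDX and fills in
    // ctx_id / ring_idx before invoking mFenceCallback.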
931 
932     void poll() { mVirtioGpuTimelines->poll(); }
933 
934     enum pipe_texture_target {
935         PIPE_BUFFER,
936         PIPE_TEXTURE_1D,
937         PIPE_TEXTURE_2D,
938         PIPE_TEXTURE_3D,
939         PIPE_TEXTURE_CUBE,
940         PIPE_TEXTURE_RECT,
941         PIPE_TEXTURE_1D_ARRAY,
942         PIPE_TEXTURE_2D_ARRAY,
943         PIPE_TEXTURE_CUBE_ARRAY,
944         PIPE_MAX_TEXTURE_TYPES,
945     };
946 
947     /**
948      * Resource binding flags -- state tracker must specify in advance all
949      * the ways a resource might be used.
950      */
951 #define PIPE_BIND_DEPTH_STENCIL (1 << 0)   /* create_surface */
952 #define PIPE_BIND_RENDER_TARGET (1 << 1)   /* create_surface */
953 #define PIPE_BIND_BLENDABLE (1 << 2)       /* create_surface */
954 #define PIPE_BIND_SAMPLER_VIEW (1 << 3)    /* create_sampler_view */
955 #define PIPE_BIND_VERTEX_BUFFER (1 << 4)   /* set_vertex_buffers */
956 #define PIPE_BIND_INDEX_BUFFER (1 << 5)    /* draw_elements */
957 #define PIPE_BIND_CONSTANT_BUFFER (1 << 6) /* set_constant_buffer */
958 #define PIPE_BIND_DISPLAY_TARGET (1 << 7)  /* flush_front_buffer */
959     /* gap */
960 #define PIPE_BIND_STREAM_OUTPUT (1 << 10)       /* set_stream_output_buffers */
961 #define PIPE_BIND_CURSOR (1 << 11)              /* mouse cursor */
962 #define PIPE_BIND_CUSTOM (1 << 12)              /* state-tracker/winsys usages */
963 #define PIPE_BIND_GLOBAL (1 << 13)              /* set_global_binding */
964 #define PIPE_BIND_SHADER_BUFFER (1 << 14)       /* set_shader_buffers */
965 #define PIPE_BIND_SHADER_IMAGE (1 << 15)        /* set_shader_images */
966 #define PIPE_BIND_COMPUTE_RESOURCE (1 << 16)    /* set_compute_resources */
967 #define PIPE_BIND_COMMAND_ARGS_BUFFER (1 << 17) /* pipe_draw_info.indirect */
968 #define PIPE_BIND_QUERY_BUFFER (1 << 18)        /* get_query_result_resource */
969 
970     ResType getResourceType(const struct stream_renderer_resource_create_args& args) const {
971         if (args.target == PIPE_BUFFER) {
972             return ResType::PIPE;
973         }
974 
975         if (args.format != VIRGL_FORMAT_R8_UNORM) {
976             return ResType::COLOR_BUFFER;
977         }
978         if (args.bind & VIRGL_BIND_SAMPLER_VIEW) {
979             return ResType::COLOR_BUFFER;
980         }
981         if (args.bind & VIRGL_BIND_RENDER_TARGET) {
982             return ResType::COLOR_BUFFER;
983         }
984         if (args.bind & VIRGL_BIND_SCANOUT) {
985             return ResType::COLOR_BUFFER;
986         }
987         if (args.bind & VIRGL_BIND_CURSOR) {
988             return ResType::COLOR_BUFFER;
989         }
990         if (!(args.bind & VIRGL_BIND_LINEAR)) {
991             return ResType::COLOR_BUFFER;
992         }
993 
994         return ResType::BUFFER;
995     }
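
    // Summary of the classification above: a PIPE_BUFFER target is always
    // ResType::PIPE; an R8_UNORM resource bound only with VIRGL_BIND_LINEAR
    // (no sampler view, render target, scanout, or cursor bind) is
    // ResType::BUFFER; everything else becomes a ResType::COLOR_BUFFER.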
996 
997     void handleCreateResourceBuffer(struct stream_renderer_resource_create_args* args) {
998         mVirtioGpuOps->create_buffer_with_handle(args->width * args->height, args->handle);
999     }
1000 
1001     void handleCreateResourceColorBuffer(struct stream_renderer_resource_create_args* args) {
1002         // corresponds to allocation of gralloc buffer in minigbm
1003         stream_renderer_info("w h %u %u resid %u -> CreateColorBufferWithHandle", args->width,
1004                              args->height, args->handle);
1005 
1006         const uint32_t glformat = virgl_format_to_gl(args->format);
1007         const uint32_t fwkformat = virgl_format_to_fwk_format(args->format);
1008         mVirtioGpuOps->create_color_buffer_with_handle(args->width, args->height, glformat,
1009                                                        fwkformat, args->handle);
1010         mVirtioGpuOps->set_guest_managed_color_buffer_lifetime(true /* guest manages lifetime */);
1011         mVirtioGpuOps->open_color_buffer(args->handle);
1012     }
1013 
1014     int createResource(struct stream_renderer_resource_create_args* args, struct iovec* iov,
1015                        uint32_t num_iovs) {
1016         stream_renderer_info("handle: %u. num iovs: %u", args->handle, num_iovs);
1017 
1018         const auto resType = getResourceType(*args);
1019         switch (resType) {
1020             case ResType::PIPE:
1021                 break;
1022             case ResType::BUFFER:
1023                 handleCreateResourceBuffer(args);
1024                 break;
1025             case ResType::COLOR_BUFFER:
1026                 handleCreateResourceColorBuffer(args);
1027                 break;
1028         }
1029 
1030         PipeResEntry e;
1031         e.args = *args;
1032         e.linear = 0;
1033         e.hostPipe = 0;
1034         e.hva = nullptr;
1035         e.hvaSize = 0;
1036         e.blobId = 0;
1037         e.blobMem = 0;
1038         e.type = resType;
1039         allocResource(e, iov, num_iovs);
1040 
1041         mResources[args->handle] = e;
1042         return 0;
1043     }
1044 
1045     void unrefResource(uint32_t toUnrefId) {
1046         stream_renderer_info("handle: %u", toUnrefId);
1047 
1048         auto it = mResources.find(toUnrefId);
1049         if (it == mResources.end()) return;
1050 
1051         auto contextsIt = mResourceContexts.find(toUnrefId);
1052         if (contextsIt != mResourceContexts.end()) {
1053             mResourceContexts.erase(contextsIt->first);
1054         }
1055 
1056         for (auto& ctxIdResources : mContextResources) {
1057             detachResourceLocked(ctxIdResources.first, toUnrefId);
1058         }
1059 
1060         auto& entry = it->second;
1061         switch (entry.type) {
1062             case ResType::PIPE:
1063                 break;
1064             case ResType::BUFFER:
1065                 mVirtioGpuOps->close_buffer(toUnrefId);
1066                 break;
1067             case ResType::COLOR_BUFFER:
1068                 mVirtioGpuOps->close_color_buffer(toUnrefId);
1069                 break;
1070         }
1071 
1072         if (entry.linear) {
1073             free(entry.linear);
1074             entry.linear = nullptr;
1075         }
1076 
1077         if (entry.iov) {
1078             free(entry.iov);
1079             entry.iov = nullptr;
1080             entry.numIovs = 0;
1081         }
1082 
1083         if (entry.externalAddr && !entry.ringBlob) {
1084             android::aligned_buf_free(entry.hva);
1085         }
1086 
1087         entry.hva = nullptr;
1088         entry.hvaSize = 0;
1089         entry.blobId = 0;
1090 
1091         mResources.erase(it);
1092     }
1093 
1094     int attachIov(int resId, iovec* iov, int num_iovs) {
1095         stream_renderer_info("resid: %d numiovs: %d", resId, num_iovs);
1096 
1097         auto it = mResources.find(resId);
1098         if (it == mResources.end()) return ENOENT;
1099 
1100         auto& entry = it->second;
1101         stream_renderer_info("res linear: %p", entry.linear);
1102         if (!entry.linear) allocResource(entry, iov, num_iovs);
1103 
1104         stream_renderer_info("done");
1105         return 0;
1106     }
1107 
1108     void detachIov(int resId, iovec** iov, int* num_iovs) {
1109         auto it = mResources.find(resId);
1110         if (it == mResources.end()) return;
1111 
1112         auto& entry = it->second;
1113 
1114         if (num_iovs) {
1115             *num_iovs = entry.numIovs;
1116             stream_renderer_info("resid: %d numIovs: %d", resId, *num_iovs);
1117         } else {
1118             stream_renderer_info("resid: %d numIovs: 0", resId);
1119         }
1120 
1121         entry.numIovs = 0;
1122 
1123         if (entry.iov) free(entry.iov);
1124         entry.iov = nullptr;
1125 
1126         if (iov) {
1127             *iov = entry.iov;
1128         }
1129 
1130         allocResource(entry, entry.iov, entry.numIovs);
1131         stream_renderer_info("done");
1132     }
1133 
1134     int handleTransferReadPipe(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
1135         if (res->type != ResType::PIPE) {
1136             stream_renderer_error("resid: %d not a PIPE resource", res->args.handle);
1137             return -EINVAL;
1138         }
1139 
1140         // Do the pipe service op here, if there is an associated hostpipe.
1141         auto hostPipe = res->hostPipe;
1142         if (!hostPipe) return -EINVAL;
1143 
1144         auto ops = ensureAndGetServiceOps();
1145 
1146         size_t readBytes = 0;
1147         size_t wantedBytes = readBytes + (size_t)box->w;
1148 
1149         while (readBytes < wantedBytes) {
1150             GoldfishPipeBuffer buf = {
1151                 ((char*)res->linear) + box->x + readBytes,
1152                 wantedBytes - readBytes,
1153             };
1154             auto status = ops->guest_recv(hostPipe, &buf, 1);
1155 
1156             if (status > 0) {
1157                 readBytes += status;
1158             } else if (status == kPipeTryAgain) {
1159                 ops->wait_guest_recv(hostPipe);
1160             } else {
1161                 return EIO;
1162             }
1163         }
1164 
1165         return 0;
1166     }
1167 
1168     int handleTransferWritePipe(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
1169         if (res->type != ResType::PIPE) {
1170             stream_renderer_error("resid: %d not a PIPE resource", res->args.handle);
1171             return -EINVAL;
1172         }
1173 
1174         // Do the pipe service op here, if there is an associated hostpipe.
1175         auto hostPipe = res->hostPipe;
1176         if (!hostPipe) {
1177             stream_renderer_info("No hostPipe");
1178             return -EINVAL;
1179         }
1180 
1181         stream_renderer_info("resid: %d offset: 0x%llx hostpipe: %p", res->args.handle,
1182                              (unsigned long long)offset, hostPipe);
1183 
1184         auto ops = ensureAndGetServiceOps();
1185 
1186         size_t writtenBytes = 0;
1187         size_t wantedBytes = (size_t)box->w;
1188 
1189         while (writtenBytes < wantedBytes) {
1190             GoldfishPipeBuffer buf = {
1191                 ((char*)res->linear) + box->x + writtenBytes,
1192                 wantedBytes - writtenBytes,
1193             };
1194 
1195             // guest_send can now reallocate the pipe.
1196             void* hostPipeBefore = hostPipe;
1197             auto status = ops->guest_send(&hostPipe, &buf, 1);
1198             if (hostPipe != hostPipeBefore) {
1199                 if (resetPipe((GoldfishHwPipe*)(uintptr_t)(res->ctxId), hostPipe)) {
1200                     return -EINVAL;
1201                 }
1202 
1203                 auto it = mResources.find(res->args.handle);
1204                 res = &it->second;
1205             }
1206 
1207             if (status > 0) {
1208                 writtenBytes += status;
1209             } else if (status == kPipeTryAgain) {
1210                 ops->wait_guest_send(hostPipe);
1211             } else {
1212                 return EIO;
1213             }
1214         }
1215 
1216         return 0;
1217     }
1218 
1219     int handleTransferReadBuffer(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
1220         if (res->type != ResType::BUFFER) {
1221             stream_renderer_error("resid: %d not a BUFFER resource", res->args.handle);
1222             return -EINVAL;
1223         }
1224 
1225         mVirtioGpuOps->read_buffer(res->args.handle, 0, res->args.width * res->args.height,
1226                                    res->linear);
1227         return 0;
1228     }
1229 
1230     int handleTransferWriteBuffer(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
1231         if (res->type != ResType::BUFFER) {
1232             stream_renderer_error("resid: %d not a BUFFER resource", res->args.handle);
1233             return -EINVAL;
1234         }
1235 
1236         mVirtioGpuOps->update_buffer(res->args.handle, 0, res->args.width * res->args.height,
1237                                      res->linear);
1238         return 0;
1239     }
1240 
1241     int handleTransferReadColorBuffer(PipeResEntry* res, uint64_t offset,
1242                                       stream_renderer_box* box) {
1243         if (res->type != ResType::COLOR_BUFFER) {
1244             stream_renderer_error("resid: %d not a COLOR_BUFFER resource", res->args.handle);
1245             return -EINVAL;
1246         }
1247 
1248         auto glformat = virgl_format_to_gl(res->args.format);
1249         auto gltype = gl_format_to_natural_type(glformat);
1250 
1251         // We always xfer the whole thing again from GL
1252         // since it's fiddly to calc / copy-out subregions
1253         if (virgl_format_is_yuv(res->args.format)) {
1254             mVirtioGpuOps->read_color_buffer_yuv(res->args.handle, 0, 0, res->args.width,
1255                                                  res->args.height, res->linear, res->linearSize);
1256         } else {
1257             mVirtioGpuOps->read_color_buffer(res->args.handle, 0, 0, res->args.width,
1258                                              res->args.height, glformat, gltype, res->linear);
1259         }
1260 
1261         return 0;
1262     }
1263 
1264     int handleTransferWriteColorBuffer(PipeResEntry* res, uint64_t offset,
1265                                        stream_renderer_box* box) {
1266         if (res->type != ResType::COLOR_BUFFER) {
1267             stream_renderer_error("resid: %d not a COLOR_BUFFER resource", res->args.handle);
1268             return -EINVAL;
1269         }
1270 
1271         auto glformat = virgl_format_to_gl(res->args.format);
1272         auto gltype = gl_format_to_natural_type(glformat);
1273 
1274         // We always xfer the whole thing again to GL
1275         // since it's fiddly to calc / copy-out subregions
1276         mVirtioGpuOps->update_color_buffer(res->args.handle, 0, 0, res->args.width,
1277                                            res->args.height, glformat, gltype, res->linear);
1278         return 0;
1279     }
1280 
1281     int transferReadIov(int resId, uint64_t offset, stream_renderer_box* box, struct iovec* iov,
1282                         int iovec_cnt) {
1283         auto it = mResources.find(resId);
1284         if (it == mResources.end()) return EINVAL;
1285 
1286         int ret = 0;
1287 
1288         auto& entry = it->second;
1289         switch (entry.type) {
1290             case ResType::PIPE:
1291                 ret = handleTransferReadPipe(&entry, offset, box);
1292                 break;
1293             case ResType::BUFFER:
1294                 ret = handleTransferReadBuffer(&entry, offset, box);
1295                 break;
1296             case ResType::COLOR_BUFFER:
1297                 ret = handleTransferReadColorBuffer(&entry, offset, box);
1298                 break;
1299         }
1300 
1301         if (ret != 0) {
1302             return ret;
1303         }
1304 
1305         if (iovec_cnt) {
1306             PipeResEntry e = {
1307                 entry.args, iov, (uint32_t)iovec_cnt, entry.linear, entry.linearSize,
1308             };
1309             ret = sync_iov(&e, offset, box, LINEAR_TO_IOV);
1310         } else {
1311             ret = sync_iov(&entry, offset, box, LINEAR_TO_IOV);
1312         }
1313 
1314         return ret;
1315     }
1316 
1317     int transferWriteIov(int resId, uint64_t offset, stream_renderer_box* box, struct iovec* iov,
1318                          int iovec_cnt) {
1319         auto it = mResources.find(resId);
1320         if (it == mResources.end()) return EINVAL;
1321 
1322         auto& entry = it->second;
1323 
1324         int ret = 0;
1325         if (iovec_cnt) {
1326             PipeResEntry e = {
1327                 entry.args, iov, (uint32_t)iovec_cnt, entry.linear, entry.linearSize,
1328             };
1329             ret = sync_iov(&e, offset, box, IOV_TO_LINEAR);
1330         } else {
1331             ret = sync_iov(&entry, offset, box, IOV_TO_LINEAR);
1332         }
1333 
1334         if (ret != 0) {
1335             return ret;
1336         }
1337 
1338         switch (entry.type) {
1339             case ResType::PIPE:
1340                 ret = handleTransferWritePipe(&entry, offset, box);
1341                 break;
1342             case ResType::BUFFER:
1343                 ret = handleTransferWriteBuffer(&entry, offset, box);
1344                 break;
1345             case ResType::COLOR_BUFFER:
1346                 ret = handleTransferWriteColorBuffer(&entry, offset, box);
1347                 break;
1348         }
1349 
1350         return ret;
1351     }
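    // Note on the two transfer helpers above: transferReadIov() first copies host-side data
    // into the resource's linear staging buffer, then scatters it to the guest iovecs
    // (LINEAR_TO_IOV); transferWriteIov() gathers the guest iovecs into the linear buffer
    // (IOV_TO_LINEAR) before pushing it to the host pipe, buffer, or color buffer.
    // Illustrative VMM-side call sequence (sketch only; assumes the box fields are
    // x, y, z, w, h, d, and note this backend ignores ctx_id/level/stride/layer_stride):
    //
    //   struct stream_renderer_box box = {0, 0, 0, width, height, 1};
    //   stream_renderer_transfer_write_iov(resId, ctxId, 0, 0, 0, &box, 0, iovs, numIovs);
    //   stream_renderer_transfer_read_iov(resId, ctxId, 0, 0, 0, &box, 0, iovs, numIovs);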
1352 
1353     void getCapset(uint32_t set, uint32_t* max_size) {
1354         switch (set) {
1355             case VIRTGPU_CAPSET_GFXSTREAM_VULKAN:
1356                 *max_size = sizeof(struct gfxstream::vulkanCapset);
1357                 break;
1358             case VIRTGPU_CAPSET_GFXSTREAM_MAGMA:
1359                 *max_size = sizeof(struct gfxstream::magmaCapset);
1360                 break;
1361             case VIRTGPU_CAPSET_GFXSTREAM_GLES:
1362                 *max_size = sizeof(struct gfxstream::glesCapset);
1363                 break;
1364             case VIRTGPU_CAPSET_GFXSTREAM_COMPOSER:
1365                 *max_size = sizeof(struct gfxstream::composerCapset);
1366                 break;
1367             default:
1368                 stream_renderer_error("Incorrect capability set specified");
1369         }
1370     }
1371 
1372     void fillCaps(uint32_t set, void* caps) {
1373         switch (set) {
1374             case VIRTGPU_CAPSET_GFXSTREAM_VULKAN: {
1375                 struct gfxstream::vulkanCapset* capset =
1376                     reinterpret_cast<struct gfxstream::vulkanCapset*>(caps);
1377 
1378                 memset(capset, 0, sizeof(*capset));
1379 
1380                 capset->protocolVersion = 1;
1381                 capset->ringSize = 12288;
1382                 capset->bufferSize = 1048576;
1383 
1384                 auto vk_emu = gfxstream::vk::getGlobalVkEmulation();
1385                 if (vk_emu && vk_emu->live && vk_emu->representativeColorBufferMemoryTypeIndex) {
1386                     capset->colorBufferMemoryIndex =
1387                         *vk_emu->representativeColorBufferMemoryTypeIndex;
1388                 }
1389 
1390                 capset->noRenderControlEnc = 1;
1391                 capset->blobAlignment = mPageSize;
1392                 if (vk_emu && vk_emu->live) {
1393                     capset->deferredMapping = 1;
1394                 }
1395                 break;
1396             }
1397             case VIRTGPU_CAPSET_GFXSTREAM_MAGMA: {
1398                 struct gfxstream::magmaCapset* capset =
1399                     reinterpret_cast<struct gfxstream::magmaCapset*>(caps);
1400 
1401                 capset->protocolVersion = 1;
1402                 capset->ringSize = 12288;
1403                 capset->bufferSize = 1048576;
1404                 capset->blobAlignment = mPageSize;
1405                 break;
1406             }
1407             case VIRTGPU_CAPSET_GFXSTREAM_GLES: {
1408                 struct gfxstream::glesCapset* capset =
1409                     reinterpret_cast<struct gfxstream::glesCapset*>(caps);
1410 
1411                 capset->protocolVersion = 1;
1412                 capset->ringSize = 12288;
1413                 capset->bufferSize = 1048576;
1414                 capset->blobAlignment = mPageSize;
1415                 break;
1416             }
1417             case VIRTGPU_CAPSET_GFXSTREAM_COMPOSER: {
1418                 struct gfxstream::composerCapset* capset =
1419                     reinterpret_cast<struct gfxstream::composerCapset*>(caps);
1420 
1421                 capset->protocolVersion = 1;
1422                 capset->ringSize = 12288;
1423                 capset->bufferSize = 1048576;
1424                 capset->blobAlignment = mPageSize;
1425                 break;
1426             }
1427             default:
1428                 stream_renderer_error("Incorrect capability set specified");
1429         }
1430     }
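    // Capset query flow (illustrative sketch, not part of this file): the VMM first asks for
    // the capset size, allocates a buffer of that size, then has it filled in. max_ver is
    // unused by this implementation, so it may be left null:
    //
    //   uint32_t size = 0;
    //   stream_renderer_get_cap_set(VIRTGPU_CAPSET_GFXSTREAM_VULKAN, nullptr, &size);
    //   std::vector<uint8_t> caps(size);
    //   stream_renderer_fill_caps(VIRTGPU_CAPSET_GFXSTREAM_VULKAN, 0, caps.data());
    //   // caps now holds a gfxstream::vulkanCapset (ringSize, bufferSize, blobAlignment, ...).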
1431 
1432     void attachResource(uint32_t ctxId, uint32_t resId) {
1433         stream_renderer_info("ctxid: %u resid: %u", ctxId, resId);
1434 
1435         auto resourcesIt = mContextResources.find(ctxId);
1436 
1437         if (resourcesIt == mContextResources.end()) {
1438             std::vector<VirtioGpuResId> ids;
1439             ids.push_back(resId);
1440             mContextResources[ctxId] = ids;
1441         } else {
1442             auto& ids = resourcesIt->second;
1443             auto idIt = std::find(ids.begin(), ids.end(), resId);
1444             if (idIt == ids.end()) ids.push_back(resId);
1445         }
1446 
1447         auto contextsIt = mResourceContexts.find(resId);
1448 
1449         if (contextsIt == mResourceContexts.end()) {
1450             std::vector<VirtioGpuCtxId> ids;
1451             ids.push_back(ctxId);
1452             mResourceContexts[resId] = ids;
1453         } else {
1454             auto& ids = contextsIt->second;
1455             auto idIt = std::find(ids.begin(), ids.end(), ctxId);
1456             if (idIt == ids.end()) ids.push_back(ctxId);
1457         }
1458 
1459         // Associate the host pipe of the resource entry with the host pipe of
1460         // the context entry.  That is, the last context to call attachResource
1461         // wins if there is any conflict.
1462         auto ctxEntryIt = mContexts.find(ctxId);
1463         auto resEntryIt = mResources.find(resId);
1464 
1465         if (ctxEntryIt == mContexts.end() || resEntryIt == mResources.end()) return;
1466 
1467         stream_renderer_info("hostPipe: %p", ctxEntryIt->second.hostPipe);
1468         resEntryIt->second.hostPipe = ctxEntryIt->second.hostPipe;
1469         resEntryIt->second.ctxId = ctxId;
1470     }
1471 
1472     void detachResource(uint32_t ctxId, uint32_t toUnrefId) {
1473         stream_renderer_info("ctxid: %u resid: %u", ctxId, toUnrefId);
1474         detachResourceLocked(ctxId, toUnrefId);
1475     }
1476 
1477     int getResourceInfo(uint32_t resId, struct stream_renderer_resource_info* info) {
1478         stream_renderer_info("resid: %u", resId);
1479         if (!info) return EINVAL;
1480 
1481         auto it = mResources.find(resId);
1482         if (it == mResources.end()) return ENOENT;
1483 
1484         auto& entry = it->second;
1485 
1486         uint32_t bpp = 4U;
1487         switch (entry.args.format) {
1488             case VIRGL_FORMAT_B8G8R8A8_UNORM:
1489                 info->drm_fourcc = DRM_FORMAT_ARGB8888;
1490                 break;
1491             case VIRGL_FORMAT_B5G6R5_UNORM:
1492                 info->drm_fourcc = DRM_FORMAT_RGB565;
1493                 bpp = 2U;
1494                 break;
1495             case VIRGL_FORMAT_R8G8B8A8_UNORM:
1496                 info->drm_fourcc = DRM_FORMAT_ABGR8888;
1497                 break;
1498             case VIRGL_FORMAT_R8G8B8X8_UNORM:
1499                 info->drm_fourcc = DRM_FORMAT_XBGR8888;
1500                 break;
1501             case VIRGL_FORMAT_R8_UNORM:
1502                 info->drm_fourcc = DRM_FORMAT_R8;
1503                 bpp = 1U;
1504                 break;
1505             default:
1506                 return EINVAL;
1507         }
1508 
1509         info->stride = align_up(entry.args.width * bpp, 16U);
1510         info->virgl_format = entry.args.format;
1511         info->handle = entry.args.handle;
1512         info->height = entry.args.height;
1513         info->width = entry.args.width;
1514         info->depth = entry.args.depth;
1515         info->flags = entry.args.flags;
1516         info->tex_id = 0;
1517         return 0;
1518     }
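    // Stride example for getResourceInfo() (illustrative): a 33-pixel-wide
    // VIRGL_FORMAT_B8G8R8A8_UNORM resource has 33 * 4 = 132 bytes per row, rounded up to the
    // next multiple of 16, so info->stride == 144. The same width in VIRGL_FORMAT_R8_UNORM
    // gives 33 * 1 = 33, so info->stride == 48.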
1519 
1520     void flushResource(uint32_t res_handle) {
1521         auto taskId = mVirtioGpuTimelines->enqueueTask(VirtioGpuRingGlobal{});
1522         mVirtioGpuOps->async_post_color_buffer(
1523             res_handle, [this, taskId](std::shared_future<void> waitForGpu) {
1524                 waitForGpu.wait();
1525                 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
1526             });
1527     }
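    // flushResource() is asynchronous: it enqueues a task on the global timeline and posts the
    // color buffer off-thread; fences created on the global ring after this point are only
    // signaled once the post completes and the task is marked done.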
1528 
1529     int createRingBlob(PipeResEntry& entry, uint32_t res_handle,
1530                        const struct stream_renderer_create_blob* create_blob,
1531                        const struct stream_renderer_handle* handle) {
1532         if (feature_is_enabled(kFeature_ExternalBlob)) {
1533             std::string name = "shared-memory-" + std::to_string(res_handle);
1534             auto ringBlob = std::make_shared<SharedMemory>(name, create_blob->size);
1535             int ret = ringBlob->create(0600);
1536             if (ret) {
1537                 stream_renderer_error("Failed to create shared memory blob");
1538                 return ret;
1539             }
1540 
1541             entry.ringBlob = ringBlob;
1542             entry.hva = ringBlob->get();
1543         } else {
1544             void* addr =
1545                 android::aligned_buf_alloc(mPageSize, create_blob->size);
1546             if (addr == nullptr) {
1547                 stream_renderer_error("Failed to allocate ring blob");
1548                 return -ENOMEM;
1549             }
1550 
1551             entry.hva = addr;
1552         }
1553 
1554         entry.hvaSize = create_blob->size;
1555         entry.externalAddr = true;
1556         entry.caching = STREAM_RENDERER_MAP_CACHE_CACHED;
1557 
1558         return 0;
1559     }
1560 
1561     int createBlob(uint32_t ctx_id, uint32_t res_handle,
1562                    const struct stream_renderer_create_blob* create_blob,
1563                    const struct stream_renderer_handle* handle) {
1564         stream_renderer_info("ctx:%u res:%u blob-id:%u blob-size:%u",
1565                              ctx_id, res_handle, create_blob->blob_id, create_blob->size);
1566 
1567         PipeResEntry e;
1568         struct stream_renderer_resource_create_args args = {0};
1569         e.args = args;
1570         e.hostPipe = 0;
1571 
1572         if (create_blob->blob_id == 0) {
1573             int ret = createRingBlob(e, res_handle, create_blob, handle);
1574             if (ret) {
1575                 return ret;
1576             }
1577         } else if (feature_is_enabled(kFeature_ExternalBlob)) {
1578             if (create_blob->blob_mem == STREAM_BLOB_MEM_GUEST &&
1579                 (create_blob->blob_flags & STREAM_BLOB_FLAG_CREATE_GUEST_HANDLE)) {
1580 #if defined(__linux__) || defined(__QNX__)
1581                 ManagedDescriptor managedHandle(handle->os_handle);
1582                 BlobManager::get()->addDescriptorInfo(ctx_id, create_blob->blob_id,
1583                                                       std::move(managedHandle), handle->handle_type,
1584                                                       0, std::nullopt);
1585 
1586                 e.caching = STREAM_RENDERER_MAP_CACHE_CACHED;
1587 #else
1588                 return -EINVAL;
1589 #endif
1590             } else {
1591                 auto descriptorInfoOpt =
1592                     BlobManager::get()->removeDescriptorInfo(ctx_id, create_blob->blob_id);
1593                 if (descriptorInfoOpt) {
1594                     e.descriptorInfo =
1595                         std::make_shared<ManagedDescriptorInfo>(std::move(*descriptorInfoOpt));
1596                 } else {
1597                     return -EINVAL;
1598                 }
1599 
1600                 e.caching = e.descriptorInfo->caching;
1601             }
1602         } else {
1603             auto entryOpt = BlobManager::get()->removeMapping(ctx_id, create_blob->blob_id);
1604             if (entryOpt) {
1605                 e.hva = entryOpt->addr;
1606                 e.caching = entryOpt->caching;
1607                 e.hvaSize = create_blob->size;
1608             } else {
1609                 return -EINVAL;
1610             }
1611         }
1612 
1613         e.blobId = create_blob->blob_id;
1614         e.blobMem = create_blob->blob_mem;
1615         e.blobFlags = create_blob->blob_flags;
1616         e.iov = nullptr;
1617         e.numIovs = 0;
1618         e.linear = 0;
1619         e.linearSize = 0;
1620 
1621         mResources[res_handle] = e;
1622         return 0;
1623     }
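    // createBlob() dispatch summary (descriptive comment, behavior as implemented above):
    //   * blob_id == 0: a host-owned "ring" blob is allocated (shared memory when
    //     kFeature_ExternalBlob is enabled, otherwise page-aligned host memory).
    //   * kFeature_ExternalBlob + STREAM_BLOB_MEM_GUEST with CREATE_GUEST_HANDLE: the guest's
    //     descriptor is adopted via BlobManager (Linux/QNX only).
    //   * kFeature_ExternalBlob otherwise: a previously stashed descriptor for this blob_id is
    //     taken over from BlobManager.
    //   * no external blob: a previously registered host mapping for this blob_id is reused.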
1624 
1625     int resourceMap(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1626         if (feature_is_enabled(kFeature_ExternalBlob)) return -EINVAL;
1627 
1628         auto it = mResources.find(res_handle);
1629         if (it == mResources.end()) {
1630             if (hvaOut) *hvaOut = nullptr;
1631             if (sizeOut) *sizeOut = 0;
1632             return -EINVAL;
1633         }
1634 
1635         const auto& entry = it->second;
1636 
1637         if (hvaOut) *hvaOut = entry.hva;
1638         if (sizeOut) *sizeOut = entry.hvaSize;
1639         return 0;
1640     }
1641 
1642     int resourceUnmap(uint32_t res_handle) {
1643         auto it = mResources.find(res_handle);
1644         if (it == mResources.end()) {
1645             return -EINVAL;
1646         }
1647 
1648         // TODO(lfy): Good place to run any registered cleanup callbacks.
1649         // No-op for now.
1650         return 0;
1651     }
1652 
1653     int platformImportResource(int res_handle, int res_info, void* resource) {
1654         auto it = mResources.find(res_handle);
1655         if (it == mResources.end()) return -EINVAL;
1656         bool success = mVirtioGpuOps->platform_import_resource(res_handle, res_info, resource);
1657         return success ? 0 : -1;
1658     }
1659 
1660     int platformResourceInfo(int res_handle, int* width, int* height, int* internal_format) {
1661         auto it = mResources.find(res_handle);
1662         if (it == mResources.end()) return -EINVAL;
1663         bool success =
1664             mVirtioGpuOps->platform_resource_info(res_handle, width, height, internal_format);
1665         return success ? 0 : -1;
1666     }
1667 
1668     void* platformCreateSharedEglContext() {
1669         return mVirtioGpuOps->platform_create_shared_egl_context();
1670     }
1671 
1672     int platformDestroySharedEglContext(void* context) {
1673         bool success = mVirtioGpuOps->platform_destroy_shared_egl_context(context);
1674         return success ? 0 : -1;
1675     }
1676 
1677     int resourceMapInfo(uint32_t res_handle, uint32_t* map_info) {
1678         auto it = mResources.find(res_handle);
1679         if (it == mResources.end()) return -EINVAL;
1680 
1681         const auto& entry = it->second;
1682         *map_info = entry.caching;
1683         return 0;
1684     }
1685 
1686     int exportBlob(uint32_t res_handle, struct stream_renderer_handle* handle) {
1687         auto it = mResources.find(res_handle);
1688         if (it == mResources.end()) {
1689             return -EINVAL;
1690         }
1691 
1692         auto& entry = it->second;
1693         if (entry.ringBlob) {
1694             // Handle ownership transferred to VMM, gfxstream keeps the mapping.
1695 #ifdef _WIN32
1696             handle->os_handle =
1697                 static_cast<int64_t>(reinterpret_cast<intptr_t>(entry.ringBlob->releaseHandle()));
1698 #else
1699             handle->os_handle = static_cast<int64_t>(entry.ringBlob->releaseHandle());
1700 #endif
1701             handle->handle_type = STREAM_MEM_HANDLE_TYPE_SHM;
1702             return 0;
1703         }
1704 
1705         if (entry.descriptorInfo) {
1706             bool shareable = entry.blobFlags &
1707                              (STREAM_BLOB_FLAG_USE_SHAREABLE | STREAM_BLOB_FLAG_USE_CROSS_DEVICE);
1708 
1709             DescriptorType rawDescriptor;
1710             if (shareable) {
1711                 // TODO: Add a ManagedDescriptor::{clone, dup} method and use it here.
1712                 // This should have no effect for now, since gfxstream currently allocates
1713                 // mappable-only buffers.
1714                 return -EINVAL;
1715             } else {
1716                 auto rawDescriptorOpt = entry.descriptorInfo->descriptor.release();
1717                 if (rawDescriptorOpt)
1718                     rawDescriptor = *rawDescriptorOpt;
1719                 else
1720                     return -EINVAL;
1721             }
1722 
1723             handle->handle_type = entry.descriptorInfo->handleType;
1724 
1725 #ifdef _WIN32
1726             handle->os_handle = static_cast<int64_t>(reinterpret_cast<intptr_t>(rawDescriptor));
1727 #else
1728             handle->os_handle = static_cast<int64_t>(rawDescriptor);
1729 #endif
1730 
1731             return 0;
1732         }
1733 
1734         return -EINVAL;
1735     }
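    // Illustrative embedder-side use of exportBlob() on Linux (sketch only; assumes the
    // SHM handle is a file descriptor, as created in createRingBlob(), that blobSize is the
    // size used at creation, and that <sys/mman.h> is available):
    //
    //   struct stream_renderer_handle h = {};
    //   if (stream_renderer_export_blob(resHandle, &h) == 0 &&
    //       h.handle_type == STREAM_MEM_HANDLE_TYPE_SHM) {
    //       void* ptr = mmap(nullptr, blobSize, PROT_READ | PROT_WRITE, MAP_SHARED,
    //                        (int)h.os_handle, 0);
    //       // ... use ptr, then munmap() it and close() the descriptor.
    //   }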
1736 
1737     int vulkanInfo(uint32_t res_handle, struct stream_renderer_vulkan_info* vulkan_info) {
1738         auto it = mResources.find(res_handle);
1739         if (it == mResources.end()) return -EINVAL;
1740 
1741         const auto& entry = it->second;
1742         if (entry.descriptorInfo && entry.descriptorInfo->vulkanInfoOpt) {
1743             vulkan_info->memory_index = (*entry.descriptorInfo->vulkanInfoOpt).memoryIndex;
1744             memcpy(vulkan_info->device_id.device_uuid,
1745                    (*entry.descriptorInfo->vulkanInfoOpt).deviceUUID,
1746                    sizeof(vulkan_info->device_id.device_uuid));
1747             memcpy(vulkan_info->device_id.driver_uuid,
1748                    (*entry.descriptorInfo->vulkanInfoOpt).driverUUID,
1749                    sizeof(vulkan_info->device_id.driver_uuid));
1750             return 0;
1751         }
1752 
1753         return -EINVAL;
1754     }
1755 
1756 #ifdef CONFIG_AEMU
1757     void setServiceOps(const GoldfishPipeServiceOps* ops) { mServiceOps = ops; }
1758 #endif  // CONFIG_AEMU
1759    private:
1760     void allocResource(PipeResEntry& entry, iovec* iov, int num_iovs) {
1761         stream_renderer_info("entry linear: %p", entry.linear);
1762         if (entry.linear) free(entry.linear);
1763 
1764         size_t linearSize = 0;
1765         for (uint32_t i = 0; i < num_iovs; ++i) {
1766             stream_renderer_info("iov base: %p", iov[i].iov_base);
1767             linearSize += iov[i].iov_len;
1768             stream_renderer_info("has iov of %zu. linearSize current: %zu", iov[i].iov_len,
1769                                  linearSize);
1770         }
1771         stream_renderer_info("final linearSize: %zu", linearSize);
1772 
1773         void* linear = nullptr;
1774 
1775         if (linearSize) linear = malloc(linearSize);
1776 
1777         entry.iov = (iovec*)malloc(sizeof(*iov) * num_iovs);
1778         entry.numIovs = num_iovs;
1779         memcpy(entry.iov, iov, num_iovs * sizeof(*iov));
1780         entry.linear = linear;
1781         entry.linearSize = linearSize;
1782     }
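    // allocResource() keeps a copy of the guest iovec table plus a single host-side "linear"
    // staging buffer sized to the sum of the iovec lengths; sync_iov() copies between the two
    // in either direction during transfer operations.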
1783 
1784     void detachResourceLocked(uint32_t ctxId, uint32_t toUnrefId) {
1785         stream_renderer_info("ctxid: %u resid: %u", ctxId, toUnrefId);
1786 
1787         auto it = mContextResources.find(ctxId);
1788         if (it == mContextResources.end()) return;
1789 
1790         std::vector<VirtioGpuResId> withoutRes;
1791         for (auto resId : it->second) {
1792             if (resId != toUnrefId) {
1793                 withoutRes.push_back(resId);
1794             }
1795         }
1796         mContextResources[ctxId] = withoutRes;
1797 
1798         auto resIt = mResources.find(toUnrefId);
1799         if (resIt == mResources.end()) return;
1800 
1801         resIt->second.hostPipe = 0;
1802         resIt->second.ctxId = 0;
1803 
1804         auto ctxIt = mContexts.find(ctxId);
1805         if (ctxIt != mContexts.end()) {
1806             auto& ctxEntry = ctxIt->second;
1807             if (ctxEntry.addressSpaceHandles.count(toUnrefId)) {
1808                 uint32_t handle = ctxEntry.addressSpaceHandles[toUnrefId];
1809                 mAddressSpaceDeviceControlOps->destroy_handle(handle);
1810                 ctxEntry.addressSpaceHandles.erase(toUnrefId);
1811             }
1812         }
1813     }
1814 
1815     inline const GoldfishPipeServiceOps* ensureAndGetServiceOps() {
1816         if (mServiceOps) return mServiceOps;
1817         mServiceOps = goldfish_pipe_get_service_ops();
1818         return mServiceOps;
1819     }
1820 
1821     void* mCookie = nullptr;
1822     stream_renderer_fence_callback mFenceCallback;
1823     AndroidVirtioGpuOps* mVirtioGpuOps = nullptr;
1824     uint32_t mPageSize = 4096;
1825     struct address_space_device_control_ops* mAddressSpaceDeviceControlOps = nullptr;
1826 
1827     const GoldfishPipeServiceOps* mServiceOps = nullptr;
1828 
1829     std::unordered_map<VirtioGpuCtxId, PipeCtxEntry> mContexts;
1830     std::unordered_map<VirtioGpuResId, PipeResEntry> mResources;
1831     std::unordered_map<VirtioGpuCtxId, std::vector<VirtioGpuResId>> mContextResources;
1832     std::unordered_map<VirtioGpuResId, std::vector<VirtioGpuCtxId>> mResourceContexts;
1833 
1834     // When we wait for gpu or wait for gpu vulkan, the next (and subsequent)
1835     // fences created for that context should not be signaled immediately.
1836     // Rather, they should get in line.
1837     std::unique_ptr<VirtioGpuTimelines> mVirtioGpuTimelines = nullptr;
1838 };
1839 
1840 static PipeVirglRenderer* sRenderer() {
1841     static PipeVirglRenderer* p = new PipeVirglRenderer;
1842     return p;
1843 }
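// The renderer singleton is allocated once and never freed; keeping it alive for the whole
// process lifetime sidesteps static destruction ordering issues at shutdown (a common pattern
// for singletons of this kind).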
1844 
1845 extern "C" {
1846 
1847 VG_EXPORT int stream_renderer_resource_create(struct stream_renderer_resource_create_args* args,
1848                                               struct iovec* iov, uint32_t num_iovs) {
1849     return sRenderer()->createResource(args, iov, num_iovs);
1850 }
1851 
1852 VG_EXPORT void stream_renderer_resource_unref(uint32_t res_handle) {
1853     sRenderer()->unrefResource(res_handle);
1854 }
1855 
1856 VG_EXPORT void stream_renderer_context_destroy(uint32_t handle) {
1857     sRenderer()->destroyContext(handle);
1858 }
1859 
1860 VG_EXPORT int stream_renderer_submit_cmd(struct stream_renderer_command* cmd) {
1861     return sRenderer()->submitCmd(cmd);
1862 }
1863 
1864 VG_EXPORT int stream_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id, uint32_t level,
1865                                                 uint32_t stride, uint32_t layer_stride,
1866                                                 struct stream_renderer_box* box, uint64_t offset,
1867                                                 struct iovec* iov, int iovec_cnt) {
1868     return sRenderer()->transferReadIov(handle, offset, box, iov, iovec_cnt);
1869 }
1870 
1871 VG_EXPORT int stream_renderer_transfer_write_iov(uint32_t handle, uint32_t ctx_id, int level,
1872                                                  uint32_t stride, uint32_t layer_stride,
1873                                                  struct stream_renderer_box* box, uint64_t offset,
1874                                                  struct iovec* iovec, unsigned int iovec_cnt) {
1875     return sRenderer()->transferWriteIov(handle, offset, box, iovec, iovec_cnt);
1876 }
1877 
1878 VG_EXPORT void stream_renderer_get_cap_set(uint32_t set, uint32_t* max_ver, uint32_t* max_size) {
1879     // `max_ver` not useful
1880     return sRenderer()->getCapset(set, max_size);
1881 }
1882 
1883 VG_EXPORT void stream_renderer_fill_caps(uint32_t set, uint32_t version, void* caps) {
1884     // `version` not useful
1885     return sRenderer()->fillCaps(set, caps);
1886 }
1887 
1888 VG_EXPORT int stream_renderer_resource_attach_iov(int res_handle, struct iovec* iov, int num_iovs) {
1889     return sRenderer()->attachIov(res_handle, iov, num_iovs);
1890 }
1891 
1892 VG_EXPORT void stream_renderer_resource_detach_iov(int res_handle, struct iovec** iov,
1893                                                    int* num_iovs) {
1894     return sRenderer()->detachIov(res_handle, iov, num_iovs);
1895 }
1896 
1897 VG_EXPORT void stream_renderer_ctx_attach_resource(int ctx_id, int res_handle) {
1898     sRenderer()->attachResource(ctx_id, res_handle);
1899 }
1900 
1901 VG_EXPORT void stream_renderer_ctx_detach_resource(int ctx_id, int res_handle) {
1902     sRenderer()->detachResource(ctx_id, res_handle);
1903 }
1904 
1905 VG_EXPORT int stream_renderer_resource_get_info(int res_handle,
1906                                                 struct stream_renderer_resource_info* info) {
1907     return sRenderer()->getResourceInfo(res_handle, info);
1908 }
1909 
1910 VG_EXPORT void stream_renderer_flush(uint32_t res_handle) {
1911     sRenderer()->flushResource(res_handle);
1912 }
1913 
1914 VG_EXPORT int stream_renderer_create_blob(uint32_t ctx_id, uint32_t res_handle,
1915                                           const struct stream_renderer_create_blob* create_blob,
1916                                           const struct iovec* iovecs, uint32_t num_iovs,
1917                                           const struct stream_renderer_handle* handle) {
1918     // Propagate createBlob()'s error code instead of silently dropping it.
1919     return sRenderer()->createBlob(ctx_id, res_handle, create_blob, handle);
1920 }
1921 
1922 VG_EXPORT int stream_renderer_export_blob(uint32_t res_handle,
1923                                           struct stream_renderer_handle* handle) {
1924     return sRenderer()->exportBlob(res_handle, handle);
1925 }
1926 
1927 VG_EXPORT int stream_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1928     return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
1929 }
1930 
1931 VG_EXPORT int stream_renderer_resource_unmap(uint32_t res_handle) {
1932     return sRenderer()->resourceUnmap(res_handle);
1933 }
1934 
1935 VG_EXPORT int stream_renderer_context_create(uint32_t ctx_id, uint32_t nlen, const char* name,
1936                                              uint32_t context_init) {
1937     return sRenderer()->createContext(ctx_id, nlen, name, context_init);
1938 }
1939 
1940 VG_EXPORT int stream_renderer_create_fence(const struct stream_renderer_fence* fence) {
1941     if (fence->flags & STREAM_RENDERER_FLAG_FENCE_RING_IDX) {
1942         sRenderer()->createFence(fence->fence_id, VirtioGpuRingContextSpecific{
1943                                                       .mCtxId = fence->ctx_id,
1944                                                       .mRingIdx = fence->ring_idx,
1945                                                   });
1946     } else {
1947         sRenderer()->createFence(fence->fence_id, VirtioGpuRingGlobal{});
1948     }
1949 
1950     return 0;
1951 }
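// Fence creation example (illustrative sketch, not part of this file): a VMM that wants a
// fence tied to a specific context/ring rather than the global timeline sets the RING_IDX
// flag. nextFenceId, ctxId and ringIdx below are hypothetical caller-side values:
//
//   struct stream_renderer_fence fence = {};
//   fence.flags = STREAM_RENDERER_FLAG_FENCE_RING_IDX;
//   fence.fence_id = nextFenceId++;
//   fence.ctx_id = ctxId;
//   fence.ring_idx = ringIdx;
//   stream_renderer_create_fence(&fence);
//   // The registered stream_renderer_fence_callback fires once the ring reaches this point.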
1952 
1953 VG_EXPORT int stream_renderer_platform_import_resource(int res_handle, int res_info,
1954                                                        void* resource) {
1955     return sRenderer()->platformImportResource(res_handle, res_info, resource);
1956 }
1957 
1958 VG_EXPORT int stream_renderer_platform_resource_info(int res_handle, int* width, int* height,
1959                                                      int* internal_format) {
1960     return sRenderer()->platformResourceInfo(res_handle, width, height, internal_format);
1961 }
1962 
1963 VG_EXPORT void* stream_renderer_platform_create_shared_egl_context() {
1964     return sRenderer()->platformCreateSharedEglContext();
1965 }
1966 
1967 VG_EXPORT int stream_renderer_platform_destroy_shared_egl_context(void* context) {
1968     return sRenderer()->platformDestroySharedEglContext(context);
1969 }
1970 
1971 VG_EXPORT int stream_renderer_resource_map_info(uint32_t res_handle, uint32_t* map_info) {
1972     return sRenderer()->resourceMapInfo(res_handle, map_info);
1973 }
1974 
1975 VG_EXPORT int stream_renderer_vulkan_info(uint32_t res_handle,
1976                                           struct stream_renderer_vulkan_info* vulkan_info) {
1977     return sRenderer()->vulkanInfo(res_handle, vulkan_info);
1978 }
1979 
1980 static const GoldfishPipeServiceOps goldfish_pipe_service_ops = {
1981     // guest_open()
1982     [](GoldfishHwPipe* hwPipe) -> GoldfishHostPipe* {
1983         return static_cast<GoldfishHostPipe*>(android_pipe_guest_open(hwPipe));
1984     },
1985     // guest_open_with_flags()
1986     [](GoldfishHwPipe* hwPipe, uint32_t flags) -> GoldfishHostPipe* {
1987         return static_cast<GoldfishHostPipe*>(android_pipe_guest_open_with_flags(hwPipe, flags));
1988     },
1989     // guest_close()
1990     [](GoldfishHostPipe* hostPipe, GoldfishPipeCloseReason reason) {
1991         static_assert((int)GOLDFISH_PIPE_CLOSE_GRACEFUL == (int)PIPE_CLOSE_GRACEFUL,
1992                       "Invalid PIPE_CLOSE_GRACEFUL value");
1993         static_assert((int)GOLDFISH_PIPE_CLOSE_REBOOT == (int)PIPE_CLOSE_REBOOT,
1994                       "Invalid PIPE_CLOSE_REBOOT value");
1995         static_assert((int)GOLDFISH_PIPE_CLOSE_LOAD_SNAPSHOT == (int)PIPE_CLOSE_LOAD_SNAPSHOT,
1996                       "Invalid PIPE_CLOSE_LOAD_SNAPSHOT value");
1997         static_assert((int)GOLDFISH_PIPE_CLOSE_ERROR == (int)PIPE_CLOSE_ERROR,
1998                       "Invalid PIPE_CLOSE_ERROR value");
1999 
2000         android_pipe_guest_close(hostPipe, static_cast<PipeCloseReason>(reason));
2001     },
2002     // guest_pre_load()
2003     [](QEMUFile* file) { (void)file; },
2004     // guest_post_load()
2005     [](QEMUFile* file) { (void)file; },
2006     // guest_pre_save()
2007     [](QEMUFile* file) { (void)file; },
2008     // guest_post_save()
2009     [](QEMUFile* file) { (void)file; },
2010     // guest_load()
2011     [](QEMUFile* file, GoldfishHwPipe* hwPipe, char* force_close) -> GoldfishHostPipe* {
2012         (void)file;
2013         (void)hwPipe;
2014         (void)force_close;
2015         return nullptr;
2016     },
2017     // guest_save()
2018     [](GoldfishHostPipe* hostPipe, QEMUFile* file) {
2019         (void)hostPipe;
2020         (void)file;
2021     },
2022     // guest_poll()
2023     [](GoldfishHostPipe* hostPipe) {
2024         static_assert((int)GOLDFISH_PIPE_POLL_IN == (int)PIPE_POLL_IN, "invalid POLL_IN values");
2025         static_assert((int)GOLDFISH_PIPE_POLL_OUT == (int)PIPE_POLL_OUT, "invalid POLL_OUT values");
2026         static_assert((int)GOLDFISH_PIPE_POLL_HUP == (int)PIPE_POLL_HUP, "invalid POLL_HUP values");
2027 
2028         return static_cast<GoldfishPipePollFlags>(android_pipe_guest_poll(hostPipe));
2029     },
2030     // guest_recv()
2031     [](GoldfishHostPipe* hostPipe, GoldfishPipeBuffer* buffers, int numBuffers) -> int {
2032         // NOTE: Assumes that AndroidPipeBuffer and GoldfishPipeBuffer
2033         //       have exactly the same layout.
2034         static_assert(sizeof(AndroidPipeBuffer) == sizeof(GoldfishPipeBuffer),
2035                       "Invalid PipeBuffer sizes");
2036     // We can't use static_assert with offsetof() here because MSVC implements offsetof()
2037     // using reinterpret_cast, which is not allowed in a constant expression.
2038     // TODO: Add runtime assertion instead?
2039     // https://developercommunity.visualstudio.com/content/problem/22196/static-assert-cannot-compile-constexprs-method-tha.html
2040 #ifndef _MSC_VER
2041         static_assert(offsetof(AndroidPipeBuffer, data) == offsetof(GoldfishPipeBuffer, data),
2042                       "Invalid PipeBuffer::data offsets");
2043         static_assert(offsetof(AndroidPipeBuffer, size) == offsetof(GoldfishPipeBuffer, size),
2044                       "Invalid PipeBuffer::size offsets");
2045 #endif
2046         return android_pipe_guest_recv(hostPipe, reinterpret_cast<AndroidPipeBuffer*>(buffers),
2047                                        numBuffers);
2048     },
2049     // wait_guest_recv()
2050     [](GoldfishHostPipe* hostPipe) {
2051         android_pipe_wait_guest_recv(hostPipe);
2052     },
2053     // guest_send()
2054     [](GoldfishHostPipe** hostPipe, const GoldfishPipeBuffer* buffers, int numBuffers) -> int {
2055         return android_pipe_guest_send(reinterpret_cast<void**>(hostPipe),
2056                                        reinterpret_cast<const AndroidPipeBuffer*>(buffers),
2057                                        numBuffers);
2058     },
2059     // wait_guest_send()
2060     [](GoldfishHostPipe* hostPipe) {
2061         android_pipe_wait_guest_send(hostPipe);
2062     },
2063     // guest_wake_on()
2064     [](GoldfishHostPipe* hostPipe, GoldfishPipeWakeFlags wakeFlags) {
2065         android_pipe_guest_wake_on(hostPipe, static_cast<int>(wakeFlags));
2066     },
2067     // dma_add_buffer()
2068     [](void* pipe, uint64_t paddr, uint64_t sz) {
2069         // not considered for virtio
2070     },
2071     // dma_remove_buffer()
2072     [](uint64_t paddr) {
2073         // not considered for virtio
2074     },
2075     // dma_invalidate_host_mappings()
2076     []() {
2077         // not considered for virtio
2078     },
2079     // dma_reset_host_mappings()
2080     []() {
2081         // not considered for virtio
2082     },
2083     // dma_save_mappings()
2084     [](QEMUFile* file) { (void)file; },
2085     // dma_load_mappings()
2086     [](QEMUFile* file) { (void)file; },
2087 };
2088 
2089 static int stream_renderer_opengles_init(uint32_t display_width, uint32_t display_height,
2090                                          int renderer_flags) {
2091     stream_renderer_info("start. display dimensions: width %u height %u, renderer flags: 0x%x",
2092                          display_width, display_height, renderer_flags);
2093 
2094     // Flags processing
2095 
2096     // TODO: hook up "gfxstream egl" to the renderer flag
2097     // STREAM_RENDERER_FLAGS_USE_EGL_BIT in crosvm,
2098     // as specified from launch_cvd.
2099     // At the moment, set ANDROID_GFXSTREAM_EGL=1
2100     // when testing on GCE.
2101     if (android::base::getEnvironmentVariable("ANDROID_GFXSTREAM_EGL") == "1") {
2102         android::base::setEnvironmentVariable("ANDROID_EGL_ON_EGL", "1");
2103         android::base::setEnvironmentVariable("ANDROID_EMUGL_LOG_PRINT", "1");
2104         android::base::setEnvironmentVariable("ANDROID_EMUGL_VERBOSE", "1");
2105     }
2106     // End of the GCE testing workaround.
2107 
2108     android::base::setEnvironmentVariable("ANDROID_EMU_HEADLESS", "1");
2109     bool enableVk = (renderer_flags & STREAM_RENDERER_FLAGS_USE_VK_BIT);
2110     bool enableGles = (renderer_flags & STREAM_RENDERER_FLAGS_USE_GLES_BIT);
2111     bool enableVkSnapshot = (renderer_flags & STREAM_RENDERER_FLAGS_VULKAN_SNAPSHOTS);
2112 
2113     bool egl2eglByEnv = android::base::getEnvironmentVariable("ANDROID_EGL_ON_EGL") == "1";
2114     bool egl2eglByFlag = renderer_flags & STREAM_RENDERER_FLAGS_USE_EGL_BIT;
2115     bool enable_egl2egl = egl2eglByFlag || egl2eglByEnv;
2116     if (enable_egl2egl) {
2117         android::base::setEnvironmentVariable("ANDROID_GFXSTREAM_EGL", "1");
2118         android::base::setEnvironmentVariable("ANDROID_EGL_ON_EGL", "1");
2119     }
2120 
2121     bool surfaceless = renderer_flags & STREAM_RENDERER_FLAGS_USE_SURFACELESS_BIT;
2122     bool enableGlEs31Flag = enableGles;
2123     bool useExternalBlob = renderer_flags & STREAM_RENDERER_FLAGS_USE_EXTERNAL_BLOB;
2124     bool useSystemBlob = renderer_flags & STREAM_RENDERER_FLAGS_USE_SYSTEM_BLOB;
2125     bool guestUsesAngle = enableVk && !enableGles;
2126     bool useVulkanNativeSwapchain =
2127         renderer_flags & STREAM_RENDERER_FLAGS_VULKAN_NATIVE_SWAPCHAIN_BIT;
2128 
2129     stream_renderer_info("GLES enabled? %d", enableGles);
2130     stream_renderer_info("Vulkan enabled? %d", enableVk);
2131     stream_renderer_info("egl2egl enabled? %d", enable_egl2egl);
2132     stream_renderer_info("surfaceless? %d", surfaceless);
2133     stream_renderer_info("OpenGL ES 3.1 enabled? %d", enableGlEs31Flag);
2134     stream_renderer_info("use external blob? %d", useExternalBlob);
2135     stream_renderer_info("use system blob? %d", useSystemBlob);
2136     stream_renderer_info("guest using ANGLE? %d", guestUsesAngle);
2137     stream_renderer_info("use Vulkan native swapchain on the host? %d", useVulkanNativeSwapchain);
2138 
2139     if (useSystemBlob) {
2140         if (!useExternalBlob) {
2141             stream_renderer_error("USE_EXTERNAL_BLOB must be on with USE_SYSTEM_BLOB");
2142             return -EINVAL;
2143         }
2144 
2145 #ifndef _WIN32
2146         stream_renderer_info("Warning: USE_SYSTEM_BLOB has only been tested on Windows");
2147 #endif
2148     }
2149 
2150     feature_set_enabled_override(kFeature_GLPipeChecksum, false);
2151     feature_set_enabled_override(kFeature_GLESDynamicVersion, true);
2152     feature_set_enabled_override(kFeature_PlayStoreImage, !enableGlEs31Flag);
2153     feature_set_enabled_override(kFeature_GLDMA, false);
2154     feature_set_enabled_override(kFeature_GLAsyncSwap, false);
2155     feature_set_enabled_override(kFeature_RefCountPipe, false);
2156     feature_set_enabled_override(kFeature_NoDelayCloseColorBuffer, true);
2157     feature_set_enabled_override(kFeature_NativeTextureDecompression, false);
2158     feature_set_enabled_override(kFeature_GLDirectMem, false);
2159     feature_set_enabled_override(kFeature_Vulkan, enableVk);
2160     feature_set_enabled_override(kFeature_VulkanSnapshots, enableVkSnapshot);
2161     feature_set_enabled_override(kFeature_VulkanNullOptionalStrings, true);
2162     feature_set_enabled_override(kFeature_VulkanShaderFloat16Int8, true);
2163     feature_set_enabled_override(kFeature_HostComposition, true);
2164     feature_set_enabled_override(kFeature_VulkanIgnoredHandles, true);
2165     feature_set_enabled_override(kFeature_VirtioGpuNext, true);
2166     feature_set_enabled_override(kFeature_VirtioGpuNativeSync, true);
2167     feature_set_enabled_override(kFeature_GuestUsesAngle, guestUsesAngle);
2168     feature_set_enabled_override(kFeature_VulkanQueueSubmitWithCommands, true);
2169     feature_set_enabled_override(kFeature_VulkanNativeSwapchain, useVulkanNativeSwapchain);
2170     feature_set_enabled_override(kFeature_VulkanBatchedDescriptorSetUpdate, true);
2171     feature_set_enabled_override(kFeature_VirtioGpuFenceContexts, true);
2172     feature_set_enabled_override(kFeature_ExternalBlob, useExternalBlob);
2173     feature_set_enabled_override(kFeature_SystemBlob, useSystemBlob);
2174 
2175     android::featurecontrol::productFeatureOverride();
2176 
2177     if (useVulkanNativeSwapchain && !enableVk) {
2178         stream_renderer_error("can't enable vulkan native swapchain, Vulkan is disabled");
2179         return -EINVAL;
2180     }
2181 
2182     gfxstream::vk::vkDispatch(false /* don't use test ICD */);
2183 
2184     auto androidHw = aemu_get_android_hw();
2185 
2186     androidHw->hw_gltransport_asg_writeBufferSize = 1048576;
2187     androidHw->hw_gltransport_asg_writeStepSize = 262144;
2188     androidHw->hw_gltransport_asg_dataRingSize = 524288;
2189     androidHw->hw_gltransport_drawFlushInterval = 10000;
2190 
2191     EmuglConfig config;
2192 
2193     // Make all the console agents available.
2194     android::emulation::injectGraphicsAgents(android::emulation::GfxStreamGraphicsAgentFactory());
2195 
2196     emuglConfig_init(&config, true /* gpu enabled */, "auto",
2197                      enable_egl2egl ? "swiftshader_indirect" : "host", 64, /* bitness */
2198                      surfaceless,                                          /* no window */
2199                      false,                                                /* blocklisted */
2200                      false,                                                /* has guest renderer */
2201                      WINSYS_GLESBACKEND_PREFERENCE_AUTO, true /* force host gpu vulkan */);
2202 
2203     emuglConfig_setupEnv(&config);
2204 
2205     android_prepareOpenglesEmulation();
2206 
2207     {
2208         static gfxstream::RenderLibPtr renderLibPtr = gfxstream::initLibrary();
2209         android_setOpenglesEmulation(renderLibPtr.get(), nullptr, nullptr);
2210     }
2211 
2212     int maj;
2213     int min;
2214     android_startOpenglesRenderer(display_width, display_height, 1, 28, getGraphicsAgents()->vm,
2215                                   getGraphicsAgents()->emu, getGraphicsAgents()->multi_display,
2216                                   &maj, &min);
2217 
2218     char* vendor = nullptr;
2219     char* renderer = nullptr;
2220     char* version = nullptr;
2221 
2222     android_getOpenglesHardwareStrings(&vendor, &renderer, &version);
2223 
2224     stream_renderer_info("GL strings: [%s] [%s] [%s]", vendor, renderer, version);
2225 
2226     auto openglesRenderer = android_getOpenglesRenderer();
2227 
2228     if (!openglesRenderer) {
2229         stream_renderer_error("No renderer started, fatal");
2230         return -EINVAL;
2231     }
2232 
2233     address_space_set_vm_operations(getGraphicsAgents()->vm);
2234     android_init_opengles_pipe();
2235     android_opengles_pipe_set_recv_mode(2 /* virtio-gpu */);
2236     android_init_refcount_pipe();
2237 
2238     return 0;
2239 }
2240 
2241 VG_EXPORT int stream_renderer_init(struct stream_renderer_param* stream_renderer_params,
2242                                    uint64_t num_params) {
2243     // Required parameters.
2244     std::unordered_set<uint64_t> required_params{STREAM_RENDERER_PARAM_USER_DATA,
2245                                                  STREAM_RENDERER_PARAM_RENDERER_FLAGS,
2246                                                  STREAM_RENDERER_PARAM_FENCE_CALLBACK};
2247 
2248     // String names of the parameters.
2249     std::unordered_map<uint64_t, std::string> param_strings{
2250         {STREAM_RENDERER_PARAM_USER_DATA, "USER_DATA"},
2251         {STREAM_RENDERER_PARAM_RENDERER_FLAGS, "RENDERER_FLAGS"},
2252         {STREAM_RENDERER_PARAM_FENCE_CALLBACK, "FENCE_CALLBACK"},
2253         {STREAM_RENDERER_PARAM_WIN0_WIDTH, "WIN0_WIDTH"},
2254         {STREAM_RENDERER_PARAM_WIN0_HEIGHT, "WIN0_HEIGHT"},
2255         {STREAM_RENDERER_PARAM_DEBUG_CALLBACK, "DEBUG_CALLBACK"},
2256         {STREAM_RENDERER_SKIP_OPENGLES_INIT, "SKIP_OPENGLES_INIT"},
2257         {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT,
2258          "METRICS_CALLBACK_ADD_INSTANT_EVENT"},
2259         {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR,
2260          "METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR"},
2261         {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC,
2262          "METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC"},
2263         {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT,
2264          "METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT"},
2265         {STREAM_RENDERER_PARAM_METRICS_CALLBACK_SET_ANNOTATION, "METRICS_CALLBACK_SET_ANNOTATION"},
2266         {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ABORT, "METRICS_CALLBACK_ABORT"}};
2267 
2268     // Print full values for these parameters:
2269     // Values here must not be pointers (e.g. callback functions), to avoid potentially identifying
2270     // someone via ASLR. Pointers in ASLR are randomized on boot, which means pointers may be
2271     // different between users but similar across a single user's sessions.
2272     // As a convenience, any value <= 4096 is also printed, to catch small or null pointer errors.
2273     std::unordered_set<uint64_t> printed_param_values{STREAM_RENDERER_PARAM_RENDERER_FLAGS,
2274                                                       STREAM_RENDERER_PARAM_WIN0_WIDTH,
2275                                                       STREAM_RENDERER_PARAM_WIN0_HEIGHT};
2276 
2277     // We may have unknown parameters, so this function is lenient.
2278     auto get_param_string = [&](uint64_t key) -> std::string {
2279         auto param_string = param_strings.find(key);
2280         if (param_string != param_strings.end()) {
2281             return param_string->second;
2282         } else {
2283             return "Unknown param with key=" + std::to_string(key);
2284         }
2285     };
2286 
2287     // Initialization data.
2288     uint32_t display_width = 0;
2289     uint32_t display_height = 0;
2290     void* renderer_cookie = nullptr;
2291     int renderer_flags = 0;
2292     stream_renderer_fence_callback fence_callback = nullptr;
2293     bool skip_opengles = false;
2294 
2295     // Iterate all parameters that we support.
2296     stream_renderer_info("Reading stream renderer parameters:");
2297     for (uint64_t i = 0; i < num_params; ++i) {
2298         stream_renderer_param& param = stream_renderer_params[i];
2299 
2300         // Print out parameter we are processing. See comment above `printed_param_values` before
2301         // adding new prints.
2302         if (printed_param_values.find(param.key) != printed_param_values.end() ||
2303             param.value <= 4096) {
2304             stream_renderer_info("%s - %llu", get_param_string(param.key).c_str(),
2305                                  static_cast<unsigned long long>(param.value));
2306         } else {
2307             // If not full value, print that it was passed.
2308             stream_renderer_info("%s", get_param_string(param.key).c_str());
2309         }
2310 
2311         // Removing every param we process will leave required_params empty if all provided.
2312         required_params.erase(param.key);
2313 
2314         switch (param.key) {
2315             case STREAM_RENDERER_PARAM_USER_DATA: {
2316                 renderer_cookie = reinterpret_cast<void*>(static_cast<uintptr_t>(param.value));
2317                 globalUserData = renderer_cookie;
2318                 break;
2319             }
2320             case STREAM_RENDERER_PARAM_RENDERER_FLAGS: {
2321                 renderer_flags = static_cast<int>(param.value);
2322                 break;
2323             }
2324             case STREAM_RENDERER_PARAM_FENCE_CALLBACK: {
2325                 fence_callback = reinterpret_cast<stream_renderer_fence_callback>(
2326                     static_cast<uintptr_t>(param.value));
2327                 break;
2328             }
2329             case STREAM_RENDERER_PARAM_WIN0_WIDTH: {
2330                 display_width = static_cast<uint32_t>(param.value);
2331                 break;
2332             }
2333             case STREAM_RENDERER_PARAM_WIN0_HEIGHT: {
2334                 display_height = static_cast<uint32_t>(param.value);
2335                 break;
2336             }
2337             case STREAM_RENDERER_PARAM_DEBUG_CALLBACK: {
2338                 globalDebugCallback = reinterpret_cast<stream_renderer_debug_callback>(
2339                     static_cast<uintptr_t>(param.value));
2340                 break;
2341             }
2342             case STREAM_RENDERER_SKIP_OPENGLES_INIT: {
2343                 skip_opengles = static_cast<bool>(param.value);
2344                 break;
2345             }
2346             case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT: {
2347                 MetricsLogger::add_instant_event_callback =
2348                     reinterpret_cast<stream_renderer_param_metrics_callback_add_instant_event>(
2349                         static_cast<uintptr_t>(param.value));
2350                 break;
2351             }
2352             case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR: {
2353                 MetricsLogger::add_instant_event_with_descriptor_callback = reinterpret_cast<
2354                     stream_renderer_param_metrics_callback_add_instant_event_with_descriptor>(
2355                     static_cast<uintptr_t>(param.value));
2356                 break;
2357             }
2358             case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC: {
2359                 MetricsLogger::add_instant_event_with_metric_callback = reinterpret_cast<
2360                     stream_renderer_param_metrics_callback_add_instant_event_with_metric>(
2361                     static_cast<uintptr_t>(param.value));
2362                 break;
2363             }
2364             case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT: {
2365                 MetricsLogger::add_vulkan_out_of_memory_event = reinterpret_cast<
2366                     stream_renderer_param_metrics_callback_add_vulkan_out_of_memory_event>(
2367                     static_cast<uintptr_t>(param.value));
2368                 break;
2369             }
2370             case STREAM_RENDERER_PARAM_METRICS_CALLBACK_SET_ANNOTATION: {
2371                 MetricsLogger::set_crash_annotation_callback =
2372                     reinterpret_cast<stream_renderer_param_metrics_callback_set_annotation>(
2373                         static_cast<uintptr_t>(param.value));
2374                 break;
2375             }
2376             case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ABORT: {
2377                 emugl::setDieFunction(
2378                     reinterpret_cast<stream_renderer_param_metrics_callback_abort>(
2379                         static_cast<uintptr_t>(param.value)));
2380                 break;
2381             }
2382             default: {
2383                 // We skip any parameters we don't recognize.
2384                 stream_renderer_error(
2385                     "Skipping unknown parameter key: %llu. May need to upgrade gfxstream.",
2386                     static_cast<unsigned long long>(param.key));
2387                 break;
2388             }
2389         }
2390     }
2391     stream_renderer_info("Finished reading parameters");
2392 
2393     // Some required params not found.
2394     if (required_params.size() > 0) {
2395         stream_renderer_error("Missing required parameters:");
2396         for (uint64_t param : required_params) {
2397             stream_renderer_error("%s", get_param_string(param).c_str());
2398         }
2399         stream_renderer_error("Failing initialization intentionally");
2400         return -EINVAL;
2401     }
2402 
2403     // Set non product-specific callbacks
2404     gfxstream::vk::vk_util::setVkCheckCallbacks(
2405         std::make_unique<gfxstream::vk::vk_util::VkCheckCallbacks>(
2406             gfxstream::vk::vk_util::VkCheckCallbacks{
2407                 .onVkErrorOutOfMemory =
2408                     [](VkResult result, const char* function, int line) {
2409                         auto fb = gfxstream::FrameBuffer::getFB();
2410                         if (!fb) {
2411                             stream_renderer_error(
2412                                 "FrameBuffer not yet initialized. Dropping out of memory event");
2413                             return;
2414                         }
2415                         fb->logVulkanOutOfMemory(result, function, line);
2416                     },
2417                 .onVkErrorOutOfMemoryOnAllocation =
2418                     [](VkResult result, const char* function, int line,
2419                        std::optional<uint64_t> allocationSize) {
2420                         auto fb = gfxstream::FrameBuffer::getFB();
2421                         if (!fb) {
2422                             stream_renderer_error(
2423                                 "FrameBuffer not yet initialized. Dropping out of memory event");
2424                             return;
2425                         }
2426                         fb->logVulkanOutOfMemory(result, function, line, allocationSize);
2427                     }}));
2428 
2429     if (!skip_opengles) {
2430         // aemu currently does its own opengles initialization in
2431         // qemu/android/android-emu/android/opengles.cpp.
2432         int ret = stream_renderer_opengles_init(display_width, display_height, renderer_flags);
2433         if (ret) {
2434             return ret;
2435         }
2436     }
2437 
2438     sRenderer()->init(renderer_cookie, renderer_flags, fence_callback);
2439     gfxstream::FrameBuffer::waitUntilInitialized();
2440 
2441     stream_renderer_info("Started renderer");
2442     return 0;
2443 }
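// Minimal initialization example (illustrative sketch; the exact flag set depends on the
// embedder, and myCookie/myFenceCallback are hypothetical caller-side values): the three
// required parameters are USER_DATA, RENDERER_FLAGS and FENCE_CALLBACK; window dimensions
// are optional.
//
//   struct stream_renderer_param params[] = {
//       {STREAM_RENDERER_PARAM_USER_DATA, (uint64_t)(uintptr_t)myCookie},
//       {STREAM_RENDERER_PARAM_RENDERER_FLAGS,
//        STREAM_RENDERER_FLAGS_USE_VK_BIT | STREAM_RENDERER_FLAGS_USE_SURFACELESS_BIT},
//       {STREAM_RENDERER_PARAM_FENCE_CALLBACK, (uint64_t)(uintptr_t)myFenceCallback},
//       {STREAM_RENDERER_PARAM_WIN0_WIDTH, 1280},
//       {STREAM_RENDERER_PARAM_WIN0_HEIGHT, 720},
//   };
//   int ret = stream_renderer_init(params, sizeof(params) / sizeof(params[0]));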
2444 
2445 VG_EXPORT void gfxstream_backend_setup_window(void* native_window_handle, int32_t window_x,
2446                                               int32_t window_y, int32_t window_width,
2447                                               int32_t window_height, int32_t fb_width,
2448                                               int32_t fb_height) {
2449     android_showOpenglesWindow(native_window_handle, window_x, window_y, window_width,
2450                                window_height, fb_width, fb_height, 1.0f, 0, false, false);
2451 }
2452 
2453 VG_EXPORT void stream_renderer_teardown() {
2454     android_finishOpenglesRenderer();
2455     android_hideOpenglesWindow();
2456     android_stopOpenglesRenderer(true);
2457 }
2458 
2459 VG_EXPORT void gfxstream_backend_set_screen_mask(int width, int height,
2460                                                  const unsigned char* rgbaData) {
2461     android_setOpenglesScreenMask(width, height, rgbaData);
2462 }
2463 
2464 const GoldfishPipeServiceOps* goldfish_pipe_get_service_ops() { return &goldfish_pipe_service_ops; }
2465 
2466 static_assert(sizeof(struct stream_renderer_device_id) == 32,
2467               "stream_renderer_device_id must be 32 bytes");
2468 static_assert(offsetof(struct stream_renderer_device_id, device_uuid) == 0,
2469               "stream_renderer_device_id.device_uuid must be at offset 0");
2470 static_assert(offsetof(struct stream_renderer_device_id, driver_uuid) == 16,
2471               "stream_renderer_device_id.driver_uuid must be at offset 16");
2472 
2473 static_assert(sizeof(struct stream_renderer_vulkan_info) == 36,
2474               "stream_renderer_vulkan_info must be 36 bytes");
2475 static_assert(offsetof(struct stream_renderer_vulkan_info, memory_index) == 0,
2476               "stream_renderer_vulkan_info.memory_index must be at offset 0");
2477 static_assert(offsetof(struct stream_renderer_vulkan_info, device_id) == 4,
2478               "stream_renderer_vulkan_info.device_id must be at offset 4");
2479 
2480 static_assert(sizeof(struct stream_renderer_param_host_visible_memory_mask_entry) == 36,
2481               "stream_renderer_param_host_visible_memory_mask_entry must be 36 bytes");
2482 static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask_entry, device_id) == 0,
2483               "stream_renderer_param_host_visible_memory_mask_entry.device_id must be at offset 0");
2484 static_assert(
2485     offsetof(struct stream_renderer_param_host_visible_memory_mask_entry, memory_type_mask) == 32,
2486     "stream_renderer_param_host_visible_memory_mask_entry.memory_type_mask must be at offset 32");
2487 
2488 static_assert(sizeof(struct stream_renderer_param_host_visible_memory_mask) == 16,
2489               "stream_renderer_param_host_visible_memory_mask must be 16 bytes");
2490 static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask, entries) == 0,
2491               "stream_renderer_param_host_visible_memory_mask.entries must be at offset 0");
2492 static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask, num_entries) == 8,
2493               "stream_renderer_param_host_visible_memory_mask.num_entries must be at offset 8");
2494 
2495 static_assert(sizeof(struct stream_renderer_param) == 16, "stream_renderer_param must be 16 bytes");
2496 static_assert(offsetof(struct stream_renderer_param, key) == 0,
2497               "stream_renderer_param.key must be at offset 0");
2498 static_assert(offsetof(struct stream_renderer_param, value) == 8,
2499               "stream_renderer_param.value must be at offset 8");
2500 
2501 #ifdef CONFIG_AEMU
2502 
2503 VG_EXPORT void stream_renderer_set_service_ops(const GoldfishPipeServiceOps* ops) {
2504     sRenderer()->setServiceOps(ops);
2505 }
2506 
2507 #endif  // CONFIG_AEMU
2508 
2509 }  // extern "C"
2510