// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vulkan/vulkan.h>

#include <cstdarg>
#include <cstdio>
#include <deque>
#include <type_traits>
#include <unordered_map>
#include <variant>

#include "BlobManager.h"
#include "FrameBuffer.h"
#include "GfxStreamAgents.h"
#include "VirtioGpuTimelines.h"
#include "VkCommonOperations.h"
#include "aemu/base/AlignedBuf.h"
#include "aemu/base/ManagedDescriptor.hpp"
#include "aemu/base/Metrics.h"
#include "aemu/base/Tracing.h"
#include "aemu/base/memory/SharedMemory.h"
#include "aemu/base/synchronization/Lock.h"
#include "aemu/base/threads/WorkerThread.h"
#include "gfxstream/Strings.h"
#include "gfxstream/host/Features.h"
#include "host-common/AddressSpaceService.h"
#include "host-common/GfxstreamFatalError.h"
#include "host-common/address_space_device.h"
#include "host-common/android_pipe_common.h"
#include "host-common/android_pipe_device.h"
#include "host-common/feature_control.h"
#include "host-common/globals.h"
#include "host-common/opengles-pipe.h"
#include "host-common/opengles.h"
#include "host-common/refcount-pipe.h"
#include "host-common/vm_operations.h"
#include "virgl_hw.h"
#include "virtgpu_gfxstream_protocol.h"
#include "vk_util.h"

#ifdef GFXSTREAM_ENABLE_HOST_VK_SNAPSHOT
#include "aemu/base/files/StdioStream.h"
#endif

extern "C" {
#include "drm_fourcc.h"
#include "gfxstream/virtio-gpu-gfxstream-renderer-unstable.h"
#include "gfxstream/virtio-gpu-gfxstream-renderer.h"
#include "host-common/goldfish_pipe.h"
#include "virgl_hw.h"
}  // extern "C"

#if defined(_WIN32)
struct iovec {
    void* iov_base; /* Starting address */
    size_t iov_len; /* Length in bytes */
};
#else
#include <unistd.h>
#endif  // _WIN32

#define MAX_DEBUG_BUFFER_SIZE 512

void* globalUserData = nullptr;
stream_renderer_debug_callback globalDebugCallback = nullptr;

void stream_renderer_log(uint32_t type, const char* format, ...) {
    char buf[MAX_DEBUG_BUFFER_SIZE];
    va_list args;
    va_start(args, format);
    vsnprintf(buf, MAX_DEBUG_BUFFER_SIZE, format, args);
    va_end(args);

    if (globalUserData && globalDebugCallback) {
        struct stream_renderer_debug debug = {0};
        debug.debug_type = type;
        debug.message = &buf[0];

        globalDebugCallback(globalUserData, &debug);
    } else {
        fprintf(stderr, "%s\n", buf);
    }
}

#if STREAM_RENDERER_LOG_LEVEL >= STREAM_RENDERER_DEBUG_ERROR
#define stream_renderer_error(format, ...)                                                 \
    do {                                                                                   \
        stream_renderer_log(STREAM_RENDERER_DEBUG_ERROR, "[%s(%d)] %s " format, __FILE__, \
                            __LINE__, __PRETTY_FUNCTION__, ##__VA_ARGS__);                 \
    } while (0)
#else
#define stream_renderer_error(format, ...)
#endif

#if STREAM_RENDERER_LOG_LEVEL >= STREAM_RENDERER_DEBUG_WARN
#define stream_renderer_warn(format, ...)                                                          \
    do {                                                                                           \
        stream_renderer_log(STREAM_RENDERER_DEBUG_WARN, "[%s(%d)] %s " format, __FILE__, __LINE__, \
                            __PRETTY_FUNCTION__, ##__VA_ARGS__);                                   \
    } while (0)
#else
#define stream_renderer_warn(format, ...)
#endif

#if STREAM_RENDERER_LOG_LEVEL >= STREAM_RENDERER_DEBUG_INFO
#define stream_renderer_info(format, ...)                                                          \
    do {                                                                                           \
        stream_renderer_log(STREAM_RENDERER_DEBUG_INFO, "[%s(%d)] %s " format, __FILE__, __LINE__, \
                            __FUNCTION__, ##__VA_ARGS__);                                          \
    } while (0)
#else
#define stream_renderer_info(format, ...)
#endif

#if STREAM_RENDERER_LOG_LEVEL >= STREAM_RENDERER_DEBUG_DEBUG
#define stream_renderer_debug(format, ...)                                                 \
    do {                                                                                   \
        stream_renderer_log(STREAM_RENDERER_DEBUG_DEBUG, "[%s(%d)] %s " format, __FILE__, \
                            __LINE__, __PRETTY_FUNCTION__, ##__VA_ARGS__);                 \
    } while (0)
#else
#define stream_renderer_debug(format, ...)
#endif

// Virtio Goldfish Pipe: Overview-----------------------------------------------
//
// Virtio Goldfish Pipe is meant for running goldfish pipe services with a
// stock Linux kernel that is already capable of virtio-gpu. It runs DRM
// VIRTGPU ioctls on top of a custom implementation of virglrenderer on the
// host side that doesn't (directly) do any rendering, but instead talks to
// host-side pipe services.
//
// This is mainly used for graphics at the moment, though it's possible to run
// other pipe services over virtio-gpu as well. virtio-gpu is selected over
// other devices primarily because of the existence of an API (virglrenderer)
// that is already somewhat separate from virtio-gpu, and not needing to create
// a new virtio device to handle goldfish pipe.
//
// How it works is, existing virglrenderer APIs are remapped to perform pipe
// operations. First of all, pipe operations consist of the following:
//
// - open() / close(): Starts or stops an instance of a pipe service.
//
// - write(const void* buf, size_t len) / read(const void* buf, size_t len):
// Sends or receives data over the pipe. The first write() is the name of the
// pipe service. After the pipe service is determined, the host calls
// resetPipe() to replace the host-side pipe instance with an instance of the
// pipe service.
//
// - reset(void* initialPipe, void* actualPipe): the operation that replaces an
// initial pipe with an instance of a pipe service.
//
// Next, here's how the pipe operations map to virglrenderer commands:
//
// - open() -> virgl_renderer_context_create(),
//             virgl_renderer_resource_create(),
//             virgl_renderer_resource_attach_iov()
//
// The open() corresponds to a guest-side open of a rendernode, which triggers
// context creation. Each pipe corresponds 1:1 with a drm virtgpu context id.
// We also associate an R8 resource with each pipe as the backing data for
// write/read.
//
// - close() -> virgl_renderer_resource_unref(),
//              virgl_renderer_context_destroy()
//
// The close() corresponds to undoing the operations of open().
//
// - write() -> virgl_renderer_transfer_write_iov() OR
//              virgl_renderer_submit_cmd()
//
// The pipe write() operation corresponds to performing a TRANSFER_TO_HOST ioctl
// on the resource created alongside open(), OR an EXECBUFFER ioctl.
//
// - read() -> virgl_renderer_transfer_read_iov()
//
// The pipe read() operation corresponds to performing a TRANSFER_FROM_HOST ioctl
// on the resource created alongside open().
//
// Details on transfer mechanism: mapping 2D transfers to 1D ones---------------
//
// Resource objects are typically 2D textures, while we want to transmit
// 1D buffers to the pipe services on the host. DRM VIRTGPU uses the concept
// of a 'box' to represent transfers that do not involve an entire resource
// object. Each box has x, y, width and height parameters to define the
// extent of the transfer for a 2D texture. In our use case, we only use the x
// and width parameters. We've also created the resource with R8 format
// (byte-by-byte) with width equal to the total size of the transfer buffer we
// want (around 1 MB).
//
// The resource object itself is currently backed via plain guest RAM, which
// can be physically non-contiguous from the guest POV, and therefore
// corresponds to a possibly-long list of pointers and sizes (iov) on the host
// side. The sync_iov helper function converts the list of pointers
// to one contiguous buffer on the host (or vice versa), at the cost of a copy.
// (TODO: see if we can use host coherent memory to do away with the copy.)
//
// We can see this abstraction in use via the implementation of
// transferWriteIov and transferReadIov below, which sync the iovec to/from a
// linear buffer if necessary, and then perform a corresponding pipe operation
// based on the box parameter's x and width values.
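//
// As an illustrative sketch only (not part of the implementation below): a
// guest-side pipe write of 4096 bytes at the start of the backing R8 resource
// would arrive as roughly the following hypothetical box, which sync_iov copies
// into the host linear buffer before the pipe service consumes it:
//
//   stream_renderer_box box = {};
//   box.x = 0;     // byte offset into the 1D transfer buffer
//   box.y = 0;     // unused for pipe transfers
//   box.w = 4096;  // number of bytes to transfer
//   box.h = 1;     // a single "row" of bytes
//   // transferWriteIov(resId, 0 /* offset */, &box, iovs, num_iovs);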

using android::AndroidPipe;
using android::base::AutoLock;
using android::base::DescriptorType;
using android::base::Lock;
using android::base::ManagedDescriptor;
using android::base::MetricsLogger;
using android::base::SharedMemory;

using emugl::FatalError;
using gfxstream::BlobManager;
using gfxstream::ManagedDescriptorInfo;

using VirtioGpuResId = uint32_t;

static constexpr int kPipeTryAgain = -2;

struct VirtioGpuCmd {
    uint32_t op;
    uint32_t cmdSize;
    unsigned char buf[0];
} __attribute__((packed));

struct PipeCtxEntry {
    std::string name;
    uint32_t capsetId;
    VirtioGpuCtxId ctxId;
    GoldfishHostPipe* hostPipe;
    int fence;
    uint32_t addressSpaceHandle;
    bool hasAddressSpaceHandle;
    std::unordered_map<VirtioGpuResId, uint32_t> addressSpaceHandles;
};

enum class ResType {
    // Used as a communication channel between the guest and the host
    // which does not need an allocation on the host GPU.
    PIPE,
    // Used as a GPU data buffer.
    BUFFER,
    // Used as a GPU texture.
    COLOR_BUFFER,
};

struct AlignedMemory {
    void* addr = nullptr;

    AlignedMemory(size_t align, size_t size)
        : addr(android::aligned_buf_alloc(align, size)) {}

    ~AlignedMemory() {
        if (addr != nullptr) {
            android::aligned_buf_free(addr);
        }
    }

    // AlignedMemory is neither copyable nor movable.
    AlignedMemory(const AlignedMemory& other) = delete;
    AlignedMemory& operator=(const AlignedMemory& other) = delete;
    AlignedMemory(AlignedMemory&& other) = delete;
    AlignedMemory& operator=(AlignedMemory&& other) = delete;
};

// Memory used as a ring buffer for communication between the guest and host.
class RingBlob : public std::variant<std::unique_ptr<AlignedMemory>,
                                     std::unique_ptr<SharedMemory>> {
  public:
    using BaseType = std::variant<std::unique_ptr<AlignedMemory>,
                                  std::unique_ptr<SharedMemory>>;
    // Inherit constructors.
    using BaseType::BaseType;

    bool isExportable() const {
        return std::holds_alternative<std::unique_ptr<SharedMemory>>(*this);
    }

    SharedMemory::handle_type releaseHandle() {
        if (!isExportable()) {
            return SharedMemory::invalidHandle();
        }
        return std::get<std::unique_ptr<SharedMemory>>(*this)->releaseHandle();
    }
};

struct PipeResEntry {
    stream_renderer_resource_create_args args;
    iovec* iov;
    uint32_t numIovs;
    void* linear;
    size_t linearSize;
    GoldfishHostPipe* hostPipe;
    VirtioGpuCtxId ctxId;
    void* hva;
    uint64_t hvaSize;
    uint64_t blobId;
    uint32_t blobMem;
    uint32_t blobFlags;
    uint32_t caching;
    ResType type;
    std::shared_ptr<RingBlob> ringBlob;
    bool externalAddr = false;
    std::shared_ptr<ManagedDescriptorInfo> descriptorInfo = nullptr;
};

static inline uint32_t align_up(uint32_t n, uint32_t a) { return ((n + a - 1) / a) * a; }

static inline uint32_t align_up_power_of_2(uint32_t n, uint32_t a) {
    return (n + (a - 1)) & ~(a - 1);
}
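
// Quick worked examples for the helpers above (comment only):
//   align_up(10, 4) == 12, align_up(8, 4) == 8,
//   align_up_power_of_2(10, 8) == 16 (this variant assumes 'a' is a power of two).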

#define VIRGL_FORMAT_NV12 166
#define VIRGL_FORMAT_YV12 163
#define VIRGL_FORMAT_P010 314

const uint32_t kGlBgra = 0x80e1;
const uint32_t kGlRgba = 0x1908;
const uint32_t kGlRgba16f = 0x881A;
const uint32_t kGlRgb565 = 0x8d62;
const uint32_t kGlRgba1010102 = 0x8059;
const uint32_t kGlR8 = 0x8229;
const uint32_t kGlR16 = 0x822A;
const uint32_t kGlRg8 = 0x822b;
const uint32_t kGlLuminance = 0x1909;
const uint32_t kGlLuminanceAlpha = 0x190a;
const uint32_t kGlUnsignedByte = 0x1401;
const uint32_t kGlUnsignedShort565 = 0x8363;

constexpr uint32_t kFwkFormatGlCompat = 0;
constexpr uint32_t kFwkFormatYV12 = 1;
// constexpr uint32_t kFwkFormatYUV420888 = 2;
constexpr uint32_t kFwkFormatNV12 = 3;
constexpr uint32_t kFwkFormatP010 = 4;

static inline bool virgl_format_is_yuv(uint32_t format) {
    switch (format) {
        case VIRGL_FORMAT_B8G8R8X8_UNORM:
        case VIRGL_FORMAT_B8G8R8A8_UNORM:
        case VIRGL_FORMAT_R8G8B8X8_UNORM:
        case VIRGL_FORMAT_R8G8B8A8_UNORM:
        case VIRGL_FORMAT_B5G6R5_UNORM:
        case VIRGL_FORMAT_R8_UNORM:
        case VIRGL_FORMAT_R16_UNORM:
        case VIRGL_FORMAT_R16G16B16A16_FLOAT:
        case VIRGL_FORMAT_R8G8_UNORM:
        case VIRGL_FORMAT_R10G10B10A2_UNORM:
            return false;
        case VIRGL_FORMAT_NV12:
        case VIRGL_FORMAT_P010:
        case VIRGL_FORMAT_YV12:
            return true;
        default:
            stream_renderer_error("Unknown virgl format 0x%x", format);
            return false;
    }
}

static inline uint32_t virgl_format_to_gl(uint32_t virgl_format) {
    switch (virgl_format) {
        case VIRGL_FORMAT_B8G8R8X8_UNORM:
        case VIRGL_FORMAT_B8G8R8A8_UNORM:
            return kGlBgra;
        case VIRGL_FORMAT_R8G8B8X8_UNORM:
        case VIRGL_FORMAT_R8G8B8A8_UNORM:
            return kGlRgba;
        case VIRGL_FORMAT_B5G6R5_UNORM:
            return kGlRgb565;
        case VIRGL_FORMAT_R16_UNORM:
            return kGlR16;
        case VIRGL_FORMAT_R16G16B16A16_FLOAT:
            return kGlRgba16f;
        case VIRGL_FORMAT_R8_UNORM:
            return kGlR8;
        case VIRGL_FORMAT_R8G8_UNORM:
            return kGlRg8;
        case VIRGL_FORMAT_NV12:
        case VIRGL_FORMAT_P010:
        case VIRGL_FORMAT_YV12:
            // emulated as RGBA8888
            return kGlRgba;
        case VIRGL_FORMAT_R10G10B10A2_UNORM:
            return kGlRgba1010102;
        default:
            return kGlRgba;
    }
}

static inline uint32_t virgl_format_to_fwk_format(uint32_t virgl_format) {
    switch (virgl_format) {
        case VIRGL_FORMAT_NV12:
            return kFwkFormatNV12;
        case VIRGL_FORMAT_P010:
            return kFwkFormatP010;
        case VIRGL_FORMAT_YV12:
            return kFwkFormatYV12;
        case VIRGL_FORMAT_R8_UNORM:
        case VIRGL_FORMAT_R16_UNORM:
        case VIRGL_FORMAT_R16G16B16A16_FLOAT:
        case VIRGL_FORMAT_R8G8_UNORM:
        case VIRGL_FORMAT_B8G8R8X8_UNORM:
        case VIRGL_FORMAT_B8G8R8A8_UNORM:
        case VIRGL_FORMAT_R8G8B8X8_UNORM:
        case VIRGL_FORMAT_R8G8B8A8_UNORM:
        case VIRGL_FORMAT_B5G6R5_UNORM:
        case VIRGL_FORMAT_R10G10B10A2_UNORM:
        default:  // kFwkFormatGlCompat: No extra conversions needed
            return kFwkFormatGlCompat;
    }
}

static inline uint32_t gl_format_to_natural_type(uint32_t format) {
    switch (format) {
        case kGlBgra:
        case kGlRgba:
        case kGlLuminance:
        case kGlLuminanceAlpha:
            return kGlUnsignedByte;
        case kGlRgb565:
            return kGlUnsignedShort565;
        default:
            return kGlUnsignedByte;
    }
}

static inline size_t virgl_format_to_linear_base(uint32_t format, uint32_t totalWidth,
                                                 uint32_t totalHeight, uint32_t x, uint32_t y,
                                                 uint32_t w, uint32_t h) {
    if (virgl_format_is_yuv(format)) {
        return 0;
    } else {
        uint32_t bpp = 4;
        switch (format) {
            case VIRGL_FORMAT_R16G16B16A16_FLOAT:
                bpp = 8;
                break;
            case VIRGL_FORMAT_B8G8R8X8_UNORM:
            case VIRGL_FORMAT_B8G8R8A8_UNORM:
            case VIRGL_FORMAT_R8G8B8X8_UNORM:
            case VIRGL_FORMAT_R8G8B8A8_UNORM:
            case VIRGL_FORMAT_R10G10B10A2_UNORM:
                bpp = 4;
                break;
            case VIRGL_FORMAT_B5G6R5_UNORM:
            case VIRGL_FORMAT_R8G8_UNORM:
            case VIRGL_FORMAT_R16_UNORM:
                bpp = 2;
                break;
            case VIRGL_FORMAT_R8_UNORM:
                bpp = 1;
                break;
            default:
                stream_renderer_error("Unknown virgl format: 0x%x", format);
                return 0;
        }

        uint32_t stride = totalWidth * bpp;
        return y * stride + x * bpp;
    }
    return 0;
}

static inline size_t virgl_format_to_total_xfer_len(uint32_t format, uint32_t totalWidth,
                                                    uint32_t totalHeight, uint32_t x, uint32_t y,
                                                    uint32_t w, uint32_t h) {
    if (virgl_format_is_yuv(format)) {
        uint32_t bpp = format == VIRGL_FORMAT_P010 ? 2 : 1;

        uint32_t yWidth = totalWidth;
        uint32_t yHeight = totalHeight;
        uint32_t yStridePixels;
        if (format == VIRGL_FORMAT_NV12) {
            yStridePixels = yWidth;
        } else if (format == VIRGL_FORMAT_P010) {
            yStridePixels = yWidth;
        } else if (format == VIRGL_FORMAT_YV12) {
            yStridePixels = align_up_power_of_2(yWidth, 32);
        } else {
            stream_renderer_error("Unknown virgl format: 0x%x", format);
            return 0;
        }
        uint32_t yStrideBytes = yStridePixels * bpp;
        uint32_t ySize = yStrideBytes * yHeight;

        uint32_t uvStridePixels;
        uint32_t uvPlaneCount;
        if (format == VIRGL_FORMAT_NV12) {
            uvStridePixels = yStridePixels;
            uvPlaneCount = 1;
        } else if (format == VIRGL_FORMAT_P010) {
            uvStridePixels = yStridePixels;
            uvPlaneCount = 1;
        } else if (format == VIRGL_FORMAT_YV12) {
            uvStridePixels = yStridePixels / 2;
            uvPlaneCount = 2;
        } else {
            stream_renderer_error("Unknown virgl yuv format: 0x%x", format);
            return 0;
        }
        uint32_t uvStrideBytes = uvStridePixels * bpp;
        uint32_t uvHeight = totalHeight / 2;
        uint32_t uvSize = uvStrideBytes * uvHeight * uvPlaneCount;

        uint32_t dataSize = ySize + uvSize;
        return dataSize;
    } else {
        uint32_t bpp = 4;
        switch (format) {
            case VIRGL_FORMAT_R16G16B16A16_FLOAT:
                bpp = 8;
                break;
            case VIRGL_FORMAT_B8G8R8X8_UNORM:
            case VIRGL_FORMAT_B8G8R8A8_UNORM:
            case VIRGL_FORMAT_R8G8B8X8_UNORM:
            case VIRGL_FORMAT_R8G8B8A8_UNORM:
            case VIRGL_FORMAT_R10G10B10A2_UNORM:
                bpp = 4;
                break;
            case VIRGL_FORMAT_B5G6R5_UNORM:
            case VIRGL_FORMAT_R16_UNORM:
            case VIRGL_FORMAT_R8G8_UNORM:
                bpp = 2;
                break;
            case VIRGL_FORMAT_R8_UNORM:
                bpp = 1;
                break;
            default:
                stream_renderer_error("Unknown virgl format: 0x%x", format);
                return 0;
        }

        uint32_t stride = totalWidth * bpp;
        return (h - 1U) * stride + w * bpp;
    }
    return 0;
}
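
// Worked examples of the transfer-length math above (comment only):
// - NV12 640x480: ySize = 640 * 480 = 307200, uvSize = 640 * 240 = 153600,
//   so the total is 460800 bytes (width * height * 3 / 2).
// - RGBA8888 640x480 with a full-resource box: (480 - 1) * 2560 + 640 * 4 =
//   1228800 bytes (width * height * 4); the last row only counts w * bpp
//   rather than a full stride.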

enum IovSyncDir {
    IOV_TO_LINEAR = 0,
    LINEAR_TO_IOV = 1,
};

static int sync_iov(PipeResEntry* res, uint64_t offset, const stream_renderer_box* box,
                    IovSyncDir dir) {
    stream_renderer_debug("offset: 0x%llx box: %u %u %u %u size %u x %u iovs %u linearSize %zu",
                          (unsigned long long)offset, box->x, box->y, box->w, box->h,
                          res->args.width, res->args.height, res->numIovs, res->linearSize);

    if (box->x > res->args.width || box->y > res->args.height) {
        stream_renderer_error("Box out of range of resource");
        return -EINVAL;
    }
    if (box->w == 0U || box->h == 0U) {
        stream_renderer_error("Empty transfer");
        return -EINVAL;
    }
    if (box->x + box->w > res->args.width) {
        stream_renderer_error("Box overflows resource width");
        return -EINVAL;
    }

    size_t linearBase = virgl_format_to_linear_base(
        res->args.format, res->args.width, res->args.height, box->x, box->y, box->w, box->h);
    size_t start = linearBase;
    // height - 1 in order to treat the (w * bpp) row specially
    // (i.e., the last row does not occupy the full stride)
    size_t length = virgl_format_to_total_xfer_len(
        res->args.format, res->args.width, res->args.height, box->x, box->y, box->w, box->h);
    size_t end = start + length;

    if (start == end) {
        stream_renderer_error("nothing to transfer");
        return -EINVAL;
    }

    if (end > res->linearSize) {
        stream_renderer_error("start + length overflows!");
        return -EINVAL;
    }

    uint32_t iovIndex = 0;
    size_t iovOffset = 0;
    size_t written = 0;
    char* linear = static_cast<char*>(res->linear);

    while (written < length) {
        if (iovIndex >= res->numIovs) {
            stream_renderer_error("write request overflowed numIovs");
            return -EINVAL;
        }

        const char* iovBase_const = static_cast<const char*>(res->iov[iovIndex].iov_base);
        char* iovBase = static_cast<char*>(res->iov[iovIndex].iov_base);
        size_t iovLen = res->iov[iovIndex].iov_len;
        size_t iovOffsetEnd = iovOffset + iovLen;

        auto lower_intersect = std::max(iovOffset, start);
        auto upper_intersect = std::min(iovOffsetEnd, end);
        if (lower_intersect < upper_intersect) {
            size_t toWrite = upper_intersect - lower_intersect;
            switch (dir) {
                case IOV_TO_LINEAR:
                    memcpy(linear + lower_intersect, iovBase_const + lower_intersect - iovOffset,
                           toWrite);
                    break;
                case LINEAR_TO_IOV:
                    memcpy(iovBase + lower_intersect - iovOffset, linear + lower_intersect,
                           toWrite);
                    break;
                default:
                    stream_renderer_error("Invalid synchronization dir");
                    return -EINVAL;
            }
            written += toWrite;
        }
        ++iovIndex;
        iovOffset += iovLen;
    }

    return 0;
}
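
// Example of the intersection logic above (comment only): with two iovs of
// 4096 bytes each and a transfer window [start, end) = [3000, 5000), the first
// iov contributes bytes [3000, 4096) and the second [4096, 5000), so the loop
// performs two memcpy calls of 1096 and 904 bytes respectively.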

static uint64_t convert32to64(uint32_t lo, uint32_t hi) {
    return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}

class CleanupThread {
  public:
    using GenericCleanup = std::function<void()>;

    CleanupThread() : mWorker([](CleanupTask task) {
        return std::visit([](auto&& work) {
            using T = std::decay_t<decltype(work)>;
            if constexpr (std::is_same_v<T, GenericCleanup>) {
                work();
                return android::base::WorkerProcessingResult::Continue;
            } else if constexpr (std::is_same_v<T, Exit>) {
                return android::base::WorkerProcessingResult::Stop;
            }
        }, std::move(task));
    }) {
        mWorker.start();
    }

    ~CleanupThread() { stop(); }

    // CleanupThread is neither copyable nor movable.
    CleanupThread(const CleanupThread& other) = delete;
    CleanupThread& operator=(const CleanupThread& other) = delete;
    CleanupThread(CleanupThread&& other) = delete;
    CleanupThread& operator=(CleanupThread&& other) = delete;

    void enqueueCleanup(GenericCleanup command) {
        mWorker.enqueue(std::move(command));
    }

    void stop() {
        mWorker.enqueue(Exit{});
        mWorker.join();
    }

  private:
    struct Exit {};
    using CleanupTask = std::variant<GenericCleanup, Exit>;
    android::base::WorkerThread<CleanupTask> mWorker;
};
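
// Illustrative usage of CleanupThread (a sketch, not code used verbatim below):
// the renderer owns a single instance and defers teardown work to it, e.g.
//   mCleanupThread->enqueueCleanup([resources = std::move(resources)]() mutable {
//       resources.clear();  // runs on the worker thread
//   });
// Queued work runs in FIFO order; stop() enqueues an Exit marker and joins, so
// cleanups enqueued earlier still complete before the thread exits.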

class PipeVirglRenderer {
  public:
    PipeVirglRenderer() = default;

    int init(void* cookie, gfxstream::host::FeatureSet features,
             stream_renderer_fence_callback fence_callback) {
        stream_renderer_debug("cookie: %p", cookie);
        mCookie = cookie;
        mFeatures = features;
        mFenceCallback = fence_callback;
        mVirtioGpuOps = android_getVirtioGpuOps();
        if (!mVirtioGpuOps) {
            stream_renderer_error("Could not get virtio gpu ops!");
            return -EINVAL;
        }
        mAddressSpaceDeviceControlOps = get_address_space_device_control_ops();
        if (!mAddressSpaceDeviceControlOps) {
            stream_renderer_error("Could not get address space device control ops!");
            return -EINVAL;
        }
        mVirtioGpuTimelines = VirtioGpuTimelines::create(true);

#if !defined(_WIN32)
        mPageSize = getpagesize();
#endif

        mCleanupThread.reset(new CleanupThread());

        return 0;
    }

    void teardown() {
        mCleanupThread.reset();
    }

    int resetPipe(GoldfishHwPipe* hwPipe, GoldfishHostPipe* hostPipe) {
        stream_renderer_debug("Want to reset hwpipe %p to hostpipe %p", hwPipe, hostPipe);
        VirtioGpuCtxId asCtxId = (VirtioGpuCtxId)(uintptr_t)hwPipe;
        auto it = mContexts.find(asCtxId);
        if (it == mContexts.end()) {
            stream_renderer_error("fatal: pipe id %u not found", asCtxId);
            return -EINVAL;
        }

        auto& entry = it->second;
        stream_renderer_debug("ctxid: %u prev hostpipe: %p", asCtxId, entry.hostPipe);
        entry.hostPipe = hostPipe;
        stream_renderer_debug("ctxid: %u next hostpipe: %p", asCtxId, entry.hostPipe);

        // Also update any resources associated with it
        auto resourcesIt = mContextResources.find(asCtxId);

        if (resourcesIt == mContextResources.end()) {
            return 0;
        }

        const auto& resIds = resourcesIt->second;

        for (auto resId : resIds) {
            auto resEntryIt = mResources.find(resId);
            if (resEntryIt == mResources.end()) {
                stream_renderer_error("entry with res id %u not found", resId);
                return -EINVAL;
            }

            auto& resEntry = resEntryIt->second;
            resEntry.hostPipe = hostPipe;
        }

        return 0;
    }

    int createContext(VirtioGpuCtxId ctx_id, uint32_t nlen, const char* name,
                      uint32_t context_init) {
        std::string contextName(name, nlen);

        stream_renderer_debug("ctxid: %u len: %u name: %s", ctx_id, nlen, contextName.c_str());
        auto ops = ensureAndGetServiceOps();
        auto hostPipe = ops->guest_open_with_flags(reinterpret_cast<GoldfishHwPipe*>(ctx_id),
                                                   0x1 /* is virtio */);

        if (!hostPipe) {
            stream_renderer_error("failed to create hw pipe!");
            return -EINVAL;
        }
        std::unordered_map<uint32_t, uint32_t> map;

        PipeCtxEntry res = {
            std::move(contextName),  // contextName
            context_init,            // capsetId
            ctx_id,                  // ctxId
            hostPipe,                // hostPipe
            0,                       // fence
            0,                       // AS handle
            false,                   // does not have an AS handle
            map,                     // resourceId --> ASG handle map
        };

        stream_renderer_debug("initial host pipe for ctxid %u: %p", ctx_id, hostPipe);
        mContexts[ctx_id] = res;
        android_onGuestGraphicsProcessCreate(ctx_id);
        return 0;
    }

    int destroyContext(VirtioGpuCtxId handle) {
        stream_renderer_debug("ctxid: %u", handle);

        auto it = mContexts.find(handle);
        if (it == mContexts.end()) {
            stream_renderer_error("could not find context handle %u", handle);
            return -EINVAL;
        }

        if (it->second.hasAddressSpaceHandle) {
            for (auto const& [resourceId, handle] : it->second.addressSpaceHandles) {
                // Note: this can hang as is but this has only been observed to
                // happen during shutdown. See b/329287602#comment8.
                mAddressSpaceDeviceControlOps->destroy_handle(handle);
            }
        }

        auto hostPipe = it->second.hostPipe;
        if (!hostPipe) {
            stream_renderer_error("0 is not a valid hostpipe");
            return -EINVAL;
        }

        auto ops = ensureAndGetServiceOps();
        ops->guest_close(hostPipe, GOLDFISH_PIPE_CLOSE_GRACEFUL);

        android_cleanupProcGLObjects(handle);
        mContexts.erase(it);
        return 0;
    }

    int setContextAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t handle,
                                           uint32_t resourceId) {
        auto ctxIt = mContexts.find(ctxId);
        if (ctxIt == mContexts.end()) {
            stream_renderer_error("ctx id %u is not found", ctxId);
            return -EINVAL;
        }

        auto& ctxEntry = ctxIt->second;
        ctxEntry.addressSpaceHandle = handle;
        ctxEntry.hasAddressSpaceHandle = true;
        ctxEntry.addressSpaceHandles[resourceId] = handle;
        return 0;
    }

    uint32_t getAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t resourceId) {
        auto ctxIt = mContexts.find(ctxId);
        if (ctxIt == mContexts.end()) {
            stream_renderer_error("ctx id %u is not found", ctxId);
            return -EINVAL;
        }

        auto& ctxEntry = ctxIt->second;

        if (!ctxEntry.addressSpaceHandles.count(resourceId)) {
            stream_renderer_error("ASG context with resource id %u not found", resourceId);
            return -EINVAL;
        }

        return ctxEntry.addressSpaceHandles[resourceId];
    }

#define DECODE(variable, type, input) \
    type variable = {};               \
    memcpy(&variable, input, sizeof(type));

    int addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dwords) {
        DECODE(header, gfxstream::gfxstreamHeader, dwords)

        switch (header.opCode) {
            case GFXSTREAM_CONTEXT_CREATE: {
                DECODE(contextCreate, gfxstream::gfxstreamContextCreate, dwords)

                auto resEntryIt = mResources.find(contextCreate.resourceId);
                if (resEntryIt == mResources.end()) {
                    stream_renderer_error("ASG coherent resource %u not found",
                                          contextCreate.resourceId);
                    return -EINVAL;
                }

                auto ctxIt = mContexts.find(ctxId);
                if (ctxIt == mContexts.end()) {
                    stream_renderer_error("ctx id %u not found", ctxId);
                    return -EINVAL;
                }

                auto& ctxEntry = ctxIt->second;
                auto& resEntry = resEntryIt->second;

                std::string name = ctxEntry.name + "-" + std::to_string(contextCreate.resourceId);

                // Note: resource ids can not be used as ASG handles because ASGs may outlive the
                // containing resource due to asynchronous ASG destruction.
                uint32_t handle = mAddressSpaceDeviceControlOps->gen_handle();

                struct AddressSpaceCreateInfo createInfo = {
                    .handle = handle,
                    .type = android::emulation::VirtioGpuGraphics,
                    .createRenderThread = true,
                    .externalAddr = resEntry.hva,
                    .externalAddrSize = resEntry.hvaSize,
                    .virtioGpuContextId = ctxId,
                    .virtioGpuCapsetId = ctxEntry.capsetId,
                    .contextName = name.c_str(),
                    .contextNameSize = static_cast<uint32_t>(ctxEntry.name.size()),
                };

                mAddressSpaceDeviceControlOps->create_instance(createInfo);
                if (setContextAddressSpaceHandleLocked(ctxId, handle, contextCreate.resourceId)) {
                    return -EINVAL;
                }
                break;
            }
            case GFXSTREAM_CONTEXT_PING: {
                DECODE(contextPing, gfxstream::gfxstreamContextPing, dwords)

                struct android::emulation::AddressSpaceDevicePingInfo ping = {0};
                ping.metadata = ASG_NOTIFY_AVAILABLE;

                mAddressSpaceDeviceControlOps->ping_at_hva(
                    getAddressSpaceHandleLocked(ctxId, contextPing.resourceId), &ping);
                break;
            }
            default:
                break;
        }

        return 0;
    }

    int submitCmd(struct stream_renderer_command* cmd) {
        if (!cmd) return -EINVAL;

        void* buffer = reinterpret_cast<void*>(cmd->cmd);

        VirtioGpuRing ring = VirtioGpuRingGlobal{};
        stream_renderer_debug("ctx: %u, ring: %s buffer: %p dwords: %d", cmd->ctx_id,
                              to_string(ring).c_str(), buffer, cmd->cmd_size);

        if (!buffer) {
            stream_renderer_error("error: buffer null");
            return -EINVAL;
        }

        if (cmd->cmd_size < 4) {
            stream_renderer_error("error: not enough bytes (got %d)", cmd->cmd_size);
            return -EINVAL;
        }

        DECODE(header, gfxstream::gfxstreamHeader, buffer);
        switch (header.opCode) {
            case GFXSTREAM_CONTEXT_CREATE:
            case GFXSTREAM_CONTEXT_PING:
            case GFXSTREAM_CONTEXT_PING_WITH_RESPONSE:
                if (addressSpaceProcessCmd(cmd->ctx_id, (uint32_t*)buffer)) {
                    return -EINVAL;
                }
                break;
            case GFXSTREAM_CREATE_EXPORT_SYNC: {
                DECODE(exportSync, gfxstream::gfxstreamCreateExportSync, buffer)

                uint64_t sync_handle =
                    convert32to64(exportSync.syncHandleLo, exportSync.syncHandleHi);

                stream_renderer_debug("wait for gpu ring %s", to_string(ring).c_str());
                auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
                mVirtioGpuOps->async_wait_for_gpu_with_cb(sync_handle, [this, taskId] {
                    mVirtioGpuTimelines->notifyTaskCompletion(taskId);
                });
                break;
            }
            case GFXSTREAM_CREATE_EXPORT_SYNC_VK:
            case GFXSTREAM_CREATE_IMPORT_SYNC_VK: {
                // The guest sync export assumes fence context support and always uses
                // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
                // the same ring as the fence created for the virtio gpu command or the
                // fence may be signaled without properly waiting for the task to complete.
                ring = VirtioGpuRingContextSpecific{
                    .mCtxId = cmd->ctx_id,
                    .mRingIdx = 0,
                };

                DECODE(exportSyncVK, gfxstream::gfxstreamCreateExportSyncVK, buffer)

                uint64_t device_handle =
                    convert32to64(exportSyncVK.deviceHandleLo, exportSyncVK.deviceHandleHi);

                uint64_t fence_handle =
                    convert32to64(exportSyncVK.fenceHandleLo, exportSyncVK.fenceHandleHi);

                stream_renderer_debug("wait for gpu ring %s", to_string(ring).c_str());
                auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
                mVirtioGpuOps->async_wait_for_gpu_vulkan_with_cb(
                    device_handle, fence_handle,
                    [this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
                break;
            }
            case GFXSTREAM_CREATE_QSRI_EXPORT_VK: {
                // The guest QSRI export assumes fence context support and always uses
                // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
                // the same ring as the fence created for the virtio gpu command or the
                // fence may be signaled without properly waiting for the task to complete.
                ring = VirtioGpuRingContextSpecific{
                    .mCtxId = cmd->ctx_id,
                    .mRingIdx = 0,
                };

                DECODE(exportQSRI, gfxstream::gfxstreamCreateQSRIExportVK, buffer)

                uint64_t image_handle =
                    convert32to64(exportQSRI.imageHandleLo, exportQSRI.imageHandleHi);

                stream_renderer_debug("wait for gpu vk qsri ring %s image 0x%llx",
                                      to_string(ring).c_str(), (unsigned long long)image_handle);
                auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
                mVirtioGpuOps->async_wait_for_gpu_vulkan_qsri_with_cb(image_handle, [this, taskId] {
                    mVirtioGpuTimelines->notifyTaskCompletion(taskId);
                });
                break;
            }
            case GFXSTREAM_PLACEHOLDER_COMMAND_VK: {
                // Do nothing, this is a placeholder command
                break;
            }
            default:
                return -EINVAL;
        }

        return 0;
    }

    int createFence(uint64_t fence_id, const VirtioGpuRing& ring) {
        stream_renderer_debug("fenceid: %llu ring: %s", (unsigned long long)fence_id,
                              to_string(ring).c_str());

        struct {
            FenceCompletionCallback operator()(const VirtioGpuRingGlobal&) {
                return [renderer = mRenderer, fenceId = mFenceId] {
                    struct stream_renderer_fence fence = {0};
                    fence.fence_id = fenceId;
                    fence.flags = STREAM_RENDERER_FLAG_FENCE;
                    renderer->mFenceCallback(renderer->mCookie, &fence);
                };
            }
            FenceCompletionCallback operator()(const VirtioGpuRingContextSpecific& ring) {
                return [renderer = mRenderer, fenceId = mFenceId, ring] {
                    struct stream_renderer_fence fence = {0};
                    fence.fence_id = fenceId;
                    fence.flags = STREAM_RENDERER_FLAG_FENCE | STREAM_RENDERER_FLAG_FENCE_RING_IDX;
                    fence.ctx_id = ring.mCtxId;
                    fence.ring_idx = ring.mRingIdx;
                    renderer->mFenceCallback(renderer->mCookie, &fence);
                };
            }

            PipeVirglRenderer* mRenderer;
            VirtioGpuTimelines::FenceId mFenceId;
        } visitor{
            .mRenderer = this,
            .mFenceId = fence_id,
        };
        FenceCompletionCallback callback = std::visit(visitor, ring);
        if (!callback) {
            return -EINVAL;
        }
        mVirtioGpuTimelines->enqueueFence(ring, fence_id, std::move(callback));

        return 0;
    }

    void poll() { mVirtioGpuTimelines->poll(); }

    enum pipe_texture_target {
        PIPE_BUFFER,
        PIPE_TEXTURE_1D,
        PIPE_TEXTURE_2D,
        PIPE_TEXTURE_3D,
        PIPE_TEXTURE_CUBE,
        PIPE_TEXTURE_RECT,
        PIPE_TEXTURE_1D_ARRAY,
        PIPE_TEXTURE_2D_ARRAY,
        PIPE_TEXTURE_CUBE_ARRAY,
        PIPE_MAX_TEXTURE_TYPES,
    };

    /**
     * Resource binding flags -- state tracker must specify in advance all
     * the ways a resource might be used.
     */
#define PIPE_BIND_DEPTH_STENCIL (1 << 0)        /* create_surface */
#define PIPE_BIND_RENDER_TARGET (1 << 1)        /* create_surface */
#define PIPE_BIND_BLENDABLE (1 << 2)            /* create_surface */
#define PIPE_BIND_SAMPLER_VIEW (1 << 3)         /* create_sampler_view */
#define PIPE_BIND_VERTEX_BUFFER (1 << 4)        /* set_vertex_buffers */
#define PIPE_BIND_INDEX_BUFFER (1 << 5)         /* draw_elements */
#define PIPE_BIND_CONSTANT_BUFFER (1 << 6)      /* set_constant_buffer */
#define PIPE_BIND_DISPLAY_TARGET (1 << 7)       /* flush_front_buffer */
/* gap */
#define PIPE_BIND_STREAM_OUTPUT (1 << 10)       /* set_stream_output_buffers */
#define PIPE_BIND_CURSOR (1 << 11)              /* mouse cursor */
#define PIPE_BIND_CUSTOM (1 << 12)              /* state-tracker/winsys usages */
#define PIPE_BIND_GLOBAL (1 << 13)              /* set_global_binding */
#define PIPE_BIND_SHADER_BUFFER (1 << 14)       /* set_shader_buffers */
#define PIPE_BIND_SHADER_IMAGE (1 << 15)        /* set_shader_images */
#define PIPE_BIND_COMPUTE_RESOURCE (1 << 16)    /* set_compute_resources */
#define PIPE_BIND_COMMAND_ARGS_BUFFER (1 << 17) /* pipe_draw_info.indirect */
#define PIPE_BIND_QUERY_BUFFER (1 << 18)        /* get_query_result_resource */

    ResType getResourceType(const struct stream_renderer_resource_create_args& args) const {
        if (args.target == PIPE_BUFFER) {
            return ResType::PIPE;
        }

        if (args.format != VIRGL_FORMAT_R8_UNORM) {
            return ResType::COLOR_BUFFER;
        }
        if (args.bind & VIRGL_BIND_SAMPLER_VIEW) {
            return ResType::COLOR_BUFFER;
        }
        if (args.bind & VIRGL_BIND_RENDER_TARGET) {
            return ResType::COLOR_BUFFER;
        }
        if (args.bind & VIRGL_BIND_SCANOUT) {
            return ResType::COLOR_BUFFER;
        }
        if (args.bind & VIRGL_BIND_CURSOR) {
            return ResType::COLOR_BUFFER;
        }
        if (!(args.bind & VIRGL_BIND_LINEAR)) {
            return ResType::COLOR_BUFFER;
        }

        return ResType::BUFFER;
    }
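
    // Classification examples (comment only): a PIPE_BUFFER target is always a
    // PIPE resource; an R8_UNORM resource whose bind flags include
    // VIRGL_BIND_LINEAR and none of the sampler/render-target/scanout/cursor
    // bits is treated as a plain data BUFFER; everything else (any other format,
    // or any of those bind bits) becomes a COLOR_BUFFER.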

    void handleCreateResourceBuffer(struct stream_renderer_resource_create_args* args) {
        stream_renderer_debug("w:%u h:%u handle:%u", args->width, args->height, args->handle);
        mVirtioGpuOps->create_buffer_with_handle(args->width * args->height, args->handle);
    }

    void handleCreateResourceColorBuffer(struct stream_renderer_resource_create_args* args) {
        stream_renderer_debug("w h %u %u resid %u -> CreateColorBufferWithHandle", args->width,
                              args->height, args->handle);

        const uint32_t glformat = virgl_format_to_gl(args->format);
        const uint32_t fwkformat = virgl_format_to_fwk_format(args->format);
        const bool linear = !!(args->bind & VIRGL_BIND_LINEAR);
        mVirtioGpuOps->create_color_buffer_with_handle(args->width, args->height, glformat,
                                                       fwkformat, args->handle, linear);
        mVirtioGpuOps->set_guest_managed_color_buffer_lifetime(true /* guest manages lifetime */);
        mVirtioGpuOps->open_color_buffer(args->handle);
    }

    int createResource(struct stream_renderer_resource_create_args* args, struct iovec* iov,
                       uint32_t num_iovs) {
        stream_renderer_debug("handle: %u. num iovs: %u", args->handle, num_iovs);

        const auto resType = getResourceType(*args);
        switch (resType) {
            case ResType::PIPE:
                break;
            case ResType::BUFFER:
                handleCreateResourceBuffer(args);
                break;
            case ResType::COLOR_BUFFER:
                handleCreateResourceColorBuffer(args);
                break;
        }

        PipeResEntry e;
        e.args = *args;
        e.linear = 0;
        e.hostPipe = 0;
        e.hva = nullptr;
        e.hvaSize = 0;
        e.blobId = 0;
        e.blobMem = 0;
        e.type = resType;
        allocResource(e, iov, num_iovs);

        mResources[args->handle] = e;
        return 0;
    }

    void unrefResource(uint32_t toUnrefId) {
        stream_renderer_debug("handle: %u", toUnrefId);

        auto it = mResources.find(toUnrefId);
        if (it == mResources.end()) return;

        auto contextsIt = mResourceContexts.find(toUnrefId);
        if (contextsIt != mResourceContexts.end()) {
            mResourceContexts.erase(contextsIt->first);
        }

        for (auto& ctxIdResources : mContextResources) {
            detachResourceLocked(ctxIdResources.first, toUnrefId);
        }

        auto& entry = it->second;
        switch (entry.type) {
            case ResType::PIPE:
                break;
            case ResType::BUFFER:
                mVirtioGpuOps->close_buffer(toUnrefId);
                break;
            case ResType::COLOR_BUFFER:
                mVirtioGpuOps->close_color_buffer(toUnrefId);
                break;
        }

        if (entry.linear) {
            free(entry.linear);
            entry.linear = nullptr;
        }

        if (entry.iov) {
            free(entry.iov);
            entry.iov = nullptr;
            entry.numIovs = 0;
        }

        entry.hva = nullptr;
        entry.hvaSize = 0;
        entry.blobId = 0;

        mResources.erase(it);
    }

    int attachIov(int resId, iovec* iov, int num_iovs) {
        stream_renderer_debug("resid: %d numiovs: %d", resId, num_iovs);

        auto it = mResources.find(resId);
        if (it == mResources.end()) return ENOENT;

        auto& entry = it->second;
        stream_renderer_debug("res linear: %p", entry.linear);
        if (!entry.linear) allocResource(entry, iov, num_iovs);

        stream_renderer_debug("done");
        return 0;
    }

    void detachIov(int resId, iovec** iov, int* num_iovs) {
        auto it = mResources.find(resId);
        if (it == mResources.end()) return;

        auto& entry = it->second;

        if (num_iovs) {
            *num_iovs = entry.numIovs;
            stream_renderer_debug("resid: %d numIovs: %d", resId, *num_iovs);
        } else {
            stream_renderer_debug("resid: %d numIovs: 0", resId);
        }

        entry.numIovs = 0;

        if (entry.iov) free(entry.iov);
        entry.iov = nullptr;

        if (iov) {
            *iov = entry.iov;
        }

        allocResource(entry, entry.iov, entry.numIovs);
        stream_renderer_debug("done");
    }

    int handleTransferReadPipe(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
        if (res->type != ResType::PIPE) {
            stream_renderer_error("resid: %d not a PIPE resource", res->args.handle);
            return -EINVAL;
        }

        // Do the pipe service op here, if there is an associated hostpipe.
        auto hostPipe = res->hostPipe;
        if (!hostPipe) return -EINVAL;

        auto ops = ensureAndGetServiceOps();

        size_t readBytes = 0;
        size_t wantedBytes = readBytes + (size_t)box->w;

        while (readBytes < wantedBytes) {
            GoldfishPipeBuffer buf = {
                ((char*)res->linear) + box->x + readBytes,
                wantedBytes - readBytes,
            };
            auto status = ops->guest_recv(hostPipe, &buf, 1);

            if (status > 0) {
                readBytes += status;
            } else if (status == kPipeTryAgain) {
                ops->wait_guest_recv(hostPipe);
            } else {
                return EIO;
            }
        }

        return 0;
    }

    int handleTransferWritePipe(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
        if (res->type != ResType::PIPE) {
            stream_renderer_error("resid: %d not a PIPE resource", res->args.handle);
            return -EINVAL;
        }

        // Do the pipe service op here, if there is an associated hostpipe.
        auto hostPipe = res->hostPipe;
        if (!hostPipe) {
            stream_renderer_error("No hostPipe");
            return -EINVAL;
        }

        stream_renderer_debug("resid: %d offset: 0x%llx hostpipe: %p", res->args.handle,
                              (unsigned long long)offset, hostPipe);

        auto ops = ensureAndGetServiceOps();

        size_t writtenBytes = 0;
        size_t wantedBytes = (size_t)box->w;

        while (writtenBytes < wantedBytes) {
            GoldfishPipeBuffer buf = {
                ((char*)res->linear) + box->x + writtenBytes,
                wantedBytes - writtenBytes,
            };

            // guest_send can now reallocate the pipe.
            void* hostPipeBefore = hostPipe;
            auto status = ops->guest_send(&hostPipe, &buf, 1);
            if (hostPipe != hostPipeBefore) {
                if (resetPipe((GoldfishHwPipe*)(uintptr_t)(res->ctxId), hostPipe)) {
                    return -EINVAL;
                }

                auto it = mResources.find(res->args.handle);
                res = &it->second;
            }

            if (status > 0) {
                writtenBytes += status;
            } else if (status == kPipeTryAgain) {
                ops->wait_guest_send(hostPipe);
            } else {
                return EIO;
            }
        }

        return 0;
    }

    int handleTransferReadBuffer(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
        if (res->type != ResType::BUFFER) {
            stream_renderer_error("resid: %d not a BUFFER resource", res->args.handle);
            return -EINVAL;
        }

        mVirtioGpuOps->read_buffer(res->args.handle, 0, res->args.width * res->args.height,
                                   res->linear);
        return 0;
    }

    int handleTransferWriteBuffer(PipeResEntry* res, uint64_t offset, stream_renderer_box* box) {
        if (res->type != ResType::BUFFER) {
            stream_renderer_error("resid: %d not a BUFFER resource", res->args.handle);
            return -EINVAL;
        }

        mVirtioGpuOps->update_buffer(res->args.handle, 0, res->args.width * res->args.height,
                                     res->linear);
        return 0;
    }

    int handleTransferReadColorBuffer(PipeResEntry* res, uint64_t offset,
                                      stream_renderer_box* box) {
        if (res->type != ResType::COLOR_BUFFER) {
            stream_renderer_error("resid: %d not a COLOR_BUFFER resource", res->args.handle);
            return -EINVAL;
        }

        auto glformat = virgl_format_to_gl(res->args.format);
        auto gltype = gl_format_to_natural_type(glformat);

        // We always xfer the whole thing again from GL
        // since it's fiddly to calc / copy-out subregions
        if (virgl_format_is_yuv(res->args.format)) {
            mVirtioGpuOps->read_color_buffer_yuv(res->args.handle, 0, 0, res->args.width,
                                                 res->args.height, res->linear, res->linearSize);
        } else {
            mVirtioGpuOps->read_color_buffer(res->args.handle, 0, 0, res->args.width,
                                             res->args.height, glformat, gltype, res->linear);
        }

        return 0;
    }

    int handleTransferWriteColorBuffer(PipeResEntry* res, uint64_t offset,
                                       stream_renderer_box* box) {
        if (res->type != ResType::COLOR_BUFFER) {
            stream_renderer_error("resid: %d not a COLOR_BUFFER resource", res->args.handle);
            return -EINVAL;
        }

        auto glformat = virgl_format_to_gl(res->args.format);
        auto gltype = gl_format_to_natural_type(glformat);

        // We always xfer the whole thing again to GL
        // since it's fiddly to calc / copy-out subregions
        mVirtioGpuOps->update_color_buffer(res->args.handle, 0, 0, res->args.width,
                                           res->args.height, glformat, gltype, res->linear);
        return 0;
    }

    int transferReadIov(int resId, uint64_t offset, stream_renderer_box* box, struct iovec* iov,
                        int iovec_cnt) {
        auto it = mResources.find(resId);
        if (it == mResources.end()) return EINVAL;

        int ret = 0;

        auto& entry = it->second;
        switch (entry.type) {
            case ResType::PIPE:
                ret = handleTransferReadPipe(&entry, offset, box);
                break;
            case ResType::BUFFER:
                ret = handleTransferReadBuffer(&entry, offset, box);
                break;
            case ResType::COLOR_BUFFER:
                ret = handleTransferReadColorBuffer(&entry, offset, box);
                break;
        }

        if (ret != 0) {
            return ret;
        }

        if (iovec_cnt) {
            PipeResEntry e = {
                entry.args, iov, (uint32_t)iovec_cnt, entry.linear, entry.linearSize,
            };
            ret = sync_iov(&e, offset, box, LINEAR_TO_IOV);
        } else {
            ret = sync_iov(&entry, offset, box, LINEAR_TO_IOV);
        }

        return ret;
    }

    int transferWriteIov(int resId, uint64_t offset, stream_renderer_box* box, struct iovec* iov,
                         int iovec_cnt) {
        auto it = mResources.find(resId);
        if (it == mResources.end()) return EINVAL;

        auto& entry = it->second;

        int ret = 0;
        if (iovec_cnt) {
            PipeResEntry e = {
                entry.args, iov, (uint32_t)iovec_cnt, entry.linear, entry.linearSize,
            };
            ret = sync_iov(&e, offset, box, IOV_TO_LINEAR);
        } else {
            ret = sync_iov(&entry, offset, box, IOV_TO_LINEAR);
        }

        if (ret != 0) {
            return ret;
        }

        switch (entry.type) {
            case ResType::PIPE:
                ret = handleTransferWritePipe(&entry, offset, box);
                break;
            case ResType::BUFFER:
                ret = handleTransferWriteBuffer(&entry, offset, box);
                break;
            case ResType::COLOR_BUFFER:
                ret = handleTransferWriteColorBuffer(&entry, offset, box);
                break;
        }

        return ret;
    }

    void getCapset(uint32_t set, uint32_t* max_size) {
        switch (set) {
            case VIRTGPU_CAPSET_GFXSTREAM_VULKAN:
                *max_size = sizeof(struct gfxstream::vulkanCapset);
                break;
            case VIRTGPU_CAPSET_GFXSTREAM_MAGMA:
                *max_size = sizeof(struct gfxstream::magmaCapset);
                break;
            case VIRTGPU_CAPSET_GFXSTREAM_GLES:
                *max_size = sizeof(struct gfxstream::glesCapset);
                break;
            case VIRTGPU_CAPSET_GFXSTREAM_COMPOSER:
                *max_size = sizeof(struct gfxstream::composerCapset);
                break;
            default:
                stream_renderer_error("Incorrect capability set specified (%u)", set);
        }
    }

    void fillCaps(uint32_t set, void* caps) {
        switch (set) {
            case VIRTGPU_CAPSET_GFXSTREAM_VULKAN: {
                struct gfxstream::vulkanCapset* capset =
                    reinterpret_cast<struct gfxstream::vulkanCapset*>(caps);

                memset(capset, 0, sizeof(*capset));

                capset->protocolVersion = 1;
                capset->ringSize = 12288;
                capset->bufferSize = 1048576;

                auto vk_emu = gfxstream::vk::getGlobalVkEmulation();
                if (vk_emu && vk_emu->live && vk_emu->representativeColorBufferMemoryTypeInfo) {
                    capset->colorBufferMemoryIndex =
                        vk_emu->representativeColorBufferMemoryTypeInfo->guestMemoryTypeIndex;
                }

                capset->noRenderControlEnc = 1;
                capset->blobAlignment = mPageSize;
                if (vk_emu && vk_emu->live) {
                    capset->deferredMapping = 1;
                }
                break;
            }
            case VIRTGPU_CAPSET_GFXSTREAM_MAGMA: {
                struct gfxstream::magmaCapset* capset =
                    reinterpret_cast<struct gfxstream::magmaCapset*>(caps);

                capset->protocolVersion = 1;
                capset->ringSize = 12288;
                capset->bufferSize = 1048576;
                capset->blobAlignment = mPageSize;
                break;
            }
            case VIRTGPU_CAPSET_GFXSTREAM_GLES: {
                struct gfxstream::glesCapset* capset =
                    reinterpret_cast<struct gfxstream::glesCapset*>(caps);

                capset->protocolVersion = 1;
                capset->ringSize = 12288;
                capset->bufferSize = 1048576;
                capset->blobAlignment = mPageSize;
                break;
            }
            case VIRTGPU_CAPSET_GFXSTREAM_COMPOSER: {
                struct gfxstream::composerCapset* capset =
                    reinterpret_cast<struct gfxstream::composerCapset*>(caps);

                capset->protocolVersion = 1;
                capset->ringSize = 12288;
                capset->bufferSize = 1048576;
                capset->blobAlignment = mPageSize;
                break;
            }
            default:
                stream_renderer_error("Incorrect capability set specified");
        }
    }

    void attachResource(uint32_t ctxId, uint32_t resId) {
        stream_renderer_debug("ctxid: %u resid: %u", ctxId, resId);

        auto resourcesIt = mContextResources.find(ctxId);

        if (resourcesIt == mContextResources.end()) {
            std::vector<VirtioGpuResId> ids;
            ids.push_back(resId);
            mContextResources[ctxId] = ids;
        } else {
            auto& ids = resourcesIt->second;
            auto idIt = std::find(ids.begin(), ids.end(), resId);
            if (idIt == ids.end()) ids.push_back(resId);
        }

        auto contextsIt = mResourceContexts.find(resId);

        if (contextsIt == mResourceContexts.end()) {
            std::vector<VirtioGpuCtxId> ids;
            ids.push_back(ctxId);
            mResourceContexts[resId] = ids;
        } else {
            auto& ids = contextsIt->second;
            auto idIt = std::find(ids.begin(), ids.end(), ctxId);
            if (idIt == ids.end()) ids.push_back(ctxId);
        }

        // Associate the host pipe of the resource entry with the host pipe of
        // the context entry. That is, the last context to call attachResource
        // wins if there is any conflict.
        auto ctxEntryIt = mContexts.find(ctxId);
        auto resEntryIt = mResources.find(resId);

        if (ctxEntryIt == mContexts.end() || resEntryIt == mResources.end()) return;

        stream_renderer_debug("hostPipe: %p", ctxEntryIt->second.hostPipe);
        resEntryIt->second.hostPipe = ctxEntryIt->second.hostPipe;
        resEntryIt->second.ctxId = ctxId;
    }

    void detachResource(uint32_t ctxId, uint32_t toUnrefId) {
        stream_renderer_debug("ctxid: %u resid: %u", ctxId, toUnrefId);
        detachResourceLocked(ctxId, toUnrefId);
    }

    int getResourceInfo(uint32_t resId, struct stream_renderer_resource_info* info) {
        stream_renderer_debug("resid: %u", resId);
        if (!info) return EINVAL;

        auto it = mResources.find(resId);
        if (it == mResources.end()) return ENOENT;

        auto& entry = it->second;

        uint32_t bpp = 4U;
        switch (entry.args.format) {
            case VIRGL_FORMAT_B8G8R8A8_UNORM:
                info->drm_fourcc = DRM_FORMAT_ARGB8888;
                break;
            case VIRGL_FORMAT_B5G6R5_UNORM:
                info->drm_fourcc = DRM_FORMAT_RGB565;
                bpp = 2U;
                break;
            case VIRGL_FORMAT_R8G8B8A8_UNORM:
                info->drm_fourcc = DRM_FORMAT_ABGR8888;
                break;
            case VIRGL_FORMAT_R8G8B8X8_UNORM:
                info->drm_fourcc = DRM_FORMAT_XBGR8888;
                break;
            case VIRGL_FORMAT_R8_UNORM:
                info->drm_fourcc = DRM_FORMAT_R8;
                bpp = 1U;
                break;
            default:
                return EINVAL;
        }

        info->stride = align_up(entry.args.width * bpp, 16U);
        info->virgl_format = entry.args.format;
        info->handle = entry.args.handle;
        info->height = entry.args.height;
        info->width = entry.args.width;
        info->depth = entry.args.depth;
        info->flags = entry.args.flags;
        info->tex_id = 0;
        return 0;
    }

    void flushResource(uint32_t res_handle) {
        auto taskId = mVirtioGpuTimelines->enqueueTask(VirtioGpuRingGlobal{});
        mVirtioGpuOps->async_post_color_buffer(
            res_handle, [this, taskId](std::shared_future<void> waitForGpu) {
                waitForGpu.wait();
                mVirtioGpuTimelines->notifyTaskCompletion(taskId);
            });
    }

createRingBlob(PipeResEntry & entry,uint32_t res_handle,const struct stream_renderer_create_blob * create_blob,const struct stream_renderer_handle * handle)1648 int createRingBlob(PipeResEntry& entry, uint32_t res_handle,
1649 const struct stream_renderer_create_blob* create_blob,
1650 const struct stream_renderer_handle* handle) {
1651 if (mFeatures.ExternalBlob.enabled) {
1652 std::string name = "shared-memory-" + std::to_string(res_handle);
1653 auto shmem = std::make_unique<SharedMemory>(name, create_blob->size);
1654 int ret = shmem->create(0600);
1655 if (ret) {
1656 stream_renderer_error("Failed to create shared memory blob");
1657 return ret;
1658 }
1659
1660 entry.hva = shmem->get();
1661 entry.ringBlob = std::make_shared<RingBlob>(std::move(shmem));
1662
1663 } else {
1664 auto mem = std::make_unique<AlignedMemory>(mPageSize, create_blob->size);
1665 if (mem->addr == nullptr) {
1666 stream_renderer_error("Failed to allocate ring blob");
1667 return -ENOMEM;
1668 }
1669
1670 entry.hva = mem->addr;
1671 entry.ringBlob = std::make_shared<RingBlob>(std::move(mem));
1672 }
1673
1674 entry.hvaSize = create_blob->size;
1675 entry.externalAddr = true;
1676 entry.caching = STREAM_RENDERER_MAP_CACHE_CACHED;
1677
1678 return 0;
1679 }
1680
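    // Creates a blob resource. Blob id 0 is reserved for the ring blob. Otherwise,
    // with ExternalBlob enabled the blob is backed by a descriptor tracked by
    // BlobManager (either imported from a guest handle or previously registered by
    // the host); without ExternalBlob it is backed by a host mapping previously
    // registered with BlobManager.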
createBlob(uint32_t ctx_id,uint32_t res_handle,const struct stream_renderer_create_blob * create_blob,const struct stream_renderer_handle * handle)1681 int createBlob(uint32_t ctx_id, uint32_t res_handle,
1682 const struct stream_renderer_create_blob* create_blob,
1683 const struct stream_renderer_handle* handle) {
1684 stream_renderer_debug("ctx:%u res:%u blob-id:%u blob-size:%u", ctx_id, res_handle,
1685 create_blob->blob_id, create_blob->size);
1686
1687 PipeResEntry e;
1688 struct stream_renderer_resource_create_args args = {0};
1689 e.args = args;
1690 e.hostPipe = 0;
1691
1692 if (create_blob->blob_id == 0) {
1693 int ret = createRingBlob(e, res_handle, create_blob, handle);
1694 if (ret) {
1695 return ret;
1696 }
1697 } else if (mFeatures.ExternalBlob.enabled) {
1698 if (create_blob->blob_mem == STREAM_BLOB_MEM_GUEST &&
1699 (create_blob->blob_flags & STREAM_BLOB_FLAG_CREATE_GUEST_HANDLE)) {
1700 #if defined(__linux__) || defined(__QNX__)
1701 ManagedDescriptor managedHandle(handle->os_handle);
1702 BlobManager::get()->addDescriptorInfo(ctx_id, create_blob->blob_id,
1703 std::move(managedHandle), handle->handle_type,
1704 0, std::nullopt);
1705
1706 e.caching = STREAM_RENDERER_MAP_CACHE_CACHED;
1707 #else
1708 return -EINVAL;
1709 #endif
1710 } else {
1711 auto descriptorInfoOpt =
1712 BlobManager::get()->removeDescriptorInfo(ctx_id, create_blob->blob_id);
1713 if (descriptorInfoOpt) {
1714 e.descriptorInfo =
1715 std::make_shared<ManagedDescriptorInfo>(std::move(*descriptorInfoOpt));
1716 } else {
1717 return -EINVAL;
1718 }
1719
1720 e.caching = e.descriptorInfo->caching;
1721 }
1722 } else {
1723 auto entryOpt = BlobManager::get()->removeMapping(ctx_id, create_blob->blob_id);
1724 if (entryOpt) {
1725 e.hva = entryOpt->addr;
1726 e.caching = entryOpt->caching;
1727 e.hvaSize = create_blob->size;
1728 } else {
1729 return -EINVAL;
1730 }
1731 }
1732
1733 e.blobId = create_blob->blob_id;
1734 e.blobMem = create_blob->blob_mem;
1735 e.blobFlags = create_blob->blob_flags;
1736 e.iov = nullptr;
1737 e.numIovs = 0;
1738 e.linear = 0;
1739 e.linearSize = 0;
1740
1741 mResources[res_handle] = e;
1742 return 0;
1743 }
1744
resourceMap(uint32_t res_handle,void ** hvaOut,uint64_t * sizeOut)1745 int resourceMap(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1746 if (mFeatures.ExternalBlob.enabled) return -EINVAL;
1747
1748 auto it = mResources.find(res_handle);
1749 if (it == mResources.end()) {
1750 if (hvaOut) *hvaOut = nullptr;
1751 if (sizeOut) *sizeOut = 0;
1752 return -EINVAL;
1753 }
1754
1755 const auto& entry = it->second;
1756
1757 if (hvaOut) *hvaOut = entry.hva;
1758 if (sizeOut) *sizeOut = entry.hvaSize;
1759 return 0;
1760 }
1761
resourceUnmap(uint32_t res_handle)1762 int resourceUnmap(uint32_t res_handle) {
1763 auto it = mResources.find(res_handle);
1764 if (it == mResources.end()) {
1765 return -EINVAL;
1766 }
1767
1768 // TODO(lfy): Good place to run any registered cleanup callbacks.
1769 // No-op for now.
1770 return 0;
1771 }
1772
platformImportResource(int res_handle,int res_info,void * resource)1773 int platformImportResource(int res_handle, int res_info, void* resource) {
1774 auto it = mResources.find(res_handle);
1775 if (it == mResources.end()) return -EINVAL;
1776 bool success = mVirtioGpuOps->platform_import_resource(res_handle, res_info, resource);
1777 return success ? 0 : -1;
1778 }
1779
platformResourceInfo(int res_handle,int * width,int * height,int * internal_format)1780 int platformResourceInfo(int res_handle, int* width, int* height, int* internal_format) {
1781 auto it = mResources.find(res_handle);
1782 if (it == mResources.end()) return -EINVAL;
1783 bool success =
1784 mVirtioGpuOps->platform_resource_info(res_handle, width, height, internal_format);
1785 return success ? 0 : -1;
1786 }
1787
platformCreateSharedEglContext()1788 void* platformCreateSharedEglContext() {
1789 return mVirtioGpuOps->platform_create_shared_egl_context();
1790 }
1791
platformDestroySharedEglContext(void * context)1792 int platformDestroySharedEglContext(void* context) {
1793 bool success = mVirtioGpuOps->platform_destroy_shared_egl_context(context);
1794 return success ? 0 : -1;
1795 }
1796
resourceMapInfo(uint32_t res_handle,uint32_t * map_info)1797 int resourceMapInfo(uint32_t res_handle, uint32_t* map_info) {
1798 auto it = mResources.find(res_handle);
1799 if (it == mResources.end()) return -EINVAL;
1800
1801 const auto& entry = it->second;
1802 *map_info = entry.caching;
1803 return 0;
1804 }
1805
exportBlob(uint32_t res_handle,struct stream_renderer_handle * handle)1806 int exportBlob(uint32_t res_handle, struct stream_renderer_handle* handle) {
1807 auto it = mResources.find(res_handle);
1808 if (it == mResources.end()) {
1809 return -EINVAL;
1810 }
1811
1812 auto& entry = it->second;
1813 if (entry.ringBlob && entry.ringBlob->isExportable()) {
1814 // Handle ownership transferred to VMM, gfxstream keeps the mapping.
1815 #ifdef _WIN32
1816 handle->os_handle =
1817 static_cast<int64_t>(reinterpret_cast<intptr_t>(entry.ringBlob->releaseHandle()));
1818 #else
1819 handle->os_handle = static_cast<int64_t>(entry.ringBlob->releaseHandle());
1820 #endif
1821 handle->handle_type = STREAM_MEM_HANDLE_TYPE_SHM;
1822 return 0;
1823 }
1824
1825 if (entry.descriptorInfo) {
1826 bool shareable = entry.blobFlags &
1827 (STREAM_BLOB_FLAG_USE_SHAREABLE | STREAM_BLOB_FLAG_USE_CROSS_DEVICE);
1828
1829 DescriptorType rawDescriptor;
1830 if (shareable) {
1831                 // TODO: Add a ManagedDescriptor::{clone, dup} method and use it;
1832                 // this should have no effect since gfxstream currently allocates
1833                 // mappable-only buffers.
1834 return -EINVAL;
1835 } else {
1836 auto rawDescriptorOpt = entry.descriptorInfo->descriptor.release();
1837 if (rawDescriptorOpt)
1838 rawDescriptor = *rawDescriptorOpt;
1839 else
1840 return -EINVAL;
1841 }
1842
1843 handle->handle_type = entry.descriptorInfo->handleType;
1844
1845 #ifdef _WIN32
1846 handle->os_handle = static_cast<int64_t>(reinterpret_cast<intptr_t>(rawDescriptor));
1847 #else
1848 handle->os_handle = static_cast<int64_t>(rawDescriptor);
1849 #endif
1850
1851 return 0;
1852 }
1853
1854 return -EINVAL;
1855 }
1856
vulkanInfo(uint32_t res_handle,struct stream_renderer_vulkan_info * vulkan_info)1857 int vulkanInfo(uint32_t res_handle, struct stream_renderer_vulkan_info* vulkan_info) {
1858 auto it = mResources.find(res_handle);
1859 if (it == mResources.end()) return -EINVAL;
1860
1861 const auto& entry = it->second;
1862 if (entry.descriptorInfo && entry.descriptorInfo->vulkanInfoOpt) {
1863 vulkan_info->memory_index = (*entry.descriptorInfo->vulkanInfoOpt).memoryIndex;
1864 memcpy(vulkan_info->device_id.device_uuid,
1865 (*entry.descriptorInfo->vulkanInfoOpt).deviceUUID,
1866 sizeof(vulkan_info->device_id.device_uuid));
1867 memcpy(vulkan_info->device_id.driver_uuid,
1868 (*entry.descriptorInfo->vulkanInfoOpt).driverUUID,
1869 sizeof(vulkan_info->device_id.driver_uuid));
1870 return 0;
1871 }
1872
1873 return -EINVAL;
1874 }
1875
1876 #ifdef CONFIG_AEMU
setServiceOps(const GoldfishPipeServiceOps * ops)1877 void setServiceOps(const GoldfishPipeServiceOps* ops) { mServiceOps = ops; }
1878 #endif // CONFIG_AEMU
1879 private:
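    // Copies the guest iovec table into the resource entry and allocates a single
    // contiguous "linear" staging buffer sized to the sum of the iov lengths, used
    // as a host-side staging copy of the scattered guest pages.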
allocResource(PipeResEntry & entry,iovec * iov,int num_iovs)1880 void allocResource(PipeResEntry& entry, iovec* iov, int num_iovs) {
1881 stream_renderer_debug("entry linear: %p", entry.linear);
1882 if (entry.linear) free(entry.linear);
1883
1884 size_t linearSize = 0;
1885 for (uint32_t i = 0; i < num_iovs; ++i) {
1886 stream_renderer_debug("iov base: %p", iov[i].iov_base);
1887 linearSize += iov[i].iov_len;
1888 stream_renderer_debug("has iov of %zu. linearSize current: %zu", iov[i].iov_len,
1889 linearSize);
1890 }
1891 stream_renderer_debug("final linearSize: %zu", linearSize);
1892
1893 void* linear = nullptr;
1894
1895 if (linearSize) linear = malloc(linearSize);
1896
1897 entry.numIovs = num_iovs;
1898 entry.iov = (iovec*)malloc(sizeof(*iov) * num_iovs);
1899 if (entry.numIovs > 0) {
1900 memcpy(entry.iov, iov, num_iovs * sizeof(*iov));
1901 }
1902 entry.linear = linear;
1903 entry.linearSize = linearSize;
1904 }
1905
detachResourceLocked(uint32_t ctxId,uint32_t toUnrefId)1906 void detachResourceLocked(uint32_t ctxId, uint32_t toUnrefId) {
1907 stream_renderer_debug("ctxid: %u resid: %u", ctxId, toUnrefId);
1908
1909 auto it = mContextResources.find(ctxId);
1910 if (it == mContextResources.end()) return;
1911
1912 std::vector<VirtioGpuResId> withoutRes;
1913 for (auto resId : it->second) {
1914 if (resId != toUnrefId) {
1915 withoutRes.push_back(resId);
1916 }
1917 }
1918 mContextResources[ctxId] = withoutRes;
1919
1920 auto resourceIt = mResources.find(toUnrefId);
1921 if (resourceIt == mResources.end()) return;
1922 auto& resource = resourceIt->second;
1923
1924 resource.hostPipe = 0;
1925 resource.ctxId = 0;
1926
1927 auto ctxIt = mContexts.find(ctxId);
1928 if (ctxIt != mContexts.end()) {
1929 auto& ctxEntry = ctxIt->second;
1930 if (ctxEntry.addressSpaceHandles.count(toUnrefId)) {
1931 uint32_t asgHandle = ctxEntry.addressSpaceHandles[toUnrefId];
1932
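                // Capture the resource's ring blob in the cleanup task so its
                // backing memory stays alive until the address space device handle
                // is destroyed on the cleanup thread.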
1933 mCleanupThread->enqueueCleanup([this, asgBlob = resource.ringBlob, asgHandle](){
1934 mAddressSpaceDeviceControlOps->destroy_handle(asgHandle);
1935 });
1936
1937 ctxEntry.addressSpaceHandles.erase(toUnrefId);
1938 }
1939 }
1940 }
1941
ensureAndGetServiceOps()1942 inline const GoldfishPipeServiceOps* ensureAndGetServiceOps() {
1943 if (mServiceOps) return mServiceOps;
1944 mServiceOps = goldfish_pipe_get_service_ops();
1945 return mServiceOps;
1946 }
1947
1948 void* mCookie = nullptr;
1949 gfxstream::host::FeatureSet mFeatures;
1950 stream_renderer_fence_callback mFenceCallback;
1951 AndroidVirtioGpuOps* mVirtioGpuOps = nullptr;
1952 uint32_t mPageSize = 4096;
1953 struct address_space_device_control_ops* mAddressSpaceDeviceControlOps = nullptr;
1954
1955 const GoldfishPipeServiceOps* mServiceOps = nullptr;
1956
1957 std::unordered_map<VirtioGpuCtxId, PipeCtxEntry> mContexts;
1958 std::unordered_map<VirtioGpuResId, PipeResEntry> mResources;
1959 std::unordered_map<VirtioGpuCtxId, std::vector<VirtioGpuResId>> mContextResources;
1960 std::unordered_map<VirtioGpuResId, std::vector<VirtioGpuCtxId>> mResourceContexts;
1961
1962     // When we wait for the GPU (GL or Vulkan), the next (and subsequent)
1963     // fences created for that context should not be signaled immediately.
1964     // Rather, they should get in line.
1965 std::unique_ptr<VirtioGpuTimelines> mVirtioGpuTimelines = nullptr;
1966
1967 std::unique_ptr<CleanupThread> mCleanupThread;
1968 };
1969
sRenderer()1970 static PipeVirglRenderer* sRenderer() {
1971 static PipeVirglRenderer* p = new PipeVirglRenderer;
1972 return p;
1973 }
1974
1975 extern "C" {
1976
stream_renderer_resource_create(struct stream_renderer_resource_create_args * args,struct iovec * iov,uint32_t num_iovs)1977 VG_EXPORT int stream_renderer_resource_create(struct stream_renderer_resource_create_args* args,
1978 struct iovec* iov, uint32_t num_iovs) {
1979 return sRenderer()->createResource(args, iov, num_iovs);
1980 }
1981
stream_renderer_resource_unref(uint32_t res_handle)1982 VG_EXPORT void stream_renderer_resource_unref(uint32_t res_handle) {
1983 sRenderer()->unrefResource(res_handle);
1984 }
1985
stream_renderer_context_destroy(uint32_t handle)1986 VG_EXPORT void stream_renderer_context_destroy(uint32_t handle) {
1987 sRenderer()->destroyContext(handle);
1988 }
1989
stream_renderer_submit_cmd(struct stream_renderer_command * cmd)1990 VG_EXPORT int stream_renderer_submit_cmd(struct stream_renderer_command* cmd) {
1991 return sRenderer()->submitCmd(cmd);
1992 }
1993
stream_renderer_transfer_read_iov(uint32_t handle,uint32_t ctx_id,uint32_t level,uint32_t stride,uint32_t layer_stride,struct stream_renderer_box * box,uint64_t offset,struct iovec * iov,int iovec_cnt)1994 VG_EXPORT int stream_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id, uint32_t level,
1995 uint32_t stride, uint32_t layer_stride,
1996 struct stream_renderer_box* box, uint64_t offset,
1997 struct iovec* iov, int iovec_cnt) {
1998 return sRenderer()->transferReadIov(handle, offset, box, iov, iovec_cnt);
1999 }
2000
stream_renderer_transfer_write_iov(uint32_t handle,uint32_t ctx_id,int level,uint32_t stride,uint32_t layer_stride,struct stream_renderer_box * box,uint64_t offset,struct iovec * iovec,unsigned int iovec_cnt)2001 VG_EXPORT int stream_renderer_transfer_write_iov(uint32_t handle, uint32_t ctx_id, int level,
2002 uint32_t stride, uint32_t layer_stride,
2003 struct stream_renderer_box* box, uint64_t offset,
2004 struct iovec* iovec, unsigned int iovec_cnt) {
2005 return sRenderer()->transferWriteIov(handle, offset, box, iovec, iovec_cnt);
2006 }
2007
stream_renderer_get_cap_set(uint32_t set,uint32_t * max_ver,uint32_t * max_size)2008 VG_EXPORT void stream_renderer_get_cap_set(uint32_t set, uint32_t* max_ver, uint32_t* max_size) {
2009 // `max_ver` not useful
2010 return sRenderer()->getCapset(set, max_size);
2011 }
2012
stream_renderer_fill_caps(uint32_t set,uint32_t version,void * caps)2013 VG_EXPORT void stream_renderer_fill_caps(uint32_t set, uint32_t version, void* caps) {
2014 // `version` not useful
2015 return sRenderer()->fillCaps(set, caps);
2016 }
2017
stream_renderer_resource_attach_iov(int res_handle,struct iovec * iov,int num_iovs)2018 VG_EXPORT int stream_renderer_resource_attach_iov(int res_handle, struct iovec* iov, int num_iovs) {
2019 return sRenderer()->attachIov(res_handle, iov, num_iovs);
2020 }
2021
stream_renderer_resource_detach_iov(int res_handle,struct iovec ** iov,int * num_iovs)2022 VG_EXPORT void stream_renderer_resource_detach_iov(int res_handle, struct iovec** iov,
2023 int* num_iovs) {
2024 return sRenderer()->detachIov(res_handle, iov, num_iovs);
2025 }
2026
stream_renderer_ctx_attach_resource(int ctx_id,int res_handle)2027 VG_EXPORT void stream_renderer_ctx_attach_resource(int ctx_id, int res_handle) {
2028 sRenderer()->attachResource(ctx_id, res_handle);
2029 }
2030
stream_renderer_ctx_detach_resource(int ctx_id,int res_handle)2031 VG_EXPORT void stream_renderer_ctx_detach_resource(int ctx_id, int res_handle) {
2032 sRenderer()->detachResource(ctx_id, res_handle);
2033 }
2034
stream_renderer_resource_get_info(int res_handle,struct stream_renderer_resource_info * info)2035 VG_EXPORT int stream_renderer_resource_get_info(int res_handle,
2036 struct stream_renderer_resource_info* info) {
2037 return sRenderer()->getResourceInfo(res_handle, info);
2038 }
2039
stream_renderer_flush(uint32_t res_handle)2040 VG_EXPORT void stream_renderer_flush(uint32_t res_handle) {
2041 sRenderer()->flushResource(res_handle);
2042 }
2043
stream_renderer_create_blob(uint32_t ctx_id,uint32_t res_handle,const struct stream_renderer_create_blob * create_blob,const struct iovec * iovecs,uint32_t num_iovs,const struct stream_renderer_handle * handle)2044 VG_EXPORT int stream_renderer_create_blob(uint32_t ctx_id, uint32_t res_handle,
2045 const struct stream_renderer_create_blob* create_blob,
2046 const struct iovec* iovecs, uint32_t num_iovs,
2047 const struct stream_renderer_handle* handle) {
2048     // Propagate any failure from createBlob to the caller.
2049     return sRenderer()->createBlob(ctx_id, res_handle, create_blob, handle);
2050 }
2051
stream_renderer_export_blob(uint32_t res_handle,struct stream_renderer_handle * handle)2052 VG_EXPORT int stream_renderer_export_blob(uint32_t res_handle,
2053 struct stream_renderer_handle* handle) {
2054 return sRenderer()->exportBlob(res_handle, handle);
2055 }
2056
stream_renderer_resource_map(uint32_t res_handle,void ** hvaOut,uint64_t * sizeOut)2057 VG_EXPORT int stream_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
2058 return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
2059 }
2060
stream_renderer_resource_unmap(uint32_t res_handle)2061 VG_EXPORT int stream_renderer_resource_unmap(uint32_t res_handle) {
2062 return sRenderer()->resourceUnmap(res_handle);
2063 }
2064
stream_renderer_context_create(uint32_t ctx_id,uint32_t nlen,const char * name,uint32_t context_init)2065 VG_EXPORT int stream_renderer_context_create(uint32_t ctx_id, uint32_t nlen, const char* name,
2066 uint32_t context_init) {
2067 return sRenderer()->createContext(ctx_id, nlen, name, context_init);
2068 }
2069
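// Fences flagged with STREAM_RENDERER_FLAG_FENCE_RING_IDX are placed on the
// per-context ring timeline identified by (ctx_id, ring_idx); all other fences go
// on the global timeline.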
stream_renderer_create_fence(const struct stream_renderer_fence * fence)2070 VG_EXPORT int stream_renderer_create_fence(const struct stream_renderer_fence* fence) {
2071 if (fence->flags & STREAM_RENDERER_FLAG_FENCE_RING_IDX) {
2072 sRenderer()->createFence(fence->fence_id, VirtioGpuRingContextSpecific{
2073 .mCtxId = fence->ctx_id,
2074 .mRingIdx = fence->ring_idx,
2075 });
2076 } else {
2077 sRenderer()->createFence(fence->fence_id, VirtioGpuRingGlobal{});
2078 }
2079
2080 return 0;
2081 }
2082
stream_renderer_platform_import_resource(int res_handle,int res_info,void * resource)2083 VG_EXPORT int stream_renderer_platform_import_resource(int res_handle, int res_info,
2084 void* resource) {
2085 return sRenderer()->platformImportResource(res_handle, res_info, resource);
2086 }
2087
stream_renderer_platform_resource_info(int res_handle,int * width,int * height,int * internal_format)2088 VG_EXPORT int stream_renderer_platform_resource_info(int res_handle, int* width, int* height,
2089 int* internal_format) {
2090 return sRenderer()->platformResourceInfo(res_handle, width, height, internal_format);
2091 }
2092
stream_renderer_platform_create_shared_egl_context()2093 VG_EXPORT void* stream_renderer_platform_create_shared_egl_context() {
2094 return sRenderer()->platformCreateSharedEglContext();
2095 }
2096
stream_renderer_platform_destroy_shared_egl_context(void * context)2097 VG_EXPORT int stream_renderer_platform_destroy_shared_egl_context(void* context) {
2098 return sRenderer()->platformDestroySharedEglContext(context);
2099 }
2100
stream_renderer_resource_map_info(uint32_t res_handle,uint32_t * map_info)2101 VG_EXPORT int stream_renderer_resource_map_info(uint32_t res_handle, uint32_t* map_info) {
2102 return sRenderer()->resourceMapInfo(res_handle, map_info);
2103 }
2104
stream_renderer_vulkan_info(uint32_t res_handle,struct stream_renderer_vulkan_info * vulkan_info)2105 VG_EXPORT int stream_renderer_vulkan_info(uint32_t res_handle,
2106 struct stream_renderer_vulkan_info* vulkan_info) {
2107 return sRenderer()->vulkanInfo(res_handle, vulkan_info);
2108 }
2109
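// Saves the renderer state to "<dir>snapshot.bin" (the directory string appears to
// be expected to end with a path separator). Only functional when built with
// GFXSTREAM_ENABLE_HOST_VK_SNAPSHOT; otherwise it fails with -EINVAL.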
stream_renderer_snapshot(const char * dir)2110 VG_EXPORT int stream_renderer_snapshot(const char* dir) {
2111 #ifdef GFXSTREAM_ENABLE_HOST_VK_SNAPSHOT
2112 std::string dirString(dir);
2113
2114 std::string snapshotFileName = dirString + "snapshot.bin";
2115
2116 std::unique_ptr<android::base::StdioStream> stream(new android::base::StdioStream(
2117 fopen(snapshotFileName.c_str(), "wb"), android::base::StdioStream::kOwner));
2118
2119 android_getOpenglesRenderer()->pauseAllPreSave();
2120 android::snapshot::SnapshotSaveStream saveStream{
2121 .stream = stream.get(),
2122 };
2123
2124 android_getOpenglesRenderer()->save(saveStream.stream, saveStream.textureSaver);
2125 return 0;
2126 #else
2127 stream_renderer_error("Snapshot save requested without support.");
2128 return -EINVAL;
2129 #endif
2130 }
2131
stream_renderer_restore(const char * dir)2132 VG_EXPORT int stream_renderer_restore(const char* dir) {
2133 #ifdef GFXSTREAM_ENABLE_HOST_VK_SNAPSHOT
2134 std::string dirString(dir);
2135 std::string snapshotFileName = dirString + "snapshot.bin";
2136
2137 std::unique_ptr<android::base::StdioStream> stream(new android::base::StdioStream(
2138 fopen(snapshotFileName.c_str(), "rb"), android::base::StdioStream::kOwner));
2139
2140 android::snapshot::SnapshotLoadStream loadStream{
2141 .stream = stream.get(),
2142 };
2143
2144 android_getOpenglesRenderer()->load(loadStream.stream, loadStream.textureLoader);
2145
2146 // In end2end tests, we don't really do snapshot save for render threads.
2147 // We will need to resume all render threads without waiting for snapshot.
2148 android_getOpenglesRenderer()->resumeAll(false);
2149 return 0;
2150 #else
2151     stream_renderer_error("Snapshot restore requested without support.");
2152 return -EINVAL;
2153 #endif
2154 }
2155
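// Adapts the GoldfishPipeServiceOps callbacks onto the host android_pipe_*
// implementation. The QEMUFile save/load hooks and the DMA hooks are no-ops
// here since neither applies to the virtio-gpu transport.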
2156 static const GoldfishPipeServiceOps goldfish_pipe_service_ops = {
2157 // guest_open()
__anond9f1afd20b02() 2158 [](GoldfishHwPipe* hwPipe) -> GoldfishHostPipe* {
2159 return static_cast<GoldfishHostPipe*>(android_pipe_guest_open(hwPipe));
2160 },
2161 // guest_open_with_flags()
__anond9f1afd20c02() 2162 [](GoldfishHwPipe* hwPipe, uint32_t flags) -> GoldfishHostPipe* {
2163 return static_cast<GoldfishHostPipe*>(android_pipe_guest_open_with_flags(hwPipe, flags));
2164 },
2165 // guest_close()
__anond9f1afd20d02() 2166 [](GoldfishHostPipe* hostPipe, GoldfishPipeCloseReason reason) {
2167 static_assert((int)GOLDFISH_PIPE_CLOSE_GRACEFUL == (int)PIPE_CLOSE_GRACEFUL,
2168 "Invalid PIPE_CLOSE_GRACEFUL value");
2169 static_assert((int)GOLDFISH_PIPE_CLOSE_REBOOT == (int)PIPE_CLOSE_REBOOT,
2170 "Invalid PIPE_CLOSE_REBOOT value");
2171 static_assert((int)GOLDFISH_PIPE_CLOSE_LOAD_SNAPSHOT == (int)PIPE_CLOSE_LOAD_SNAPSHOT,
2172 "Invalid PIPE_CLOSE_LOAD_SNAPSHOT value");
2173 static_assert((int)GOLDFISH_PIPE_CLOSE_ERROR == (int)PIPE_CLOSE_ERROR,
2174 "Invalid PIPE_CLOSE_ERROR value");
2175
2176 android_pipe_guest_close(hostPipe, static_cast<PipeCloseReason>(reason));
2177 },
2178 // guest_pre_load()
__anond9f1afd20e02() 2179 [](QEMUFile* file) { (void)file; },
2180 // guest_post_load()
__anond9f1afd20f02() 2181 [](QEMUFile* file) { (void)file; },
2182 // guest_pre_save()
__anond9f1afd21002() 2183 [](QEMUFile* file) { (void)file; },
2184 // guest_post_save()
__anond9f1afd21102() 2185 [](QEMUFile* file) { (void)file; },
2186 // guest_load()
__anond9f1afd21202() 2187 [](QEMUFile* file, GoldfishHwPipe* hwPipe, char* force_close) -> GoldfishHostPipe* {
2188 (void)file;
2189 (void)hwPipe;
2190 (void)force_close;
2191 return nullptr;
2192 },
2193 // guest_save()
__anond9f1afd21302() 2194 [](GoldfishHostPipe* hostPipe, QEMUFile* file) {
2195 (void)hostPipe;
2196 (void)file;
2197 },
2198 // guest_poll()
__anond9f1afd21402() 2199 [](GoldfishHostPipe* hostPipe) {
2200 static_assert((int)GOLDFISH_PIPE_POLL_IN == (int)PIPE_POLL_IN, "invalid POLL_IN values");
2201 static_assert((int)GOLDFISH_PIPE_POLL_OUT == (int)PIPE_POLL_OUT, "invalid POLL_OUT values");
2202 static_assert((int)GOLDFISH_PIPE_POLL_HUP == (int)PIPE_POLL_HUP, "invalid POLL_HUP values");
2203
2204 return static_cast<GoldfishPipePollFlags>(android_pipe_guest_poll(hostPipe));
2205 },
2206 // guest_recv()
__anond9f1afd21502() 2207 [](GoldfishHostPipe* hostPipe, GoldfishPipeBuffer* buffers, int numBuffers) -> int {
2208 // NOTE: Assumes that AndroidPipeBuffer and GoldfishPipeBuffer
2209 // have exactly the same layout.
2210 static_assert(sizeof(AndroidPipeBuffer) == sizeof(GoldfishPipeBuffer),
2211 "Invalid PipeBuffer sizes");
2212 // We can't use a static_assert with offsetof() because in msvc, it uses
2213 // reinterpret_cast.
2214 // TODO: Add runtime assertion instead?
2215 // https://developercommunity.visualstudio.com/content/problem/22196/static-assert-cannot-compile-constexprs-method-tha.html
2216 #ifndef _MSC_VER
2217 static_assert(offsetof(AndroidPipeBuffer, data) == offsetof(GoldfishPipeBuffer, data),
2218 "Invalid PipeBuffer::data offsets");
2219 static_assert(offsetof(AndroidPipeBuffer, size) == offsetof(GoldfishPipeBuffer, size),
2220 "Invalid PipeBuffer::size offsets");
2221 #endif
2222 return android_pipe_guest_recv(hostPipe, reinterpret_cast<AndroidPipeBuffer*>(buffers),
2223 numBuffers);
2224 },
2225 // wait_guest_recv()
__anond9f1afd21602() 2226 [](GoldfishHostPipe* hostPipe) {
2227 android_pipe_wait_guest_recv(hostPipe);
2228 },
2229 // guest_send()
__anond9f1afd21702() 2230 [](GoldfishHostPipe** hostPipe, const GoldfishPipeBuffer* buffers, int numBuffers) -> int {
2231 return android_pipe_guest_send(reinterpret_cast<void**>(hostPipe),
2232 reinterpret_cast<const AndroidPipeBuffer*>(buffers),
2233 numBuffers);
2234 },
2235 // wait_guest_send()
__anond9f1afd21802() 2236 [](GoldfishHostPipe* hostPipe) {
2237 android_pipe_wait_guest_send(hostPipe);
2238 },
2239 // guest_wake_on()
__anond9f1afd21902() 2240 [](GoldfishHostPipe* hostPipe, GoldfishPipeWakeFlags wakeFlags) {
2241 android_pipe_guest_wake_on(hostPipe, static_cast<int>(wakeFlags));
2242 },
2243 // dma_add_buffer()
__anond9f1afd21a02() 2244 [](void* pipe, uint64_t paddr, uint64_t sz) {
2245 // not considered for virtio
2246 },
2247 // dma_remove_buffer()
__anond9f1afd21b02() 2248 [](uint64_t paddr) {
2249 // not considered for virtio
2250 },
2251 // dma_invalidate_host_mappings()
__anond9f1afd21c02() 2252 []() {
2253 // not considered for virtio
2254 },
2255 // dma_reset_host_mappings()
__anond9f1afd21d02() 2256 []() {
2257 // not considered for virtio
2258 },
2259 // dma_save_mappings()
__anond9f1afd21e02() 2260 [](QEMUFile* file) { (void)file; },
2261 // dma_load_mappings()
__anond9f1afd21f02() 2262 [](QEMUFile* file) { (void)file; },
2263 };
2264
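// Brings up the host GL/Vulkan renderer: chooses EGL-on-EGL vs. host GL from the
// renderer flags and environment, configures the address space graphics transport
// buffer sizes, starts the OpenGLES renderer at the requested display size, and
// initializes the opengles and refcount pipe services.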
stream_renderer_opengles_init(uint32_t display_width,uint32_t display_height,int renderer_flags,gfxstream::host::FeatureSet features)2265 static int stream_renderer_opengles_init(uint32_t display_width, uint32_t display_height,
2266 int renderer_flags, gfxstream::host::FeatureSet features) {
2267 stream_renderer_debug("start. display dimensions: width %u height %u, renderer flags: 0x%x",
2268 display_width, display_height, renderer_flags);
2269
2270 // Flags processing
2271
2272     // TODO: Hook up "gfxstream egl" to the renderer flag
2273     // STREAM_RENDERER_FLAGS_USE_EGL_BIT in crosvm,
2274     // as specified from launch_cvd.
2275     // At the moment, set ANDROID_GFXSTREAM_EGL=1
2276     // (used for testing on GCE).
2277 if (android::base::getEnvironmentVariable("ANDROID_GFXSTREAM_EGL") == "1") {
2278 android::base::setEnvironmentVariable("ANDROID_EGL_ON_EGL", "1");
2279 android::base::setEnvironmentVariable("ANDROID_EMUGL_LOG_PRINT", "1");
2280 android::base::setEnvironmentVariable("ANDROID_EMUGL_VERBOSE", "1");
2281 }
2282 // end for test on GCE
2283
2284 android::base::setEnvironmentVariable("ANDROID_EMU_HEADLESS", "1");
2285
2286 bool egl2eglByEnv = android::base::getEnvironmentVariable("ANDROID_EGL_ON_EGL") == "1";
2287 bool egl2eglByFlag = renderer_flags & STREAM_RENDERER_FLAGS_USE_EGL_BIT;
2288 bool enable_egl2egl = egl2eglByFlag || egl2eglByEnv;
2289 if (enable_egl2egl) {
2290 android::base::setEnvironmentVariable("ANDROID_GFXSTREAM_EGL", "1");
2291 android::base::setEnvironmentVariable("ANDROID_EGL_ON_EGL", "1");
2292 }
2293
2294 bool surfaceless = renderer_flags & STREAM_RENDERER_FLAGS_USE_SURFACELESS_BIT;
2295
2296 android::featurecontrol::productFeatureOverride();
2297
2298 gfxstream::vk::vkDispatch(false /* don't use test ICD */);
2299
2300 auto androidHw = aemu_get_android_hw();
2301
2302 androidHw->hw_gltransport_asg_writeBufferSize = 1048576;
2303 androidHw->hw_gltransport_asg_writeStepSize = 262144;
2304 androidHw->hw_gltransport_asg_dataRingSize = 524288;
2305 androidHw->hw_gltransport_drawFlushInterval = 10000;
2306
2307 EmuglConfig config;
2308
2309 // Make all the console agents available.
2310 android::emulation::injectGraphicsAgents(android::emulation::GfxStreamGraphicsAgentFactory());
2311
2312 emuglConfig_init(&config, true /* gpu enabled */, "auto",
2313 enable_egl2egl ? "swiftshader_indirect" : "host", 64, /* bitness */
2314 surfaceless, /* no window */
2315 false, /* blocklisted */
2316 false, /* has guest renderer */
2317 WINSYS_GLESBACKEND_PREFERENCE_AUTO, true /* force host gpu vulkan */);
2318
2319 emuglConfig_setupEnv(&config);
2320
2321 android_prepareOpenglesEmulation();
2322
2323 {
2324 static gfxstream::RenderLibPtr renderLibPtr = gfxstream::initLibrary();
2325 android_setOpenglesEmulation(renderLibPtr.get(), nullptr, nullptr);
2326 }
2327
2328 int maj;
2329 int min;
2330 android_startOpenglesRenderer(display_width, display_height, 1, 28, getGraphicsAgents()->vm,
2331 getGraphicsAgents()->emu, getGraphicsAgents()->multi_display,
2332 &features, &maj, &min);
2333
2334 char* vendor = nullptr;
2335 char* renderer = nullptr;
2336 char* version = nullptr;
2337
2338 android_getOpenglesHardwareStrings(&vendor, &renderer, &version);
2339
2340     stream_renderer_info("GL strings: [%s] [%s] [%s].", vendor, renderer, version);
2341
2342 auto openglesRenderer = android_getOpenglesRenderer();
2343
2344 if (!openglesRenderer) {
2345 stream_renderer_error("No renderer started, fatal");
2346 return -EINVAL;
2347 }
2348
2349 address_space_set_vm_operations(getGraphicsAgents()->vm);
2350 android_init_opengles_pipe();
2351 android_opengles_pipe_set_recv_mode(2 /* virtio-gpu */);
2352 android_init_refcount_pipe();
2353
2354 return 0;
2355 }
2356
2357 namespace {
2358
parseGfxstreamFeatures(const int renderer_flags,const std::string & renderer_features,gfxstream::host::FeatureSet & features)2359 int parseGfxstreamFeatures(const int renderer_flags,
2360 const std::string& renderer_features,
2361 gfxstream::host::FeatureSet& features) {
2362 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2363 &features, ExternalBlob,
2364 renderer_flags & STREAM_RENDERER_FLAGS_USE_EXTERNAL_BLOB);
2365 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2366 &features, GlAsyncSwap, false);
2367 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2368 &features, GlDirectMem, false);
2369 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2370 &features, GlDma, false);
2371 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2372 &features, GlesDynamicVersion, true);
2373 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2374 &features, GlPipeChecksum, false);
2375 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2376 &features, GuestUsesAngle,
2377 (renderer_flags & STREAM_RENDERER_FLAGS_USE_VK_BIT) &&
2378 !(renderer_flags & STREAM_RENDERER_FLAGS_USE_GLES_BIT));
2379 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2380 &features, HostComposition, true);
2381 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2382 &features, NativeTextureDecompression, false);
2383 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2384 &features, NoDelayCloseColorBuffer, true);
2385 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2386 &features, PlayStoreImage,
2387 !(renderer_flags & STREAM_RENDERER_FLAGS_USE_GLES_BIT));
2388 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2389 &features, RefCountPipe,
2390 /*Resources are ref counted via guest file objects.*/false);
2391 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2392 &features, SystemBlob,
2393 renderer_flags & STREAM_RENDERER_FLAGS_USE_SYSTEM_BLOB);
2394 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2395 &features, VirtioGpuFenceContexts, true);
2396 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2397 &features, VirtioGpuNativeSync, true);
2398 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2399 &features, VirtioGpuNext, true);
2400 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2401 &features, Vulkan,
2402 renderer_flags & STREAM_RENDERER_FLAGS_USE_VK_BIT);
2403 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2404 &features, VulkanBatchedDescriptorSetUpdate, true);
2405 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2406 &features, VulkanIgnoredHandles, true);
2407 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2408 &features, VulkanNativeSwapchain,
2409 renderer_flags & STREAM_RENDERER_FLAGS_VULKAN_NATIVE_SWAPCHAIN_BIT);
2410 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2411 &features, VulkanNullOptionalStrings, true);
2412 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2413 &features, VulkanQueueSubmitWithCommands, true);
2414 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2415 &features, VulkanShaderFloat16Int8, true);
2416 GFXSTREAM_SET_FEATURE_ON_CONDITION(
2417 &features, VulkanSnapshots,
2418 android::base::getEnvironmentVariable("ANDROID_GFXSTREAM_CAPTURE_VK_SNAPSHOT") == "1");
2419
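    // Each entry in `renderer_features` is expected to have the form
    // "<FeatureName>:enabled" or "<FeatureName>:disabled", with entries separated by
    // commas (e.g. "VulkanSnapshots:enabled,GlAsyncSwap:disabled"); unknown feature
    // names or statuses are rejected below.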
2420 for (const std::string& renderer_feature : gfxstream::Split(renderer_features, ",")) {
2421 if (renderer_feature.empty()) continue;
2422
2423 const std::vector<std::string>& parts = gfxstream::Split(renderer_feature, ":");
2424 if (parts.size() != 2) {
2425 stream_renderer_error("Error: invalid renderer features: %s",
2426 renderer_features.c_str());
2427 return -EINVAL;
2428 }
2429
2430 const std::string& feature_name = parts[0];
2431
2432 auto feature_it = features.map.find(feature_name);
2433 if (feature_it == features.map.end()) {
2434 stream_renderer_error("Error: invalid renderer feature: '%s'", feature_name.c_str());
2435 return -EINVAL;
2436 }
2437
2438 const std::string& feature_status = parts[1];
2439 if (feature_status != "enabled" && feature_status != "disabled") {
2440 stream_renderer_error("Error: invalid option %s for renderer feature: %s",
2441 feature_status.c_str(), feature_name.c_str());
2442 return -EINVAL;
2443 }
2444
2445 auto& feature_info = feature_it->second;
2446 feature_info->enabled = feature_status == "enabled";
2447 feature_info->reason = "Overridden via STREAM_RENDERER_PARAM_RENDERER_FEATURES";
2448
2449 stream_renderer_error("Gfxstream feature %s %s", feature_name.c_str(),
2450 feature_status.c_str());
2451 }
2452
2453 if (features.SystemBlob.enabled) {
2454         if (!features.ExternalBlob.enabled) {
2455             stream_renderer_error("The SystemBlob feature requires the ExternalBlob feature.");
2456 return -EINVAL;
2457 }
2458 #ifndef _WIN32
2459 stream_renderer_warn("Warning: USE_SYSTEM_BLOB has only been tested on Windows");
2460 #endif
2461 }
2462 if (features.VulkanNativeSwapchain.enabled && !features.Vulkan.enabled) {
2463         stream_renderer_error("Can't enable VulkanNativeSwapchain: the Vulkan feature is disabled");
2464 return -EINVAL;
2465 }
2466
2467 return 0;
2468 }
2469
2470 } // namespace
2471
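// Illustrative sketch (assumptions, not part of this file): a VMM could supply the
// required parameters (USER_DATA, RENDERER_FLAGS, FENCE_CALLBACK) plus a display
// size roughly as follows, where `my_cookie` and `my_fence_cb` are hypothetical
// caller-provided values:
//
//     struct stream_renderer_param params[] = {
//         {STREAM_RENDERER_PARAM_USER_DATA, (uint64_t)(uintptr_t)my_cookie},
//         {STREAM_RENDERER_PARAM_RENDERER_FLAGS, STREAM_RENDERER_FLAGS_USE_VK_BIT},
//         {STREAM_RENDERER_PARAM_FENCE_CALLBACK, (uint64_t)(uintptr_t)my_fence_cb},
//         {STREAM_RENDERER_PARAM_WIN0_WIDTH, 1280},
//         {STREAM_RENDERER_PARAM_WIN0_HEIGHT, 720},
//     };
//     stream_renderer_init(params, sizeof(params) / sizeof(params[0]));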
stream_renderer_init(struct stream_renderer_param * stream_renderer_params,uint64_t num_params)2472 VG_EXPORT int stream_renderer_init(struct stream_renderer_param* stream_renderer_params,
2473 uint64_t num_params) {
2474 // Required parameters.
2475 std::unordered_set<uint64_t> required_params{STREAM_RENDERER_PARAM_USER_DATA,
2476 STREAM_RENDERER_PARAM_RENDERER_FLAGS,
2477 STREAM_RENDERER_PARAM_FENCE_CALLBACK};
2478
2479 // String names of the parameters.
2480 std::unordered_map<uint64_t, std::string> param_strings{
2481 {STREAM_RENDERER_PARAM_USER_DATA, "USER_DATA"},
2482 {STREAM_RENDERER_PARAM_RENDERER_FLAGS, "RENDERER_FLAGS"},
2483 {STREAM_RENDERER_PARAM_FENCE_CALLBACK, "FENCE_CALLBACK"},
2484 {STREAM_RENDERER_PARAM_WIN0_WIDTH, "WIN0_WIDTH"},
2485 {STREAM_RENDERER_PARAM_WIN0_HEIGHT, "WIN0_HEIGHT"},
2486 {STREAM_RENDERER_PARAM_DEBUG_CALLBACK, "DEBUG_CALLBACK"},
2487 {STREAM_RENDERER_SKIP_OPENGLES_INIT, "SKIP_OPENGLES_INIT"},
2488 {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT,
2489 "METRICS_CALLBACK_ADD_INSTANT_EVENT"},
2490 {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR,
2491 "METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR"},
2492 {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC,
2493 "METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC"},
2494 {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT,
2495 "METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT"},
2496 {STREAM_RENDERER_PARAM_METRICS_CALLBACK_SET_ANNOTATION, "METRICS_CALLBACK_SET_ANNOTATION"},
2497 {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ABORT, "METRICS_CALLBACK_ABORT"}};
2498
2499 // Print full values for these parameters:
2500 // Values here must not be pointers (e.g. callback functions), to avoid potentially identifying
2501     // someone via ASLR. Under ASLR, pointer values are randomized at boot, so they may differ
2502     // between users but stay similar across a single user's sessions.
2503 // As a convenience, any value <= 4096 is also printed, to catch small or null pointer errors.
2504 std::unordered_set<uint64_t> printed_param_values{STREAM_RENDERER_PARAM_RENDERER_FLAGS,
2505 STREAM_RENDERER_PARAM_WIN0_WIDTH,
2506 STREAM_RENDERER_PARAM_WIN0_HEIGHT};
2507
2508 // We may have unknown parameters, so this function is lenient.
2509 auto get_param_string = [&](uint64_t key) -> std::string {
2510 auto param_string = param_strings.find(key);
2511 if (param_string != param_strings.end()) {
2512 return param_string->second;
2513 } else {
2514 return "Unknown param with key=" + std::to_string(key);
2515 }
2516 };
2517
2518 // Initialization data.
2519 uint32_t display_width = 0;
2520 uint32_t display_height = 0;
2521 void* renderer_cookie = nullptr;
2522 int renderer_flags = 0;
2523 std::string renderer_features_str;
2524 stream_renderer_fence_callback fence_callback = nullptr;
2525 bool skip_opengles = false;
2526
2527 // Iterate all parameters that we support.
2528 stream_renderer_debug("Reading stream renderer parameters:");
2529 for (uint64_t i = 0; i < num_params; ++i) {
2530 stream_renderer_param& param = stream_renderer_params[i];
2531
2532 // Print out parameter we are processing. See comment above `printed_param_values` before
2533 // adding new prints.
2534 if (printed_param_values.find(param.key) != printed_param_values.end() ||
2535 param.value <= 4096) {
2536 stream_renderer_debug("%s - %llu", get_param_string(param.key).c_str(),
2537 static_cast<unsigned long long>(param.value));
2538 } else {
2539 // If not full value, print that it was passed.
2540 stream_renderer_debug("%s", get_param_string(param.key).c_str());
2541 }
2542
2543 // Removing every param we process will leave required_params empty if all provided.
2544 required_params.erase(param.key);
2545
2546 switch (param.key) {
2547 case STREAM_RENDERER_PARAM_NULL:
2548 break;
2549 case STREAM_RENDERER_PARAM_USER_DATA: {
2550 renderer_cookie = reinterpret_cast<void*>(static_cast<uintptr_t>(param.value));
2551 globalUserData = renderer_cookie;
2552 break;
2553 }
2554 case STREAM_RENDERER_PARAM_RENDERER_FLAGS: {
2555 renderer_flags = static_cast<int>(param.value);
2556 break;
2557 }
2558 case STREAM_RENDERER_PARAM_FENCE_CALLBACK: {
2559 fence_callback = reinterpret_cast<stream_renderer_fence_callback>(
2560 static_cast<uintptr_t>(param.value));
2561 break;
2562 }
2563 case STREAM_RENDERER_PARAM_WIN0_WIDTH: {
2564 display_width = static_cast<uint32_t>(param.value);
2565 break;
2566 }
2567 case STREAM_RENDERER_PARAM_WIN0_HEIGHT: {
2568 display_height = static_cast<uint32_t>(param.value);
2569 break;
2570 }
2571 case STREAM_RENDERER_PARAM_DEBUG_CALLBACK: {
2572 globalDebugCallback = reinterpret_cast<stream_renderer_debug_callback>(
2573 static_cast<uintptr_t>(param.value));
2574 break;
2575 }
2576 case STREAM_RENDERER_SKIP_OPENGLES_INIT: {
2577 skip_opengles = static_cast<bool>(param.value);
2578 break;
2579 }
2580 case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT: {
2581 MetricsLogger::add_instant_event_callback =
2582 reinterpret_cast<stream_renderer_param_metrics_callback_add_instant_event>(
2583 static_cast<uintptr_t>(param.value));
2584 break;
2585 }
2586 case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR: {
2587 MetricsLogger::add_instant_event_with_descriptor_callback = reinterpret_cast<
2588 stream_renderer_param_metrics_callback_add_instant_event_with_descriptor>(
2589 static_cast<uintptr_t>(param.value));
2590 break;
2591 }
2592 case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC: {
2593 MetricsLogger::add_instant_event_with_metric_callback = reinterpret_cast<
2594 stream_renderer_param_metrics_callback_add_instant_event_with_metric>(
2595 static_cast<uintptr_t>(param.value));
2596 break;
2597 }
2598 case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT: {
2599 MetricsLogger::add_vulkan_out_of_memory_event = reinterpret_cast<
2600 stream_renderer_param_metrics_callback_add_vulkan_out_of_memory_event>(
2601 static_cast<uintptr_t>(param.value));
2602 break;
2603 }
2604 case STREAM_RENDERER_PARAM_RENDERER_FEATURES: {
2605 renderer_features_str =
2606 std::string(reinterpret_cast<const char*>(static_cast<uintptr_t>(param.value)));
2607 break;
2608 }
2609 case STREAM_RENDERER_PARAM_METRICS_CALLBACK_SET_ANNOTATION: {
2610 MetricsLogger::set_crash_annotation_callback =
2611 reinterpret_cast<stream_renderer_param_metrics_callback_set_annotation>(
2612 static_cast<uintptr_t>(param.value));
2613 break;
2614 }
2615 case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ABORT: {
2616 emugl::setDieFunction(
2617 reinterpret_cast<stream_renderer_param_metrics_callback_abort>(
2618 static_cast<uintptr_t>(param.value)));
2619 break;
2620 }
2621 default: {
2622 // We skip any parameters we don't recognize.
2623 stream_renderer_error(
2624 "Skipping unknown parameter key: %llu. May need to upgrade gfxstream.",
2625 static_cast<unsigned long long>(param.key));
2626 break;
2627 }
2628 }
2629 }
2630 stream_renderer_debug("Finished reading parameters");
2631
2632 // Some required params not found.
2633 if (required_params.size() > 0) {
2634 stream_renderer_error("Missing required parameters:");
2635 for (uint64_t param : required_params) {
2636 stream_renderer_error("%s", get_param_string(param).c_str());
2637 }
2638 stream_renderer_error("Failing initialization intentionally");
2639 return -EINVAL;
2640 }
2641
2642 gfxstream::host::FeatureSet features;
2643 int ret = parseGfxstreamFeatures(renderer_flags, renderer_features_str, features);
2644 if (ret) {
2645 stream_renderer_error("Failed to initialize: failed to parse Gfxstream features.");
2646 return ret;
2647 }
2648
2649 stream_renderer_info("Gfxstream features:");
2650 for (const auto& [_, featureInfo] : features.map) {
2651 stream_renderer_info(" %s: %s (%s)", featureInfo->name.c_str(),
2652 (featureInfo->enabled ? "enabled" : "disabled"), featureInfo->reason.c_str());
2653 }
2654
2655 // Set non product-specific callbacks
2656 gfxstream::vk::vk_util::setVkCheckCallbacks(
2657 std::make_unique<gfxstream::vk::vk_util::VkCheckCallbacks>(
2658 gfxstream::vk::vk_util::VkCheckCallbacks{
2659 .onVkErrorOutOfMemory =
2660 [](VkResult result, const char* function, int line) {
2661 auto fb = gfxstream::FrameBuffer::getFB();
2662 if (!fb) {
2663 stream_renderer_error(
2664 "FrameBuffer not yet initialized. Dropping out of memory event");
2665 return;
2666 }
2667 fb->logVulkanOutOfMemory(result, function, line);
2668 },
2669 .onVkErrorOutOfMemoryOnAllocation =
2670 [](VkResult result, const char* function, int line,
2671 std::optional<uint64_t> allocationSize) {
2672 auto fb = gfxstream::FrameBuffer::getFB();
2673 if (!fb) {
2674 stream_renderer_error(
2675 "FrameBuffer not yet initialized. Dropping out of memory event");
2676 return;
2677 }
2678 fb->logVulkanOutOfMemory(result, function, line, allocationSize);
2679 }}));
2680
2681 if (!skip_opengles) {
2682 // aemu currently does its own opengles initialization in
2683 // qemu/android/android-emu/android/opengles.cpp.
2684 int ret = stream_renderer_opengles_init(display_width, display_height, renderer_flags, features);
2685 if (ret) {
2686 return ret;
2687 }
2688 }
2689
2690 sRenderer()->init(renderer_cookie, features, fence_callback);
2691 gfxstream::FrameBuffer::waitUntilInitialized();
2692
2693 stream_renderer_info("Gfxstream initialized successfully!");
2694 return 0;
2695 }
2696
gfxstream_backend_setup_window(void * native_window_handle,int32_t window_x,int32_t window_y,int32_t window_width,int32_t window_height,int32_t fb_width,int32_t fb_height)2697 VG_EXPORT void gfxstream_backend_setup_window(void* native_window_handle, int32_t window_x,
2698 int32_t window_y, int32_t window_width,
2699 int32_t window_height, int32_t fb_width,
2700 int32_t fb_height) {
2701 android_showOpenglesWindow(native_window_handle, window_x, window_y, window_width,
2702 window_height, fb_width, fb_height, 1.0f, 0, false, false);
2703 }
2704
stream_renderer_teardown()2705 VG_EXPORT void stream_renderer_teardown() {
2706 android_finishOpenglesRenderer();
2707 android_hideOpenglesWindow();
2708 android_stopOpenglesRenderer(true);
2709
2710 sRenderer()->teardown();
2711     stream_renderer_info("Gfxstream shutdown completed!");
2712 }
2713
gfxstream_backend_set_screen_mask(int width,int height,const unsigned char * rgbaData)2714 VG_EXPORT void gfxstream_backend_set_screen_mask(int width, int height,
2715 const unsigned char* rgbaData) {
2716 android_setOpenglesScreenMask(width, height, rgbaData);
2717 }
2718
goldfish_pipe_get_service_ops()2719 const GoldfishPipeServiceOps* goldfish_pipe_get_service_ops() { return &goldfish_pipe_service_ops; }
2720
2721 static_assert(sizeof(struct stream_renderer_device_id) == 32,
2722 "stream_renderer_device_id must be 32 bytes");
2723 static_assert(offsetof(struct stream_renderer_device_id, device_uuid) == 0,
2724 "stream_renderer_device_id.device_uuid must be at offset 0");
2725 static_assert(offsetof(struct stream_renderer_device_id, driver_uuid) == 16,
2726 "stream_renderer_device_id.driver_uuid must be at offset 16");
2727
2728 static_assert(sizeof(struct stream_renderer_vulkan_info) == 36,
2729 "stream_renderer_vulkan_info must be 36 bytes");
2730 static_assert(offsetof(struct stream_renderer_vulkan_info, memory_index) == 0,
2731 "stream_renderer_vulkan_info.memory_index must be at offset 0");
2732 static_assert(offsetof(struct stream_renderer_vulkan_info, device_id) == 4,
2733 "stream_renderer_vulkan_info.device_id must be at offset 4");
2734
2735 static_assert(sizeof(struct stream_renderer_param_host_visible_memory_mask_entry) == 36,
2736 "stream_renderer_param_host_visible_memory_mask_entry must be 36 bytes");
2737 static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask_entry, device_id) == 0,
2738 "stream_renderer_param_host_visible_memory_mask_entry.device_id must be at offset 0");
2739 static_assert(
2740 offsetof(struct stream_renderer_param_host_visible_memory_mask_entry, memory_type_mask) == 32,
2741 "stream_renderer_param_host_visible_memory_mask_entry.memory_type_mask must be at offset 32");
2742
2743 static_assert(sizeof(struct stream_renderer_param_host_visible_memory_mask) == 16,
2744 "stream_renderer_param_host_visible_memory_mask must be 16 bytes");
2745 static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask, entries) == 0,
2746 "stream_renderer_param_host_visible_memory_mask.entries must be at offset 0");
2747 static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask, num_entries) == 8,
2748 "stream_renderer_param_host_visible_memory_mask.num_entries must be at offset 8");
2749
2750 static_assert(sizeof(struct stream_renderer_param) == 16, "stream_renderer_param must be 16 bytes");
2751 static_assert(offsetof(struct stream_renderer_param, key) == 0,
2752 "stream_renderer_param.key must be at offset 0");
2753 static_assert(offsetof(struct stream_renderer_param, value) == 8,
2754 "stream_renderer_param.value must be at offset 8");
2755
2756 #ifdef CONFIG_AEMU
2757
stream_renderer_set_service_ops(const GoldfishPipeServiceOps * ops)2758 VG_EXPORT void stream_renderer_set_service_ops(const GoldfishPipeServiceOps* ops) {
2759 sRenderer()->setServiceOps(ops);
2760 }
2761
2762 #endif // CONFIG_AEMU
2763
2764 } // extern "C"
2765