1 // Copyright 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include <vulkan/vulkan.h>
15
16 #include <deque>
17 #include <type_traits>
18 #include <unordered_map>
19
20 #include "FrameBuffer.h"
21 #include "GfxStreamAgents.h"
22 #include "VirtioGpuTimelines.h"
23 #include "VkCommonOperations.h"
24 #include "aemu/base/AlignedBuf.h"
25 #include "aemu/base/ManagedDescriptor.hpp"
26 #include "aemu/base/Metrics.h"
27 #include "aemu/base/Tracing.h"
28 #include "aemu/base/memory/SharedMemory.h"
29 #include "aemu/base/synchronization/Lock.h"
30 #include "host-common/AddressSpaceService.h"
31 #include "host-common/GfxstreamFatalError.h"
32 #include "host-common/HostmemIdMapping.h"
33 #include "host-common/address_space_device.h"
34 #include "host-common/android_pipe_common.h"
35 #include "host-common/android_pipe_device.h"
36 #include "host-common/feature_control.h"
37 #include "host-common/globals.h"
38 #include "host-common/linux_types.h"
39 #include "host-common/opengles-pipe.h"
40 #include "host-common/opengles.h"
41 #include "host-common/refcount-pipe.h"
42 #include "host-common/vm_operations.h"
43 #include "virgl_hw.h"
44 #include "virtgpu_gfxstream_protocol.h"
45 #include "vk_util.h"
46
47 extern "C" {
48 #include "drm_fourcc.h"
49 #include "host-common/goldfish_pipe.h"
50 #include "virgl_hw.h"
51 #include "virtio-gpu-gfxstream-renderer.h"
52 } // extern "C"
53
54 #define DEBUG_VIRTIO_GOLDFISH_PIPE 0
55
56 #if DEBUG_VIRTIO_GOLDFISH_PIPE
57
58 #define VGPLOG(fmt, ...) fprintf(stderr, "%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
59
60 #else
61 #define VGPLOG(fmt, ...)
62 #endif
63
64 #define VGP_FATAL() \
65 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "virtio-goldfish-pipe fatal error: "
66
67 #ifdef VIRTIO_GOLDFISH_EXPORT_API
68
69 #ifdef _WIN32
70 #define VG_EXPORT __declspec(dllexport)
71 #else
72 #define VG_EXPORT __attribute__((visibility("default")))
73 #endif
74
75 #else
76
77 #define VG_EXPORT
78
79 #endif // !VIRTIO_GOLDFISH_EXPORT_API
80
81 #define GFXSTREAM_DEBUG_LEVEL 1
82
83 #if GFXSTREAM_DEBUG_LEVEL >= 1
84 #define GFXS_LOG(fmt, ...) \
85 do { \
86 fprintf(stdout, "%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
87 fflush(stdout); \
88 } while (0)
89
90 #else
91 #define GFXS_LOG(fmt, ...)
92 #endif
93
94 #define POST_CALLBACK_DISPLAY_TYPE_X 0
95 #define POST_CALLBACK_DISPLAY_TYPE_WAYLAND_SHARED_MEM 1
96 #define POST_CALLBACK_DISPLAY_TYPE_WINDOWS_HWND 2
97
98 // Virtio Goldfish Pipe: Overview-----------------------------------------------
99 //
100 // Virtio Goldfish Pipe is meant for running goldfish pipe services with a
101 // stock Linux kernel that is already capable of virtio-gpu. It runs DRM
102 // VIRTGPU ioctls on top of a custom implementation of virglrenderer on the
103 // host side that doesn't (directly) do any rendering, but instead talks to
104 // host-side pipe services.
105 //
106 // This is mainly used for graphics at the moment, though it's possible to run
107 // other pipe services over virtio-gpu as well. virtio-gpu is selected over
108 // other devices primarily because an API (virglrenderer) already exists that
109 // is somewhat separate from virtio-gpu, which avoids the need to create
110 // a new virtio device to handle goldfish pipe.
111 //
112 // How it works: existing virglrenderer APIs are remapped to perform pipe
113 // operations. First of all, pipe operations consist of the following:
114 //
115 // - open() / close(): Starts or stops an instance of a pipe service.
116 //
117 // - write(const void* buf, size_t len) / read(const void* buf, size_t len):
118 // Sends or receives data over the pipe. The first write() is the name of the
119 // pipe service. After the pipe service is determined, the host calls
120 // resetPipe() to replace the host-side pipe instance with an instance of the
121 // pipe service.
122 //
123 // - reset(void* initialPipe, void* actualPipe): the operation that replaces an
124 // initial pipe with an instance of a pipe service.
125 //
126 // Next, here's how the pipe operations map to virglrenderer commands:
127 //
128 // - open() -> virgl_renderer_context_create(),
129 // virgl_renderer_resource_create(),
130 // virgl_renderer_resource_attach_iov()
131 //
132 // The open() corresponds to a guest-side open of a rendernode, which triggers
133 // context creation. Each pipe corresponds 1:1 with a drm virtgpu context id.
134 // We also associate an R8 resource with each pipe as the backing data for
135 // write/read.
136 //
137 // - close() -> virgl_renderer_resource_unref(),
138 // virgl_renderer_context_destroy()
139 //
140 // The close() corresponds to undoing the operations of open().
141 //
142 // - write() -> virgl_renderer_transfer_write_iov() OR
143 // virgl_renderer_submit_cmd()
144 //
145 // Pipe write() operation corresponds to performing a TRANSFER_TO_HOST ioctl on
146 // the resource created alongside open(), OR an EXECBUFFER ioctl.
147 //
148 // - read() -> virgl_renderer_transfer_read_iov()
149 //
150 // Pipe read() operation corresponds to performing a TRANSFER_FROM_HOST ioctl on
151 // the resource created alongside open().
152 //
153 // Details on transfer mechanism: mapping 2D transfer to 1D ones----------------
154 //
155 // Resource objects are typically 2D textures, while we want to transmit
156 // 1D buffers to the pipe services on the host. DRM VIRTGPU uses the concept
157 // of a 'box' to represent transfers that do not involve an entire resource
158 // object. Each box has a x, y, width and height parameter to define the
159 // extent of the transfer for a 2D texture. In our use case, we only use the x
160 // and width parameters. We've also created the resource with R8 format
161 // (byte-by-byte) with width equal to the total size of the transfer buffer we
162 // want (around 1 MB).
163 //
164 // The resource object itself is currently backed via plain guest RAM, which
165 // can be physically non-contiguous from the guest POV, and therefore
166 // corresponds to a possibly-long list of pointers and sizes (iov) on the host
167 // side. The sync_iov helper function converts the list of pointers
168 // to one contiguous buffer on the host (or vice versa), at the cost of a copy.
169 // (TODO: see if we can use host coherent memory to do away with the copy).
170 //
171 // We can see this abstraction in use via the implementation of
172 // transferWriteIov and transferReadIov below, which sync the iovec to/from a
173 // linear buffer if necessary, and then perform a corresponding pipe operation
174 // based on the box parameter's x and width values.
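//
// A minimal illustration (not part of the protocol definition): a guest pipe
// write of 4096 bytes staged at byte offset 256 of the R8 backing resource is
// expressed roughly as
//
//   virgl_box box = { .x = 256, .y = 0, .w = 4096, .h = 1 };
//   // TRANSFER_TO_HOST -> transferWriteIov(resId, 0, &box, iov, iovec_cnt)
//   // virgl_format_to_linear_base(R8, ...)    == 256   (start offset)
//   // virgl_format_to_total_xfer_len(R8, ...) == 4096  (length)
//
// i.e. for the R8 (one byte per pixel) staging resource, box.x and box.w map
// directly to a byte offset and byte length within the linear buffer.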
175
176 using android::AndroidPipe;
177 using android::base::AutoLock;
178 using android::base::DescriptorType;
179 using android::base::Lock;
180 using android::base::ManagedDescriptor;
181 using android::base::MetricsLogger;
182 using android::base::SharedMemory;
183
184 using android::emulation::HostmemIdMapping;
185 using android::emulation::ManagedDescriptorInfo;
186 using emugl::ABORT_REASON_OTHER;
187 using emugl::FatalError;
188
189 using VirtioGpuResId = uint32_t;
190
191 static constexpr int kPipeTryAgain = -2;
192
193 struct VirtioGpuCmd {
194 uint32_t op;
195 uint32_t cmdSize;
196 unsigned char buf[0];
197 } __attribute__((packed));
198
199 struct PipeCtxEntry {
200 std::string name;
201 uint32_t capsetId;
202 VirtioGpuCtxId ctxId;
203 GoldfishHostPipe* hostPipe;
204 int fence;
205 uint32_t addressSpaceHandle;
206 bool hasAddressSpaceHandle;
207 std::unordered_map<VirtioGpuResId, uint32_t> addressSpaceHandles;
208 };
209
210 enum class ResType {
211 // Used as a communication channel between the guest and the host
212 // which does not need an allocation on the host GPU.
213 PIPE,
214 // Used as a GPU data buffer.
215 BUFFER,
216 // Used as a GPU texture.
217 COLOR_BUFFER,
218 };
219
220 struct PipeResEntry {
221 virgl_renderer_resource_create_args args;
222 iovec* iov;
223 uint32_t numIovs;
224 void* linear;
225 size_t linearSize;
226 GoldfishHostPipe* hostPipe;
227 VirtioGpuCtxId ctxId;
228 void* hva;
229 uint64_t hvaSize;
230 uint64_t blobId;
231 uint32_t blobMem;
232 uint32_t blobFlags;
233 uint32_t caching;
234 ResType type;
235 std::shared_ptr<SharedMemory> ringBlob = nullptr;
236 bool externalAddr = false;
237 std::shared_ptr<ManagedDescriptorInfo> descriptorInfo = nullptr;
238 };
239
240 static inline uint32_t align_up(uint32_t n, uint32_t a) { return ((n + a - 1) / a) * a; }
241
242 static inline uint32_t align_up_power_of_2(uint32_t n, uint32_t a) {
243 return (n + (a - 1)) & ~(a - 1);
244 }
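// For example, align_up(10, 6) == 12 and align_up_power_of_2(10, 8) == 16; the
// power-of-2 variant assumes 'a' is a power of two (it is used below with
// a = 32 for the YV12 luma stride).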
245
246 #define VIRGL_FORMAT_NV12 166
247 #define VIRGL_FORMAT_YV12 163
248 #define VIRGL_FORMAT_P010 314
249
250 const uint32_t kGlBgra = 0x80e1;
251 const uint32_t kGlRgba = 0x1908;
252 const uint32_t kGlRgba16f = 0x881A;
253 const uint32_t kGlRgb565 = 0x8d62;
254 const uint32_t kGlRgba1010102 = 0x8059;
255 const uint32_t kGlR8 = 0x8229;
256 const uint32_t kGlR16 = 0x822A;
257 const uint32_t kGlRg8 = 0x822b;
258 const uint32_t kGlLuminance = 0x1909;
259 const uint32_t kGlLuminanceAlpha = 0x190a;
260 const uint32_t kGlUnsignedByte = 0x1401;
261 const uint32_t kGlUnsignedShort565 = 0x8363;
262
263 constexpr uint32_t kFwkFormatGlCompat = 0;
264 constexpr uint32_t kFwkFormatYV12 = 1;
265 // constexpr uint32_t kFwkFormatYUV420888 = 2;
266 constexpr uint32_t kFwkFormatNV12 = 3;
267 constexpr uint32_t kFwkFormatP010 = 4;
268
269 static inline bool virgl_format_is_yuv(uint32_t format) {
270 switch (format) {
271 case VIRGL_FORMAT_B8G8R8X8_UNORM:
272 case VIRGL_FORMAT_B8G8R8A8_UNORM:
273 case VIRGL_FORMAT_R8G8B8X8_UNORM:
274 case VIRGL_FORMAT_R8G8B8A8_UNORM:
275 case VIRGL_FORMAT_B5G6R5_UNORM:
276 case VIRGL_FORMAT_R8_UNORM:
277 case VIRGL_FORMAT_R16_UNORM:
278 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
279 case VIRGL_FORMAT_R8G8_UNORM:
280 case VIRGL_FORMAT_R10G10B10A2_UNORM:
281 return false;
282 case VIRGL_FORMAT_NV12:
283 case VIRGL_FORMAT_P010:
284 case VIRGL_FORMAT_YV12:
285 return true;
286 default:
287 VGP_FATAL() << "Unknown virgl format 0x" << std::hex << format;
288 return false;
289 }
290 }
291
292 static inline uint32_t virgl_format_to_gl(uint32_t virgl_format) {
293 switch (virgl_format) {
294 case VIRGL_FORMAT_B8G8R8X8_UNORM:
295 case VIRGL_FORMAT_B8G8R8A8_UNORM:
296 return kGlBgra;
297 case VIRGL_FORMAT_R8G8B8X8_UNORM:
298 case VIRGL_FORMAT_R8G8B8A8_UNORM:
299 return kGlRgba;
300 case VIRGL_FORMAT_B5G6R5_UNORM:
301 return kGlRgb565;
302 case VIRGL_FORMAT_R16_UNORM:
303 return kGlR16;
304 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
305 return kGlRgba16f;
306 case VIRGL_FORMAT_R8_UNORM:
307 return kGlR8;
308 case VIRGL_FORMAT_R8G8_UNORM:
309 return kGlRg8;
310 case VIRGL_FORMAT_NV12:
311 case VIRGL_FORMAT_P010:
312 case VIRGL_FORMAT_YV12:
313 // emulated as RGBA8888
314 return kGlRgba;
315 case VIRGL_FORMAT_R10G10B10A2_UNORM:
316 return kGlRgba1010102;
317 default:
318 return kGlRgba;
319 }
320 }
321
322 static inline uint32_t virgl_format_to_fwk_format(uint32_t virgl_format) {
323 switch (virgl_format) {
324 case VIRGL_FORMAT_NV12:
325 return kFwkFormatNV12;
326 case VIRGL_FORMAT_P010:
327 return kFwkFormatP010;
328 case VIRGL_FORMAT_YV12:
329 return kFwkFormatYV12;
330 case VIRGL_FORMAT_R8_UNORM:
331 case VIRGL_FORMAT_R16_UNORM:
332 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
333 case VIRGL_FORMAT_R8G8_UNORM:
334 case VIRGL_FORMAT_B8G8R8X8_UNORM:
335 case VIRGL_FORMAT_B8G8R8A8_UNORM:
336 case VIRGL_FORMAT_R8G8B8X8_UNORM:
337 case VIRGL_FORMAT_R8G8B8A8_UNORM:
338 case VIRGL_FORMAT_B5G6R5_UNORM:
339 case VIRGL_FORMAT_R10G10B10A2_UNORM:
340 default: // kFwkFormatGlCompat: No extra conversions needed
341 return kFwkFormatGlCompat;
342 }
343 }
344
345 static inline uint32_t gl_format_to_natural_type(uint32_t format) {
346 switch (format) {
347 case kGlBgra:
348 case kGlRgba:
349 case kGlLuminance:
350 case kGlLuminanceAlpha:
351 return kGlUnsignedByte;
352 case kGlRgb565:
353 return kGlUnsignedShort565;
354 default:
355 return kGlUnsignedByte;
356 }
357 }
358
359 static inline size_t virgl_format_to_linear_base(uint32_t format, uint32_t totalWidth,
360 uint32_t totalHeight, uint32_t x, uint32_t y,
361 uint32_t w, uint32_t h) {
362 if (virgl_format_is_yuv(format)) {
363 return 0;
364 } else {
365 uint32_t bpp = 4;
366 switch (format) {
367 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
368 bpp = 8;
369 break;
370 case VIRGL_FORMAT_B8G8R8X8_UNORM:
371 case VIRGL_FORMAT_B8G8R8A8_UNORM:
372 case VIRGL_FORMAT_R8G8B8X8_UNORM:
373 case VIRGL_FORMAT_R8G8B8A8_UNORM:
374 case VIRGL_FORMAT_R10G10B10A2_UNORM:
375 bpp = 4;
376 break;
377 case VIRGL_FORMAT_B5G6R5_UNORM:
378 case VIRGL_FORMAT_R8G8_UNORM:
379 case VIRGL_FORMAT_R16_UNORM:
380 bpp = 2;
381 break;
382 case VIRGL_FORMAT_R8_UNORM:
383 bpp = 1;
384 break;
385 default:
386 VGP_FATAL() << "Unknown format: 0x" << std::hex << format;
387 }
388
389 uint32_t stride = totalWidth * bpp;
390 return y * stride + x * bpp;
391 }
392 return 0;
393 }
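// For example, with VIRGL_FORMAT_R8G8B8A8_UNORM (bpp = 4) and totalWidth = 1024,
// a box at (x = 4, y = 2) starts at 2 * (1024 * 4) + 4 * 4 = 8208 bytes into the
// linear buffer; for YUV formats the whole buffer is always synced from offset 0.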
394
395 static inline size_t virgl_format_to_total_xfer_len(uint32_t format, uint32_t totalWidth,
396 uint32_t totalHeight, uint32_t x, uint32_t y,
397 uint32_t w, uint32_t h) {
398 if (virgl_format_is_yuv(format)) {
399 uint32_t bpp = format == VIRGL_FORMAT_P010 ? 2 : 1;
400
401 uint32_t yWidth = totalWidth;
402 uint32_t yHeight = totalHeight;
403 uint32_t yStridePixels;
404 if (format == VIRGL_FORMAT_NV12) {
405 yStridePixels = yWidth;
406 } else if (format == VIRGL_FORMAT_P010) {
407 yStridePixels = yWidth;
408 } else if (format == VIRGL_FORMAT_YV12) {
409 yStridePixels = align_up_power_of_2(yWidth, 32);
410 } else {
411 VGP_FATAL() << "Unknown yuv virgl format: 0x" << std::hex << format;
412 }
413 uint32_t yStrideBytes = yStridePixels * bpp;
414 uint32_t ySize = yStrideBytes * yHeight;
415
416 uint32_t uvStridePixels;
417 uint32_t uvPlaneCount;
418 if (format == VIRGL_FORMAT_NV12) {
419 uvStridePixels = yStridePixels;
420 uvPlaneCount = 1;
421 } else if (format == VIRGL_FORMAT_P010) {
422 uvStridePixels = yStridePixels;
423 uvPlaneCount = 1;
424 } else if (format == VIRGL_FORMAT_YV12) {
425 uvStridePixels = yStridePixels / 2;
426 uvPlaneCount = 2;
427 } else {
428 VGP_FATAL() << "Unknown yuv virgl format: 0x" << std::hex << format;
429 }
430 uint32_t uvStrideBytes = uvStridePixels * bpp;
431 uint32_t uvHeight = totalHeight / 2;
432 uint32_t uvSize = uvStrideBytes * uvHeight * uvPlaneCount;
433
434 uint32_t dataSize = ySize + uvSize;
435 return dataSize;
436 } else {
437 uint32_t bpp = 4;
438 switch (format) {
439 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
440 bpp = 8;
441 break;
442 case VIRGL_FORMAT_B8G8R8X8_UNORM:
443 case VIRGL_FORMAT_B8G8R8A8_UNORM:
444 case VIRGL_FORMAT_R8G8B8X8_UNORM:
445 case VIRGL_FORMAT_R8G8B8A8_UNORM:
446 case VIRGL_FORMAT_R10G10B10A2_UNORM:
447 bpp = 4;
448 break;
449 case VIRGL_FORMAT_B5G6R5_UNORM:
450 case VIRGL_FORMAT_R16_UNORM:
451 case VIRGL_FORMAT_R8G8_UNORM:
452 bpp = 2;
453 break;
454 case VIRGL_FORMAT_R8_UNORM:
455 bpp = 1;
456 break;
457 default:
458 VGP_FATAL() << "Unknown format: 0x" << std::hex << format;
459 }
460
461 uint32_t stride = totalWidth * bpp;
462 return (h - 1U) * stride + w * bpp;
463 }
464 return 0;
465 }
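// For example, a 640x480 VIRGL_FORMAT_YV12 resource (bpp = 1) gives
// yStride = align_up_power_of_2(640, 32) = 640 and ySize = 640 * 480 = 307200;
// the two chroma planes use uvStride = 320 and uvHeight = 240, so
// uvSize = 320 * 240 * 2 = 153600, for a total of 460800 bytes (w * h * 3/2).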
466
467 enum IovSyncDir {
468 IOV_TO_LINEAR = 0,
469 LINEAR_TO_IOV = 1,
470 };
471
472 static int sync_iov(PipeResEntry* res, uint64_t offset, const virgl_box* box, IovSyncDir dir) {
473 VGPLOG("offset: 0x%llx box: %u %u %u %u size %u x %u iovs %u linearSize %zu",
474 (unsigned long long)offset, box->x, box->y, box->w, box->h, res->args.width,
475 res->args.height, res->numIovs, res->linearSize);
476
477 if (box->x > res->args.width || box->y > res->args.height) {
478 VGP_FATAL() << "Box out of range of resource";
479 }
480 if (box->w == 0U || box->h == 0U) {
481 VGP_FATAL() << "Empty transfer";
482 }
483 if (box->x + box->w > res->args.width) {
484 VGP_FATAL() << "Box overflows resource width";
485 }
486
487 size_t linearBase = virgl_format_to_linear_base(
488 res->args.format, res->args.width, res->args.height, box->x, box->y, box->w, box->h);
489 size_t start = linearBase;
490 // The length counts (h - 1) full strides plus (w * bpp) for the final row,
491 // since the last row does not need to span the full stride.
492 size_t length = virgl_format_to_total_xfer_len(
493 res->args.format, res->args.width, res->args.height, box->x, box->y, box->w, box->h);
494 size_t end = start + length;
495
496 if (end > res->linearSize) {
497 VGP_FATAL() << "start + length overflows! linearSize " << res->linearSize << " start "
498 << start << " length " << length << " (wanted " << start + length << ")";
499 }
500
501 uint32_t iovIndex = 0;
502 size_t iovOffset = 0;
503 size_t written = 0;
504 char* linear = static_cast<char*>(res->linear);
505
506 while (written < length) {
507 if (iovIndex >= res->numIovs) {
508 VGP_FATAL() << "write request overflowed numIovs";
509 }
510
511 const char* iovBase_const = static_cast<const char*>(res->iov[iovIndex].iov_base);
512 char* iovBase = static_cast<char*>(res->iov[iovIndex].iov_base);
513 size_t iovLen = res->iov[iovIndex].iov_len;
514 size_t iovOffsetEnd = iovOffset + iovLen;
515
516 auto lower_intersect = std::max(iovOffset, start);
517 auto upper_intersect = std::min(iovOffsetEnd, end);
518 if (lower_intersect < upper_intersect) {
519 size_t toWrite = upper_intersect - lower_intersect;
520 switch (dir) {
521 case IOV_TO_LINEAR:
522 memcpy(linear + lower_intersect, iovBase_const + lower_intersect - iovOffset,
523 toWrite);
524 break;
525 case LINEAR_TO_IOV:
526 memcpy(iovBase + lower_intersect - iovOffset, linear + lower_intersect,
527 toWrite);
528 break;
529 default:
530 VGP_FATAL() << "Invalid sync dir " << dir;
531 }
532 written += toWrite;
533 }
534 ++iovIndex;
535 iovOffset += iovLen;
536 }
537
538 return 0;
539 }
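// For example, syncing the linear range [256, 4352) against two 4096-byte iov
// entries copies bytes [256, 4096) to/from the first entry and [4096, 4352)
// to/from the second, 4096 bytes in total.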
540
541 static uint64_t convert32to64(uint32_t lo, uint32_t hi) {
542 return ((uint64_t)lo) | (((uint64_t)hi) << 32);
543 }
544
545 class PipeVirglRenderer {
546 public:
547 PipeVirglRenderer() = default;
548
549 int init(void* cookie, int flags, const struct virgl_renderer_callbacks* callbacks) {
550 VGPLOG("cookie: %p", cookie);
551 mCookie = cookie;
552 mVirglRendererCallbacks = *callbacks;
553 mVirtioGpuOps = android_getVirtioGpuOps();
554 if (!mVirtioGpuOps) {
555 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "Could not get virtio gpu ops!";
556 }
557 mReadPixelsFunc = android_getReadPixelsFunc();
558 if (!mReadPixelsFunc) {
559 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "Could not get read pixels func!";
560 }
561 mAddressSpaceDeviceControlOps = get_address_space_device_control_ops();
562 if (!mAddressSpaceDeviceControlOps) {
563 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
564 << "Could not get address space device control ops!";
565 }
566 mVirtioGpuTimelines =
567 VirtioGpuTimelines::create(flags & GFXSTREAM_RENDERER_FLAGS_ASYNC_FENCE_CB);
568 VGPLOG("done");
569 return 0;
570 }
571
572 void resetPipe(GoldfishHwPipe* hwPipe, GoldfishHostPipe* hostPipe) {
573 VGPLOG("Want to reset hwpipe %p to hostpipe %p", hwPipe, hostPipe);
574 VirtioGpuCtxId asCtxId = (VirtioGpuCtxId)(uintptr_t)hwPipe;
575 auto it = mContexts.find(asCtxId);
576 if (it == mContexts.end()) {
577 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
578 << "fatal: pipe id " << asCtxId << " not found";
579 }
580
581 auto& entry = it->second;
582 VGPLOG("ctxid: %u prev hostpipe: %p", asCtxId, entry.hostPipe);
583 entry.hostPipe = hostPipe;
584 VGPLOG("ctxid: %u next hostpipe: %p", asCtxId, entry.hostPipe);
585
586 // Also update any resources associated with it
587 auto resourcesIt = mContextResources.find(asCtxId);
588
589 if (resourcesIt == mContextResources.end()) return;
590
591 const auto& resIds = resourcesIt->second;
592
593 for (auto resId : resIds) {
594 auto resEntryIt = mResources.find(resId);
595 if (resEntryIt == mResources.end()) {
596 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
597 << "res id " << resId << " entry not found";
598 }
599
600 auto& resEntry = resEntryIt->second;
601 resEntry.hostPipe = hostPipe;
602 }
603 }
604
605 int createContext(VirtioGpuCtxId ctx_id, uint32_t nlen, const char* name,
606 uint32_t context_init) {
607
608 std::string contextName(name, nlen);
609
610 VGPLOG("ctxid: %u len: %u name: %s", ctx_id, nlen, contextName.c_str());
611 auto ops = ensureAndGetServiceOps();
612 auto hostPipe = ops->guest_open_with_flags(reinterpret_cast<GoldfishHwPipe*>(ctx_id),
613 0x1 /* is virtio */);
614
615 if (!hostPipe) {
616 fprintf(stderr, "%s: failed to create hw pipe!\n", __func__);
617 return -1;
618 }
619 std::unordered_map<uint32_t, uint32_t> map;
620
621 PipeCtxEntry res = {
622 std::move(contextName), // contextName
623 context_init, // capsetId
624 ctx_id, // ctxId
625 hostPipe, // hostPipe
626 0, // fence
627 0, // AS handle
628 false, // does not have an AS handle
629 map, // resourceId --> ASG handle map
630 };
631
632 VGPLOG("initial host pipe for ctxid %u: %p", ctx_id, hostPipe);
633 mContexts[ctx_id] = res;
634 return 0;
635 }
636
637 int destroyContext(VirtioGpuCtxId handle) {
638 VGPLOG("ctxid: %u", handle);
639
640 auto it = mContexts.find(handle);
641 if (it == mContexts.end()) {
642 fprintf(stderr, "%s: could not find context handle %u\n", __func__, handle);
643 return -1;
644 }
645
646 if (it->second.hasAddressSpaceHandle) {
647 for (auto const& [resourceId, handle] : it->second.addressSpaceHandles) {
648 mAddressSpaceDeviceControlOps->destroy_handle(handle);
649 }
650 }
651
652 auto ops = ensureAndGetServiceOps();
653 auto hostPipe = it->second.hostPipe;
654
655 if (!hostPipe) {
656 fprintf(stderr, "%s: 0 is not a valid hostpipe\n", __func__);
657 return -1;
658 }
659
660 ops->guest_close(hostPipe, GOLDFISH_PIPE_CLOSE_GRACEFUL);
661
662 mContexts.erase(it);
663 return 0;
664 }
665
666 void setContextAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t handle,
667 uint32_t resourceId) {
668 auto ctxIt = mContexts.find(ctxId);
669 if (ctxIt == mContexts.end()) {
670 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "ctx id " << ctxId << " not found";
671 }
672
673 auto& ctxEntry = ctxIt->second;
674 ctxEntry.addressSpaceHandle = handle;
675 ctxEntry.hasAddressSpaceHandle = true;
676 ctxEntry.addressSpaceHandles[resourceId] = handle;
677 }
678
679 uint32_t getAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t resourceId) {
680 auto ctxIt = mContexts.find(ctxId);
681 if (ctxIt == mContexts.end()) {
682 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "ctx id " << ctxId << " not found ";
683 }
684
685 auto& ctxEntry = ctxIt->second;
686
687 if (!ctxEntry.addressSpaceHandles.count(resourceId)) {
688 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
689 << "ASG context with resource id " << resourceId << " not found ";
690 }
691
692 return ctxEntry.addressSpaceHandles[resourceId];
693 }
694
695 #define DECODE(variable, type, input) \
696 type variable = {}; \
697 memcpy(&variable, input, sizeof(type));
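// For example, DECODE(header, gfxstream::gfxstreamHeader, dwords) declares a
// zero-initialized local 'header' and memcpy()s sizeof(gfxstreamHeader) bytes
// into it from 'dwords', rather than casting the guest buffer pointer directly.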
698
699 void addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dwords, int dwordCount) {
700 DECODE(header, gfxstream::gfxstreamHeader, dwords)
701
702 switch (header.opCode) {
703 case GFXSTREAM_CONTEXT_CREATE: {
704 DECODE(contextCreate, gfxstream::gfxstreamContextCreate, dwords)
705
706 auto resEntryIt = mResources.find(contextCreate.resourceId);
707 if (resEntryIt == mResources.end()) {
708 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
709 << " ASG coherent resource " << contextCreate.resourceId << " not found";
710 }
711
712 auto ctxIt = mContexts.find(ctxId);
713 if (ctxIt == mContexts.end()) {
714 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
715 << "ctx id " << ctxId << " not found ";
716 }
717
718 auto& ctxEntry = ctxIt->second;
719 auto& resEntry = resEntryIt->second;
720
721 std::string name = ctxEntry.name + "-" + std::to_string(contextCreate.resourceId);
722 uint32_t handle = mAddressSpaceDeviceControlOps->gen_handle();
723
724 struct AddressSpaceCreateInfo createInfo = {
725 .handle = handle,
726 .type = android::emulation::VirtioGpuGraphics,
727 .createRenderThread = true,
728 .externalAddr = resEntry.hva,
729 .externalAddrSize = resEntry.hvaSize,
730 .virtioGpuContextId = ctxId,
731 .virtioGpuCapsetId = ctxEntry.capsetId,
732 .contextName = name.c_str(),
733 .contextNameSize = static_cast<uint32_t>(ctxEntry.name.size()),
734 };
735
736 mAddressSpaceDeviceControlOps->create_instance(createInfo);
737 setContextAddressSpaceHandleLocked(ctxId, handle, contextCreate.resourceId);
738 break;
739 }
740 case GFXSTREAM_CONTEXT_PING: {
741 DECODE(contextPing, gfxstream::gfxstreamContextPing, dwords)
742
743 struct android::emulation::AddressSpaceDevicePingInfo ping = {0};
744 ping.metadata = ASG_NOTIFY_AVAILABLE;
745
746 mAddressSpaceDeviceControlOps->ping_at_hva(
747 getAddressSpaceHandleLocked(ctxId, contextPing.resourceId), &ping);
748 break;
749 }
750 default:
751 break;
752 }
753 }
754
755 int submitCmd(VirtioGpuCtxId ctxId, void* buffer, int dwordCount) {
756 // TODO(kaiyili): embed the ring_idx into the command buffer to make it possible to dispatch
757 // commands on different rings.
758 VirtioGpuRing ring = VirtioGpuRingGlobal{};
759 VGPLOG("ctx: %" PRIu32 ", ring: %s buffer: %p dwords: %d", ctxId, to_string(ring).c_str(),
760 buffer, dwordCount);
761
762 if (!buffer) {
763 fprintf(stderr, "%s: error: buffer null\n", __func__);
764 return -1;
765 }
766
767 if (dwordCount < 1) {
768 fprintf(stderr, "%s: error: not enough dwords (got %d)\n", __func__, dwordCount);
769 return -1;
770 }
771
772 DECODE(header, gfxstream::gfxstreamHeader, buffer);
773 switch (header.opCode) {
774 case GFXSTREAM_CONTEXT_CREATE:
775 case GFXSTREAM_CONTEXT_PING:
776 case GFXSTREAM_CONTEXT_PING_WITH_RESPONSE:
777 addressSpaceProcessCmd(ctxId, (uint32_t*)buffer, dwordCount);
778 break;
779 case GFXSTREAM_CREATE_EXPORT_SYNC: {
780 DECODE(exportSync, gfxstream::gfxstreamCreateExportSync, buffer)
781
782 uint64_t sync_handle =
783 convert32to64(exportSync.syncHandleLo, exportSync.syncHandleHi);
784
785 VGPLOG("wait for gpu ring %s", to_string(ring));
786 auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
787 mVirtioGpuOps->async_wait_for_gpu_with_cb(sync_handle, [this, taskId] {
788 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
789 });
790 break;
791 }
792 case GFXSTREAM_CREATE_EXPORT_SYNC_VK:
793 case GFXSTREAM_CREATE_IMPORT_SYNC_VK: {
794 DECODE(exportSyncVK, gfxstream::gfxstreamCreateExportSyncVK, buffer)
795
796 uint64_t device_handle =
797 convert32to64(exportSyncVK.deviceHandleLo, exportSyncVK.deviceHandleHi);
798
799 uint64_t fence_handle =
800 convert32to64(exportSyncVK.fenceHandleLo, exportSyncVK.fenceHandleHi);
801
802 VGPLOG("wait for gpu ring %s", to_string(ring));
803 auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
804 mVirtioGpuOps->async_wait_for_gpu_vulkan_with_cb(
805 device_handle, fence_handle,
806 [this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
807 break;
808 }
809 case GFXSTREAM_CREATE_QSRI_EXPORT_VK: {
810 // The guest QSRI export assumes fence context support and always uses
811 // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
812 // the same ring as the fence created for the virtio gpu command or the
813 // fence may be signaled without properly waiting for the task to complete.
814 ring = VirtioGpuRingContextSpecific{
815 .mCtxId = ctxId,
816 .mRingIdx = 0,
817 };
818
819 DECODE(exportQSRI, gfxstream::gfxstreamCreateQSRIExportVK, buffer)
820
821 uint64_t image_handle =
822 convert32to64(exportQSRI.imageHandleLo, exportQSRI.imageHandleHi);
823
824 VGPLOG("wait for gpu vk qsri ring %u image 0x%llx", to_string(ring).c_str(),
825 (unsigned long long)image_handle);
826 auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
827 mVirtioGpuOps->async_wait_for_gpu_vulkan_qsri_with_cb(image_handle, [this, taskId] {
828 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
829 });
830 break;
831 }
832 case GFXSTREAM_PLACEHOLDER_COMMAND_VK: {
833 // Do nothing, this is a placeholder command
834 break;
835 }
836 default:
837 return -1;
838 }
839
840 return 0;
841 }
842
843 int createFence(uint64_t fence_id, const VirtioGpuRing& ring) {
844 VGPLOG("fenceid: %llu ring: %s", (unsigned long long)fence_id, to_string(ring).c_str());
845
846 struct {
847 FenceCompletionCallback operator()(const VirtioGpuRingGlobal&) {
848 return [renderer = mRenderer, fenceId = mFenceId] {
849 renderer->mVirglRendererCallbacks.write_fence(renderer->mCookie, fenceId);
850 };
851 }
852 FenceCompletionCallback operator()(const VirtioGpuRingContextSpecific& ring) {
853 #ifdef VIRGL_RENDERER_UNSTABLE_APIS
854 return [renderer = mRenderer, fenceId = mFenceId, ring] {
855 renderer->mVirglRendererCallbacks.write_context_fence(
856 renderer->mCookie, fenceId, ring.mCtxId, ring.mRingIdx);
857 };
858 #else
859 VGPLOG("enable unstable apis for the context specific fence feature");
860 return {};
861 #endif
862 }
863
864 PipeVirglRenderer* mRenderer;
865 VirtioGpuTimelines::FenceId mFenceId;
866 } visitor{
867 .mRenderer = this,
868 .mFenceId = fence_id,
869 };
870 FenceCompletionCallback callback = std::visit(visitor, ring);
871 if (!callback) {
872 // A context specific ring passed in, but the project is compiled without
873 // VIRGL_RENDERER_UNSTABLE_APIS defined.
874 return -EINVAL;
875 }
876 mVirtioGpuTimelines->enqueueFence(ring, fence_id, callback);
877
878 return 0;
879 }
880
881 void poll() { mVirtioGpuTimelines->poll(); }
882
883 enum pipe_texture_target {
884 PIPE_BUFFER,
885 PIPE_TEXTURE_1D,
886 PIPE_TEXTURE_2D,
887 PIPE_TEXTURE_3D,
888 PIPE_TEXTURE_CUBE,
889 PIPE_TEXTURE_RECT,
890 PIPE_TEXTURE_1D_ARRAY,
891 PIPE_TEXTURE_2D_ARRAY,
892 PIPE_TEXTURE_CUBE_ARRAY,
893 PIPE_MAX_TEXTURE_TYPES,
894 };
895
896 /**
897  * Resource binding flags -- state tracker must specify in advance all
898  * the ways a resource might be used.
899  */
900 #define PIPE_BIND_DEPTH_STENCIL (1 << 0) /* create_surface */
901 #define PIPE_BIND_RENDER_TARGET (1 << 1) /* create_surface */
902 #define PIPE_BIND_BLENDABLE (1 << 2) /* create_surface */
903 #define PIPE_BIND_SAMPLER_VIEW (1 << 3) /* create_sampler_view */
904 #define PIPE_BIND_VERTEX_BUFFER (1 << 4) /* set_vertex_buffers */
905 #define PIPE_BIND_INDEX_BUFFER (1 << 5) /* draw_elements */
906 #define PIPE_BIND_CONSTANT_BUFFER (1 << 6) /* set_constant_buffer */
907 #define PIPE_BIND_DISPLAY_TARGET (1 << 7) /* flush_front_buffer */
908 /* gap */
909 #define PIPE_BIND_STREAM_OUTPUT (1 << 10) /* set_stream_output_buffers */
910 #define PIPE_BIND_CURSOR (1 << 11) /* mouse cursor */
911 #define PIPE_BIND_CUSTOM (1 << 12) /* state-tracker/winsys usages */
912 #define PIPE_BIND_GLOBAL (1 << 13) /* set_global_binding */
913 #define PIPE_BIND_SHADER_BUFFER (1 << 14) /* set_shader_buffers */
914 #define PIPE_BIND_SHADER_IMAGE (1 << 15) /* set_shader_images */
915 #define PIPE_BIND_COMPUTE_RESOURCE (1 << 16) /* set_compute_resources */
916 #define PIPE_BIND_COMMAND_ARGS_BUFFER (1 << 17) /* pipe_draw_info.indirect */
917 #define PIPE_BIND_QUERY_BUFFER (1 << 18) /* get_query_result_resource */
918
919 ResType getResourceType(const struct virgl_renderer_resource_create_args& args) const {
920 if (args.target == PIPE_BUFFER) {
921 return ResType::PIPE;
922 }
923
924 if (args.format != VIRGL_FORMAT_R8_UNORM) {
925 return ResType::COLOR_BUFFER;
926 }
927 if (args.bind & VIRGL_BIND_SAMPLER_VIEW) {
928 return ResType::COLOR_BUFFER;
929 }
930 if (args.bind & VIRGL_BIND_RENDER_TARGET) {
931 return ResType::COLOR_BUFFER;
932 }
933 if (args.bind & VIRGL_BIND_SCANOUT) {
934 return ResType::COLOR_BUFFER;
935 }
936 if (args.bind & VIRGL_BIND_CURSOR) {
937 return ResType::COLOR_BUFFER;
938 }
939 if (!(args.bind & VIRGL_BIND_LINEAR)) {
940 return ResType::COLOR_BUFFER;
941 }
942
943 return ResType::BUFFER;
944 }
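// For illustration: a PIPE_TEXTURE_2D resource created with VIRGL_FORMAT_R8_UNORM
// and only VIRGL_BIND_LINEAR set classifies as ResType::BUFFER, while any
// sampled/render-target/scanout/cursor binding or a non-R8 format makes it a
// ResType::COLOR_BUFFER.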
945
946 void handleCreateResourceBuffer(struct virgl_renderer_resource_create_args* args) {
947 mVirtioGpuOps->create_buffer_with_handle(args->width * args->height, args->handle);
948 }
949
950 void handleCreateResourceColorBuffer(struct virgl_renderer_resource_create_args* args) {
951 // corresponds to allocation of gralloc buffer in minigbm
952 VGPLOG("w h %u %u resid %u -> rcCreateColorBufferWithHandle", args->width, args->height,
953 args->handle);
954
955 const uint32_t glformat = virgl_format_to_gl(args->format);
956 const uint32_t fwkformat = virgl_format_to_fwk_format(args->format);
957 mVirtioGpuOps->create_color_buffer_with_handle(args->width, args->height, glformat,
958 fwkformat, args->handle);
959 mVirtioGpuOps->set_guest_managed_color_buffer_lifetime(true /* guest manages lifetime */);
960 mVirtioGpuOps->open_color_buffer(args->handle);
961 }
962
963 int createResource(struct virgl_renderer_resource_create_args* args, struct iovec* iov,
964 uint32_t num_iovs) {
965 VGPLOG("handle: %u. num iovs: %u", args->handle, num_iovs);
966
967 const auto resType = getResourceType(*args);
968 switch (resType) {
969 case ResType::PIPE:
970 break;
971 case ResType::BUFFER:
972 handleCreateResourceBuffer(args);
973 break;
974 case ResType::COLOR_BUFFER:
975 handleCreateResourceColorBuffer(args);
976 break;
977 }
978
979 PipeResEntry e;
980 e.args = *args;
981 e.linear = 0;
982 e.hostPipe = 0;
983 e.hva = nullptr;
984 e.hvaSize = 0;
985 e.blobId = 0;
986 e.blobMem = 0;
987 e.type = resType;
988 allocResource(e, iov, num_iovs);
989
990 mResources[args->handle] = e;
991 return 0;
992 }
993
994 void unrefResource(uint32_t toUnrefId) {
995 VGPLOG("handle: %u", toUnrefId);
996
997 auto it = mResources.find(toUnrefId);
998 if (it == mResources.end()) return;
999
1000 auto contextsIt = mResourceContexts.find(toUnrefId);
1001 if (contextsIt != mResourceContexts.end()) {
1002 mResourceContexts.erase(contextsIt->first);
1003 }
1004
1005 for (auto& ctxIdResources : mContextResources) {
1006 detachResourceLocked(ctxIdResources.first, toUnrefId);
1007 }
1008
1009 auto& entry = it->second;
1010 switch (entry.type) {
1011 case ResType::PIPE:
1012 break;
1013 case ResType::BUFFER:
1014 mVirtioGpuOps->close_buffer(toUnrefId);
1015 break;
1016 case ResType::COLOR_BUFFER:
1017 mVirtioGpuOps->close_color_buffer(toUnrefId);
1018 break;
1019 }
1020
1021 if (entry.linear) {
1022 free(entry.linear);
1023 entry.linear = nullptr;
1024 }
1025
1026 if (entry.iov) {
1027 free(entry.iov);
1028 entry.iov = nullptr;
1029 entry.numIovs = 0;
1030 }
1031
1032 if (entry.externalAddr && !entry.ringBlob) {
1033 android::aligned_buf_free(entry.hva);
1034 }
1035
1036 entry.hva = nullptr;
1037 entry.hvaSize = 0;
1038 entry.blobId = 0;
1039
1040 mResources.erase(it);
1041 }
1042
1043 int attachIov(int resId, iovec* iov, int num_iovs) {
1044
1045 VGPLOG("resid: %d numiovs: %d", resId, num_iovs);
1046
1047 auto it = mResources.find(resId);
1048 if (it == mResources.end()) return ENOENT;
1049
1050 auto& entry = it->second;
1051 VGPLOG("res linear: %p", entry.linear);
1052 if (!entry.linear) allocResource(entry, iov, num_iovs);
1053
1054 VGPLOG("done");
1055 return 0;
1056 }
1057
1058 void detachIov(int resId, iovec** iov, int* num_iovs) {
1059
1060 auto it = mResources.find(resId);
1061 if (it == mResources.end()) return;
1062
1063 auto& entry = it->second;
1064
1065 if (num_iovs) {
1066 *num_iovs = entry.numIovs;
1067 VGPLOG("resid: %d numIovs: %d", resId, *num_iovs);
1068 } else {
1069 VGPLOG("resid: %d numIovs: 0", resId);
1070 }
1071
1072 entry.numIovs = 0;
1073
1074 if (entry.iov) free(entry.iov);
1075 entry.iov = nullptr;
1076
1077 if (iov) {
1078 *iov = entry.iov;
1079 }
1080
1081 allocResource(entry, entry.iov, entry.numIovs);
1082 VGPLOG("done");
1083 }
1084
1085 int handleTransferReadPipe(PipeResEntry* res, uint64_t offset, virgl_box* box) {
1086 if (res->type != ResType::PIPE) {
1087 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
1088 << "Resource " << res->args.handle << " is not a PIPE resource.";
1089 return -1;
1090 }
1091
1092 // Do the pipe service op here, if there is an associated hostpipe.
1093 auto hostPipe = res->hostPipe;
1094 if (!hostPipe) return -1;
1095
1096 auto ops = ensureAndGetServiceOps();
1097
1098 size_t readBytes = 0;
1099 size_t wantedBytes = readBytes + (size_t)box->w;
1100
1101 while (readBytes < wantedBytes) {
1102 GoldfishPipeBuffer buf = {
1103 ((char*)res->linear) + box->x + readBytes,
1104 wantedBytes - readBytes,
1105 };
1106 auto status = ops->guest_recv(hostPipe, &buf, 1);
1107
1108 if (status > 0) {
1109 readBytes += status;
1110 } else if (status != kPipeTryAgain) {
1111 return EIO;
1112 }
1113 }
1114
1115 return 0;
1116 }
1117
1118 int handleTransferWritePipe(PipeResEntry* res, uint64_t offset, virgl_box* box) {
1119 if (res->type != ResType::PIPE) {
1120 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
1121 << "Resource " << res->args.handle << " is not a PIPE resource.";
1122 return -1;
1123 }
1124
1125 // Do the pipe service op here, if there is an associated hostpipe.
1126 auto hostPipe = res->hostPipe;
1127 if (!hostPipe) {
1128 VGPLOG("No hostPipe");
1129 return -1;
1130 }
1131
1132 VGPLOG("resid: %d offset: 0x%llx hostpipe: %p", res->args.handle,
1133 (unsigned long long)offset, hostPipe);
1134
1135 auto ops = ensureAndGetServiceOps();
1136
1137 size_t writtenBytes = 0;
1138 size_t wantedBytes = (size_t)box->w;
1139
1140 while (writtenBytes < wantedBytes) {
1141 GoldfishPipeBuffer buf = {
1142 ((char*)res->linear) + box->x + writtenBytes,
1143 wantedBytes - writtenBytes,
1144 };
1145
1146 // guest_send can now reallocate the pipe.
1147 void* hostPipeBefore = hostPipe;
1148 auto status = ops->guest_send(&hostPipe, &buf, 1);
1149 if (hostPipe != hostPipeBefore) {
1150 resetPipe((GoldfishHwPipe*)(uintptr_t)(res->ctxId), hostPipe);
1151 auto it = mResources.find(res->args.handle);
1152 res = &it->second;
1153 }
1154
1155 if (status > 0) {
1156 writtenBytes += status;
1157 } else if (status != kPipeTryAgain) {
1158 return EIO;
1159 }
1160 }
1161
1162 return 0;
1163 }
1164
1165 int handleTransferReadBuffer(PipeResEntry* res, uint64_t offset, virgl_box* box) {
1166 if (res->type != ResType::BUFFER) {
1167 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
1168 << "Resource " << res->args.handle << " is not a BUFFER resource.";
1169 return -1;
1170 }
1171
1172 mVirtioGpuOps->read_buffer(res->args.handle, 0, res->args.width * res->args.height,
1173 res->linear);
1174 return 0;
1175 }
1176
1177 int handleTransferWriteBuffer(PipeResEntry* res, uint64_t offset, virgl_box* box) {
1178 if (res->type != ResType::BUFFER) {
1179 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
1180 << res->args.handle << " is not a BUFFER resource.";
1181 return -1;
1182 }
1183
1184 mVirtioGpuOps->update_buffer(res->args.handle, 0, res->args.width * res->args.height,
1185 res->linear);
1186 return 0;
1187 }
1188
1189 void handleTransferReadColorBuffer(PipeResEntry* res, uint64_t offset, virgl_box* box) {
1190 if (res->type != ResType::COLOR_BUFFER) {
1191 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
1192 << "Resource " << res->args.handle << " is not a COLOR_BUFFER resource.";
1193 return;
1194 }
1195
1196 auto glformat = virgl_format_to_gl(res->args.format);
1197 auto gltype = gl_format_to_natural_type(glformat);
1198
1199 // We always xfer the whole thing again from GL
1200 // since it's fiddly to calc / copy-out subregions
1201 if (virgl_format_is_yuv(res->args.format)) {
1202 mVirtioGpuOps->read_color_buffer_yuv(res->args.handle, 0, 0, res->args.width,
1203 res->args.height, res->linear, res->linearSize);
1204 } else {
1205 mVirtioGpuOps->read_color_buffer(res->args.handle, 0, 0, res->args.width,
1206 res->args.height, glformat, gltype, res->linear);
1207 }
1208 }
1209
1210 void handleTransferWriteColorBuffer(PipeResEntry* res, uint64_t offset, virgl_box* box) {
1211 if (res->type != ResType::COLOR_BUFFER) {
1212 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
1213 << "Resource " << res->args.handle << " is not a COLOR_BUFFER resource.";
1214 return;
1215 }
1216
1217 auto glformat = virgl_format_to_gl(res->args.format);
1218 auto gltype = gl_format_to_natural_type(glformat);
1219
1220 // We always xfer the whole thing again to GL
1221 // since it's fiddly to calc / copy-out subregions
1222 mVirtioGpuOps->update_color_buffer(res->args.handle, 0, 0, res->args.width,
1223 res->args.height, glformat, gltype, res->linear);
1224 }
1225
1226 int transferReadIov(int resId, uint64_t offset, virgl_box* box, struct iovec* iov,
1227 int iovec_cnt) {
1228
1229 VGPLOG("resid: %d offset: 0x%llx. box: %u %u %u %u", resId, (unsigned long long)offset,
1230 box->x, box->y, box->w, box->h);
1231
1232 auto it = mResources.find(resId);
1233 if (it == mResources.end()) return EINVAL;
1234
1235 int ret = 0;
1236
1237 auto& entry = it->second;
1238 switch (entry.type) {
1239 case ResType::PIPE:
1240 ret = handleTransferReadPipe(&entry, offset, box);
1241 break;
1242 case ResType::BUFFER:
1243 ret = handleTransferReadBuffer(&entry, offset, box);
1244 break;
1245 case ResType::COLOR_BUFFER:
1246 handleTransferReadColorBuffer(&entry, offset, box);
1247 break;
1248 }
1249
1250 if (ret != 0) {
1251 return ret;
1252 }
1253
1254 VGPLOG("Linear first word: %d", *(int*)(entry.linear));
1255
1256 if (iovec_cnt) {
1257 PipeResEntry e = {
1258 entry.args, iov, (uint32_t)iovec_cnt, entry.linear, entry.linearSize,
1259 };
1260 ret = sync_iov(&e, offset, box, LINEAR_TO_IOV);
1261 } else {
1262 ret = sync_iov(&entry, offset, box, LINEAR_TO_IOV);
1263 }
1264
1265 VGPLOG("done");
1266 return ret;
1267 }
1268
1269 int transferWriteIov(int resId, uint64_t offset, virgl_box* box, struct iovec* iov,
1270 int iovec_cnt) {
1271 VGPLOG("resid: %d offset: 0x%llx", resId, (unsigned long long)offset);
1272 auto it = mResources.find(resId);
1273 if (it == mResources.end()) return EINVAL;
1274
1275 auto& entry = it->second;
1276
1277 int ret = 0;
1278 if (iovec_cnt) {
1279 PipeResEntry e = {
1280 entry.args, iov, (uint32_t)iovec_cnt, entry.linear, entry.linearSize,
1281 };
1282 ret = sync_iov(&e, offset, box, IOV_TO_LINEAR);
1283 } else {
1284 ret = sync_iov(&entry, offset, box, IOV_TO_LINEAR);
1285 }
1286
1287 if (ret != 0) {
1288 return ret;
1289 }
1290
1291 switch (entry.type) {
1292 case ResType::PIPE:
1293 ret = handleTransferWritePipe(&entry, offset, box);
1294 break;
1295 case ResType::BUFFER:
1296 ret = handleTransferWriteBuffer(&entry, offset, box);
1297 break;
1298 case ResType::COLOR_BUFFER:
1299 handleTransferWriteColorBuffer(&entry, offset, box);
1300 break;
1301 }
1302
1303 VGPLOG("done");
1304 return ret;
1305 }
1306
1307 void getCapset(uint32_t set, uint32_t *max_size) {
1308 // Only one capset right now.
1309 *max_size = sizeof(struct gfxstream::gfxstreamCapset);
1310 }
1311
1312 void fillCaps(uint32_t set, void* caps) {
1313 struct gfxstream::gfxstreamCapset *capset =
1314 reinterpret_cast<struct gfxstream::gfxstreamCapset*>(caps);
1315 if (capset) {
1316 memset(capset, 0, sizeof(*capset));
1317
1318 capset->protocolVersion = 1;
1319 capset->ringSize = 12288;
1320 capset->bufferSize = 1048576;
1321
1322 auto vk_emu = gfxstream::vk::getGlobalVkEmulation();
1323 if (vk_emu && vk_emu->live && vk_emu->representativeColorBufferMemoryTypeIndex) {
1324 capset->colorBufferMemoryIndex = *vk_emu->representativeColorBufferMemoryTypeIndex;
1325 }
1326 }
1327 }
1328
1329 void attachResource(uint32_t ctxId, uint32_t resId) {
1330 VGPLOG("ctxid: %u resid: %u", ctxId, resId);
1331
1332 auto resourcesIt = mContextResources.find(ctxId);
1333
1334 if (resourcesIt == mContextResources.end()) {
1335 std::vector<VirtioGpuResId> ids;
1336 ids.push_back(resId);
1337 mContextResources[ctxId] = ids;
1338 } else {
1339 auto& ids = resourcesIt->second;
1340 auto idIt = std::find(ids.begin(), ids.end(), resId);
1341 if (idIt == ids.end()) ids.push_back(resId);
1342 }
1343
1344 auto contextsIt = mResourceContexts.find(resId);
1345
1346 if (contextsIt == mResourceContexts.end()) {
1347 std::vector<VirtioGpuCtxId> ids;
1348 ids.push_back(ctxId);
1349 mResourceContexts[resId] = ids;
1350 } else {
1351 auto& ids = contextsIt->second;
1352 auto idIt = std::find(ids.begin(), ids.end(), ctxId);
1353 if (idIt == ids.end()) ids.push_back(ctxId);
1354 }
1355
1356 // Associate the host pipe of the resource entry with the host pipe of
1357 // the context entry. That is, the last context to call attachResource
1358 // wins if there is any conflict.
1359 auto ctxEntryIt = mContexts.find(ctxId);
1360 auto resEntryIt = mResources.find(resId);
1361
1362 if (ctxEntryIt == mContexts.end() || resEntryIt == mResources.end()) return;
1363
1364 VGPLOG("hostPipe: %p", ctxEntryIt->second.hostPipe);
1365 resEntryIt->second.hostPipe = ctxEntryIt->second.hostPipe;
1366 resEntryIt->second.ctxId = ctxId;
1367 }
1368
1369 void detachResource(uint32_t ctxId, uint32_t toUnrefId) {
1370 VGPLOG("ctxid: %u resid: %u", ctxId, toUnrefId);
1371 detachResourceLocked(ctxId, toUnrefId);
1372 }
1373
1374 int getResourceInfo(uint32_t resId, struct virgl_renderer_resource_info* info) {
1375 VGPLOG("resid: %u", resId);
1376 if (!info) return EINVAL;
1377
1378 auto it = mResources.find(resId);
1379 if (it == mResources.end()) return ENOENT;
1380
1381 auto& entry = it->second;
1382
1383 uint32_t bpp = 4U;
1384 switch (entry.args.format) {
1385 case VIRGL_FORMAT_B8G8R8A8_UNORM:
1386 info->drm_fourcc = DRM_FORMAT_ARGB8888;
1387 break;
1388 case VIRGL_FORMAT_B5G6R5_UNORM:
1389 info->drm_fourcc = DRM_FORMAT_RGB565;
1390 bpp = 2U;
1391 break;
1392 case VIRGL_FORMAT_R8G8B8A8_UNORM:
1393 info->drm_fourcc = DRM_FORMAT_ABGR8888;
1394 break;
1395 case VIRGL_FORMAT_R8G8B8X8_UNORM:
1396 info->drm_fourcc = DRM_FORMAT_XBGR8888;
1397 break;
1398 case VIRGL_FORMAT_R8_UNORM:
1399 info->drm_fourcc = DRM_FORMAT_R8;
1400 bpp = 1U;
1401 break;
1402 default:
1403 return EINVAL;
1404 }
1405
1406 info->stride = align_up(entry.args.width * bpp, 16U);
1407 info->virgl_format = entry.args.format;
1408 info->handle = entry.args.handle;
1409 info->height = entry.args.height;
1410 info->width = entry.args.width;
1411 info->depth = entry.args.depth;
1412 info->flags = entry.args.flags;
1413 info->tex_id = 0;
1414 return 0;
1415 }
1416
1417 void flushResourceAndReadback(uint32_t res_handle, uint32_t x, uint32_t y, uint32_t width,
1418 uint32_t height, void* pixels, uint32_t max_bytes) {
1419 (void)x;
1420 (void)y;
1421 (void)width;
1422 (void)height;
1423 auto taskId = mVirtioGpuTimelines->enqueueTask(VirtioGpuRingGlobal{});
1424 mVirtioGpuOps->async_post_color_buffer(
1425 res_handle, [this, taskId](std::shared_future<void> waitForGpu) {
1426 waitForGpu.wait();
1427 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
1428 });
1429 // TODO: displayId > 0 ?
1430 uint32_t displayId = 0;
1431 if (pixels) {
1432 mReadPixelsFunc(pixels, max_bytes, displayId);
1433 }
1434 }
1435
1436 int createRingBlob(PipeResEntry& entry, uint32_t res_handle,
1437 const struct stream_renderer_create_blob* create_blob,
1438 const struct stream_renderer_handle* handle) {
1439 if (feature_is_enabled(kFeature_ExternalBlob)) {
1440 std::string name = "shared-memory-" + std::to_string(res_handle);
1441 auto ringBlob = std::make_shared<SharedMemory>(name, create_blob->size);
1442 int ret = ringBlob->create(0600);
1443 if (ret) {
1444 VGPLOG("Failed to create shared memory blob");
1445 return ret;
1446 }
1447
1448 entry.ringBlob = ringBlob;
1449 entry.hva = ringBlob->get();
1450 } else {
1451 void* addr =
1452 android::aligned_buf_alloc(ADDRESS_SPACE_GRAPHICS_PAGE_SIZE, create_blob->size);
1453 if (addr == nullptr) {
1454 VGPLOG("Failed to allocate ring blob");
1455 return -ENOMEM;
1456 }
1457
1458 entry.hva = addr;
1459 }
1460
1461 entry.hvaSize = create_blob->size;
1462 entry.externalAddr = true;
1463 entry.caching = STREAM_RENDERER_MAP_CACHE_CACHED;
1464
1465 return 0;
1466 }
1467
1468 int createBlob(uint32_t ctx_id, uint32_t res_handle,
1469 const struct stream_renderer_create_blob* create_blob,
1470 const struct stream_renderer_handle* handle) {
1471 PipeResEntry e;
1472 struct virgl_renderer_resource_create_args args = {0};
1473 e.args = args;
1474 e.hostPipe = 0;
1475
1476 if (create_blob->blob_id == 0) {
1477 int ret = createRingBlob(e, res_handle, create_blob, handle);
1478 if (ret) {
1479 return ret;
1480 }
1481 } else if (feature_is_enabled(kFeature_ExternalBlob)) {
1482 if (create_blob->blob_mem == STREAM_BLOB_MEM_GUEST &&
1483 (create_blob->blob_flags & STREAM_BLOB_FLAG_CREATE_GUEST_HANDLE)) {
1484 #if defined(__linux__) || defined(__QNX__)
1485 ManagedDescriptor managedHandle(handle->os_handle);
1486 HostmemIdMapping::get()->addDescriptorInfo(create_blob->blob_id,
1487 std::move(managedHandle),
1488 handle->handle_type, 0, std::nullopt);
1489
1490 e.caching = STREAM_RENDERER_MAP_CACHE_CACHED;
1491 #else
1492 return -EINVAL;
1493 #endif
1494 } else {
1495 auto descriptorInfoOpt =
1496 HostmemIdMapping::get()->removeDescriptorInfo(create_blob->blob_id);
1497 if (descriptorInfoOpt) {
1498 e.descriptorInfo =
1499 std::make_shared<ManagedDescriptorInfo>(std::move(*descriptorInfoOpt));
1500 } else {
1501 return -EINVAL;
1502 }
1503
1504 e.caching = e.descriptorInfo->caching;
1505 }
1506 } else {
1507 auto entry = HostmemIdMapping::get()->get(create_blob->blob_id);
1508 e.hva = entry.hva;
1509 e.hvaSize = entry.size;
1510 e.args.width = entry.size;
1511 e.caching = entry.caching;
1512 }
1513
1514 e.blobId = create_blob->blob_id;
1515 e.blobMem = create_blob->blob_mem;
1516 e.blobFlags = create_blob->blob_flags;
1517 e.iov = nullptr;
1518 e.numIovs = 0;
1519 e.linear = 0;
1520 e.linearSize = 0;
1521
1522 mResources[res_handle] = e;
1523 return 0;
1524 }
1525
1526 int resourceMap(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1527
1528 if (feature_is_enabled(kFeature_ExternalBlob)) return -EINVAL;
1529
1530 auto it = mResources.find(res_handle);
1531 if (it == mResources.end()) {
1532 if (hvaOut) *hvaOut = nullptr;
1533 if (sizeOut) *sizeOut = 0;
1534 return -1;
1535 }
1536
1537 const auto& entry = it->second;
1538
1539 if (hvaOut) *hvaOut = entry.hva;
1540 if (sizeOut) *sizeOut = entry.hvaSize;
1541 return 0;
1542 }
1543
1544 int resourceUnmap(uint32_t res_handle) {
1545 auto it = mResources.find(res_handle);
1546 if (it == mResources.end()) {
1547 return -1;
1548 }
1549
1550 // TODO(lfy): Good place to run any registered cleanup callbacks.
1551 // No-op for now.
1552 return 0;
1553 }
1554
1555 int platformImportResource(int res_handle, int res_info, void* resource) {
1556 auto it = mResources.find(res_handle);
1557 if (it == mResources.end()) return -1;
1558 bool success = mVirtioGpuOps->platform_import_resource(res_handle, res_info, resource);
1559 return success ? 0 : -1;
1560 }
1561
1562 int platformResourceInfo(int res_handle, int* width, int* height, int* internal_format) {
1563 auto it = mResources.find(res_handle);
1564 if (it == mResources.end()) return -1;
1565 bool success =
1566 mVirtioGpuOps->platform_resource_info(res_handle, width, height, internal_format);
1567 return success ? 0 : -1;
1568 }
1569
1570 void* platformCreateSharedEglContext() {
1571 return mVirtioGpuOps->platform_create_shared_egl_context();
1572 }
1573
1574 int platformDestroySharedEglContext(void* context) {
1575 bool success = mVirtioGpuOps->platform_destroy_shared_egl_context(context);
1576 return success ? 0 : -1;
1577 }
1578
1579 int resourceMapInfo(uint32_t res_handle, uint32_t* map_info) {
1580 auto it = mResources.find(res_handle);
1581 if (it == mResources.end()) return -1;
1582
1583 const auto& entry = it->second;
1584 *map_info = entry.caching;
1585 return 0;
1586 }
1587
1588 int exportBlob(uint32_t res_handle, struct stream_renderer_handle* handle) {
1589
1590 auto it = mResources.find(res_handle);
1591 if (it == mResources.end()) {
1592 return -EINVAL;
1593 }
1594
1595 auto& entry = it->second;
1596 if (entry.ringBlob) {
1597 // Handle ownership transferred to VMM, gfxstream keeps the mapping.
1598 #ifdef _WIN32
1599 handle->os_handle =
1600 static_cast<int64_t>(reinterpret_cast<intptr_t>(entry.ringBlob->releaseHandle()));
1601 #else
1602 handle->os_handle = static_cast<int64_t>(entry.ringBlob->releaseHandle());
1603 #endif
1604 handle->handle_type = STREAM_MEM_HANDLE_TYPE_SHM;
1605 return 0;
1606 }
1607
1608 if (entry.descriptorInfo) {
1609 bool shareable = entry.blobFlags &
1610 (STREAM_BLOB_FLAG_USE_SHAREABLE | STREAM_BLOB_FLAG_USE_CROSS_DEVICE);
1611
1612 DescriptorType rawDescriptor;
1613 if (shareable) {
1614 // TODO: Add ManagedDescriptor::{clone, dup} method and use it;
1615 // This should have no effect since gfxstream allocates mappable-only buffers
1616 // currently
1617 return -EINVAL;
1618 } else {
1619 auto rawDescriptorOpt = entry.descriptorInfo->descriptor.release();
1620 if (rawDescriptorOpt)
1621 rawDescriptor = *rawDescriptorOpt;
1622 else
1623 return -EINVAL;
1624 }
1625
1626 handle->handle_type = entry.descriptorInfo->handleType;
1627
1628 #ifdef _WIN32
1629 handle->os_handle = static_cast<int64_t>(reinterpret_cast<intptr_t>(rawDescriptor));
1630 #else
1631 handle->os_handle = static_cast<int64_t>(rawDescriptor);
1632 #endif
1633
1634 return 0;
1635 }
1636
1637 return -EINVAL;
1638 }
1639
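    // Fills |vulkan_info| (memory type index plus device/driver UUIDs) for a
    // resource whose descriptor info carries Vulkan export information; fails
    // with -EINVAL otherwise.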
    int vulkanInfo(uint32_t res_handle, struct stream_renderer_vulkan_info* vulkan_info) {
        auto it = mResources.find(res_handle);
        if (it == mResources.end()) return -EINVAL;

        const auto& entry = it->second;
        if (entry.descriptorInfo && entry.descriptorInfo->vulkanInfoOpt) {
            vulkan_info->memory_index = (*entry.descriptorInfo->vulkanInfoOpt).memoryIndex;
            memcpy(vulkan_info->device_id.device_uuid,
                   (*entry.descriptorInfo->vulkanInfoOpt).deviceUUID,
                   sizeof(vulkan_info->device_id.device_uuid));
            memcpy(vulkan_info->device_id.driver_uuid,
                   (*entry.descriptorInfo->vulkanInfoOpt).driverUUID,
                   sizeof(vulkan_info->device_id.driver_uuid));
            return 0;
        }

        return -EINVAL;
    }

   private:
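    // Records the guest iovec list for a resource and (re)allocates the
    // host-side linear staging buffer that transfers are copied through.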
    void allocResource(PipeResEntry& entry, iovec* iov, int num_iovs) {
        VGPLOG("entry linear: %p", entry.linear);
        if (entry.linear) free(entry.linear);

        size_t linearSize = 0;
        for (uint32_t i = 0; i < num_iovs; ++i) {
            VGPLOG("iov base: %p", iov[i].iov_base);
            linearSize += iov[i].iov_len;
            VGPLOG("has iov of %zu. linearSize current: %zu", iov[i].iov_len, linearSize);
        }
        VGPLOG("final linearSize: %zu", linearSize);

        void* linear = nullptr;

        if (linearSize) linear = malloc(linearSize);

        entry.iov = (iovec*)malloc(sizeof(*iov) * num_iovs);
        entry.numIovs = num_iovs;
        memcpy(entry.iov, iov, num_iovs * sizeof(*iov));
        entry.linear = linear;
        entry.linearSize = linearSize;

        virgl_box initbox;
        initbox.x = 0;
        initbox.y = 0;
        initbox.w = (uint32_t)linearSize;
        initbox.h = 1;
    }

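    // Removes |toUnrefId| from the context's resource list, clears the
    // resource's pipe/context association, and destroys any address space
    // device handle the context had registered for it.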
    void detachResourceLocked(uint32_t ctxId, uint32_t toUnrefId) {
        VGPLOG("ctxid: %u resid: %u", ctxId, toUnrefId);

        auto it = mContextResources.find(ctxId);
        if (it == mContextResources.end()) return;

        std::vector<VirtioGpuResId> withoutRes;
        for (auto resId : it->second) {
            if (resId != toUnrefId) {
                withoutRes.push_back(resId);
            }
        }
        mContextResources[ctxId] = withoutRes;

        auto resIt = mResources.find(toUnrefId);
        if (resIt == mResources.end()) return;

        resIt->second.hostPipe = 0;
        resIt->second.ctxId = 0;

        auto ctxIt = mContexts.find(ctxId);
        if (ctxIt != mContexts.end()) {
            auto& ctxEntry = ctxIt->second;
            if (ctxEntry.addressSpaceHandles.count(toUnrefId)) {
                uint32_t handle = ctxEntry.addressSpaceHandles[toUnrefId];
                mAddressSpaceDeviceControlOps->destroy_handle(handle);
                ctxEntry.addressSpaceHandles.erase(toUnrefId);
            }
        }
    }

    inline const GoldfishPipeServiceOps* ensureAndGetServiceOps() {
        if (mServiceOps) return mServiceOps;
        mServiceOps = goldfish_pipe_get_service_ops();
        return mServiceOps;
    }

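    // Renderer state: callbacks provided at init time, plus per-context and
    // per-resource tracking tables.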
    void* mCookie = nullptr;
    virgl_renderer_callbacks mVirglRendererCallbacks;
    AndroidVirtioGpuOps* mVirtioGpuOps = nullptr;
    ReadPixelsFunc mReadPixelsFunc = nullptr;
    struct address_space_device_control_ops* mAddressSpaceDeviceControlOps = nullptr;

    const GoldfishPipeServiceOps* mServiceOps = nullptr;

    std::unordered_map<VirtioGpuCtxId, PipeCtxEntry> mContexts;
    std::unordered_map<VirtioGpuResId, PipeResEntry> mResources;
    std::unordered_map<VirtioGpuCtxId, std::vector<VirtioGpuResId>> mContextResources;
    std::unordered_map<VirtioGpuResId, std::vector<VirtioGpuCtxId>> mResourceContexts;

    // When we wait for gpu or wait for gpu vulkan, the next (and subsequent)
    // fences created for that context should not be signaled immediately.
    // Rather, they should get in line.
    std::unique_ptr<VirtioGpuTimelines> mVirtioGpuTimelines = nullptr;
};

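// Lazily-constructed process-wide renderer instance; never deleted, so it
// remains valid for the lifetime of the process.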
static PipeVirglRenderer* sRenderer() {
    static PipeVirglRenderer* p = new PipeVirglRenderer;
    return p;
}

extern "C" {

VG_EXPORT int pipe_virgl_renderer_init(void* cookie, int flags,
                                       struct virgl_renderer_callbacks* cb) {
    sRenderer()->init(cookie, flags, cb);
    return 0;
}

VG_EXPORT void pipe_virgl_renderer_poll(void) { sRenderer()->poll(); }

VG_EXPORT void* pipe_virgl_renderer_get_cursor_data(uint32_t resource_id, uint32_t* width,
                                                    uint32_t* height) {
    return 0;
}

VG_EXPORT int pipe_virgl_renderer_resource_create(struct virgl_renderer_resource_create_args* args,
                                                  struct iovec* iov, uint32_t num_iovs) {
    return sRenderer()->createResource(args, iov, num_iovs);
}

VG_EXPORT void pipe_virgl_renderer_resource_unref(uint32_t res_handle) {
    sRenderer()->unrefResource(res_handle);
}

VG_EXPORT int pipe_virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char* name) {
    return sRenderer()->createContext(handle, nlen, name, 0);
}

VG_EXPORT void pipe_virgl_renderer_context_destroy(uint32_t handle) {
    sRenderer()->destroyContext(handle);
}

VG_EXPORT int pipe_virgl_renderer_submit_cmd(void* buffer, int ctx_id, int dwordCount) {
    return sRenderer()->submitCmd(ctx_id, buffer, dwordCount);
}

VG_EXPORT int pipe_virgl_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id,
                                                    uint32_t level, uint32_t stride,
                                                    uint32_t layer_stride, struct virgl_box* box,
                                                    uint64_t offset, struct iovec* iov,
                                                    int iovec_cnt) {
    return sRenderer()->transferReadIov(handle, offset, box, iov, iovec_cnt);
}

VG_EXPORT int pipe_virgl_renderer_transfer_write_iov(uint32_t handle, uint32_t ctx_id, int level,
                                                     uint32_t stride, uint32_t layer_stride,
                                                     struct virgl_box* box, uint64_t offset,
                                                     struct iovec* iovec, unsigned int iovec_cnt) {
    return sRenderer()->transferWriteIov(handle, offset, box, iovec, iovec_cnt);
}

VG_EXPORT void pipe_virgl_renderer_get_cap_set(uint32_t set, uint32_t* max_ver, uint32_t* max_size) {
    // `max_ver` not useful
    return sRenderer()->getCapset(set, max_size);
}

VG_EXPORT void pipe_virgl_renderer_fill_caps(uint32_t set, uint32_t version, void* caps) {
    // `version` not useful
    return sRenderer()->fillCaps(set, caps);
}

VG_EXPORT int pipe_virgl_renderer_resource_attach_iov(int res_handle, struct iovec* iov,
                                                      int num_iovs) {
    return sRenderer()->attachIov(res_handle, iov, num_iovs);
}

VG_EXPORT void pipe_virgl_renderer_resource_detach_iov(int res_handle, struct iovec** iov,
                                                       int* num_iovs) {
    return sRenderer()->detachIov(res_handle, iov, num_iovs);
}

VG_EXPORT int pipe_virgl_renderer_create_fence(int client_fence_id, uint32_t ctx_id) {
    sRenderer()->createFence(client_fence_id, VirtioGpuRingGlobal{});
    return 0;
}

VG_EXPORT void pipe_virgl_renderer_force_ctx_0(void) { VGPLOG("call"); }

VG_EXPORT void pipe_virgl_renderer_ctx_attach_resource(int ctx_id, int res_handle) {
    sRenderer()->attachResource(ctx_id, res_handle);
}

VG_EXPORT void pipe_virgl_renderer_ctx_detach_resource(int ctx_id, int res_handle) {
    sRenderer()->detachResource(ctx_id, res_handle);
}

VG_EXPORT int pipe_virgl_renderer_resource_get_info(int res_handle,
                                                    struct virgl_renderer_resource_info* info) {
    return sRenderer()->getResourceInfo(res_handle, info);
}

VG_EXPORT int pipe_virgl_renderer_resource_map(uint32_t res_handle, void** hvaOut,
                                               uint64_t* sizeOut) {
    return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
}

VG_EXPORT int pipe_virgl_renderer_resource_unmap(uint32_t res_handle) {
    return sRenderer()->resourceUnmap(res_handle);
}

VG_EXPORT void stream_renderer_flush_resource_and_readback(uint32_t res_handle, uint32_t x,
                                                           uint32_t y, uint32_t width,
                                                           uint32_t height, void* pixels,
                                                           uint32_t max_bytes) {
    sRenderer()->flushResourceAndReadback(res_handle, x, y, width, height, pixels, max_bytes);
}

VG_EXPORT int stream_renderer_create_blob(uint32_t ctx_id, uint32_t res_handle,
                                          const struct stream_renderer_create_blob* create_blob,
                                          const struct iovec* iovecs, uint32_t num_iovs,
                                          const struct stream_renderer_handle* handle) {
    sRenderer()->createBlob(ctx_id, res_handle, create_blob, handle);
    return 0;
}

VG_EXPORT int stream_renderer_export_blob(uint32_t res_handle,
                                          struct stream_renderer_handle* handle) {
    return sRenderer()->exportBlob(res_handle, handle);
}

VG_EXPORT int stream_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
    return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
}

VG_EXPORT int stream_renderer_resource_unmap(uint32_t res_handle) {
    return sRenderer()->resourceUnmap(res_handle);
}

VG_EXPORT int stream_renderer_context_create(uint32_t ctx_id, uint32_t nlen, const char* name,
                                             uint32_t context_init) {
    return sRenderer()->createContext(ctx_id, nlen, name, context_init);
}

VG_EXPORT int stream_renderer_context_create_fence(uint64_t fence_id, uint32_t ctx_id,
                                                   uint8_t ring_idx) {
    sRenderer()->createFence(fence_id, VirtioGpuRingContextSpecific{
                                           .mCtxId = ctx_id,
                                           .mRingIdx = ring_idx,
                                       });
    return 0;
}

VG_EXPORT int stream_renderer_platform_import_resource(int res_handle, int res_info,
                                                       void* resource) {
    return sRenderer()->platformImportResource(res_handle, res_info, resource);
}

VG_EXPORT int stream_renderer_platform_resource_info(int res_handle, int* width, int* height,
                                                     int* internal_format) {
    return sRenderer()->platformResourceInfo(res_handle, width, height, internal_format);
}

VG_EXPORT void* stream_renderer_platform_create_shared_egl_context() {
    return sRenderer()->platformCreateSharedEglContext();
}

VG_EXPORT int stream_renderer_platform_destroy_shared_egl_context(void* context) {
    return sRenderer()->platformDestroySharedEglContext(context);
}

VG_EXPORT int stream_renderer_resource_map_info(uint32_t res_handle, uint32_t* map_info) {
    return sRenderer()->resourceMapInfo(res_handle, map_info);
}

VG_EXPORT int stream_renderer_vulkan_info(uint32_t res_handle,
                                          struct stream_renderer_vulkan_info* vulkan_info) {
    return sRenderer()->vulkanInfo(res_handle, vulkan_info);
}

static const GoldfishPipeServiceOps goldfish_pipe_service_ops = {
    // guest_open()
    [](GoldfishHwPipe* hwPipe) -> GoldfishHostPipe* {
        return static_cast<GoldfishHostPipe*>(android_pipe_guest_open(hwPipe));
    },
    // guest_open_with_flags()
    [](GoldfishHwPipe* hwPipe, uint32_t flags) -> GoldfishHostPipe* {
        return static_cast<GoldfishHostPipe*>(android_pipe_guest_open_with_flags(hwPipe, flags));
    },
    // guest_close()
    [](GoldfishHostPipe* hostPipe, GoldfishPipeCloseReason reason) {
        static_assert((int)GOLDFISH_PIPE_CLOSE_GRACEFUL == (int)PIPE_CLOSE_GRACEFUL,
                      "Invalid PIPE_CLOSE_GRACEFUL value");
        static_assert((int)GOLDFISH_PIPE_CLOSE_REBOOT == (int)PIPE_CLOSE_REBOOT,
                      "Invalid PIPE_CLOSE_REBOOT value");
        static_assert((int)GOLDFISH_PIPE_CLOSE_LOAD_SNAPSHOT == (int)PIPE_CLOSE_LOAD_SNAPSHOT,
                      "Invalid PIPE_CLOSE_LOAD_SNAPSHOT value");
        static_assert((int)GOLDFISH_PIPE_CLOSE_ERROR == (int)PIPE_CLOSE_ERROR,
                      "Invalid PIPE_CLOSE_ERROR value");

        android_pipe_guest_close(hostPipe, static_cast<PipeCloseReason>(reason));
    },
    // guest_pre_load()
    [](QEMUFile* file) { (void)file; },
    // guest_post_load()
    [](QEMUFile* file) { (void)file; },
    // guest_pre_save()
    [](QEMUFile* file) { (void)file; },
    // guest_post_save()
    [](QEMUFile* file) { (void)file; },
    // guest_load()
    [](QEMUFile* file, GoldfishHwPipe* hwPipe, char* force_close) -> GoldfishHostPipe* {
        (void)file;
        (void)hwPipe;
        (void)force_close;
        return nullptr;
    },
    // guest_save()
    [](GoldfishHostPipe* hostPipe, QEMUFile* file) {
        (void)hostPipe;
        (void)file;
    },
    // guest_poll()
    [](GoldfishHostPipe* hostPipe) {
        static_assert((int)GOLDFISH_PIPE_POLL_IN == (int)PIPE_POLL_IN, "invalid POLL_IN values");
        static_assert((int)GOLDFISH_PIPE_POLL_OUT == (int)PIPE_POLL_OUT, "invalid POLL_OUT values");
        static_assert((int)GOLDFISH_PIPE_POLL_HUP == (int)PIPE_POLL_HUP, "invalid POLL_HUP values");

        return static_cast<GoldfishPipePollFlags>(android_pipe_guest_poll(hostPipe));
    },
    // guest_recv()
    [](GoldfishHostPipe* hostPipe, GoldfishPipeBuffer* buffers, int numBuffers) -> int {
        // NOTE: Assumes that AndroidPipeBuffer and GoldfishPipeBuffer
        // have exactly the same layout.
        static_assert(sizeof(AndroidPipeBuffer) == sizeof(GoldfishPipeBuffer),
                      "Invalid PipeBuffer sizes");
        // We can't use a static_assert with offsetof() because in msvc, it uses
        // reinterpret_cast.
        // TODO: Add runtime assertion instead?
        // https://developercommunity.visualstudio.com/content/problem/22196/static-assert-cannot-compile-constexprs-method-tha.html
#ifndef _MSC_VER
        static_assert(offsetof(AndroidPipeBuffer, data) == offsetof(GoldfishPipeBuffer, data),
                      "Invalid PipeBuffer::data offsets");
        static_assert(offsetof(AndroidPipeBuffer, size) == offsetof(GoldfishPipeBuffer, size),
                      "Invalid PipeBuffer::size offsets");
#endif
        return android_pipe_guest_recv(hostPipe, reinterpret_cast<AndroidPipeBuffer*>(buffers),
                                       numBuffers);
    },
    // guest_send()
    [](GoldfishHostPipe** hostPipe, const GoldfishPipeBuffer* buffers, int numBuffers) -> int {
        return android_pipe_guest_send(reinterpret_cast<void**>(hostPipe),
                                       reinterpret_cast<const AndroidPipeBuffer*>(buffers),
                                       numBuffers);
    },
    // guest_wake_on()
    [](GoldfishHostPipe* hostPipe, GoldfishPipeWakeFlags wakeFlags) {
        android_pipe_guest_wake_on(hostPipe, static_cast<int>(wakeFlags));
    },
    // dma_add_buffer()
    [](void* pipe, uint64_t paddr, uint64_t sz) {
        // not considered for virtio
    },
    // dma_remove_buffer()
    [](uint64_t paddr) {
        // not considered for virtio
    },
    // dma_invalidate_host_mappings()
    []() {
        // not considered for virtio
    },
    // dma_reset_host_mappings()
    []() {
        // not considered for virtio
    },
    // dma_save_mappings()
    [](QEMUFile* file) { (void)file; },
    // dma_load_mappings()
    [](QEMUFile* file) { (void)file; },
};

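// Parses the stream_renderer_param array, wires up metrics and fence
// callbacks, configures feature flags from the renderer flags, and then
// brings up the OpenGL/Vulkan backends before initializing the virgl
// renderer shim. Returns 0 on success and a negative value on missing
// required parameters or invalid flag combinations.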
VG_EXPORT int stream_renderer_init(struct stream_renderer_param* stream_renderer_params,
                                   uint64_t num_params) {
    // Required parameters.
    std::unordered_set<uint64_t> required_params{STREAM_RENDERER_PARAM_USER_DATA,
                                                 STREAM_RENDERER_PARAM_RENDERER_FLAGS,
                                                 STREAM_RENDERER_PARAM_WRITE_FENCE_CALLBACK};

    // String names of the parameters.
    std::unordered_map<uint64_t, std::string> param_strings{
        {STREAM_RENDERER_PARAM_USER_DATA, "USER_DATA"},
        {STREAM_RENDERER_PARAM_RENDERER_FLAGS, "RENDERER_FLAGS"},
        {STREAM_RENDERER_PARAM_WRITE_FENCE_CALLBACK, "WRITE_FENCE_CALLBACK"},
        {STREAM_RENDERER_PARAM_WRITE_CONTEXT_FENCE_CALLBACK, "WRITE_CONTEXT_FENCE_CALLBACK"},
        {STREAM_RENDERER_PARAM_WIN0_WIDTH, "WIN0_WIDTH"},
        {STREAM_RENDERER_PARAM_WIN0_HEIGHT, "WIN0_HEIGHT"},
        {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT,
         "METRICS_CALLBACK_ADD_INSTANT_EVENT"},
        {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR,
         "METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR"},
        {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC,
         "METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC"},
        {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT,
         "METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT"},
        {STREAM_RENDERER_PARAM_METRICS_CALLBACK_SET_ANNOTATION, "METRICS_CALLBACK_SET_ANNOTATION"},
        {STREAM_RENDERER_PARAM_METRICS_CALLBACK_ABORT, "METRICS_CALLBACK_ABORT"}};

    // Print full values for these parameters:
    // Values here must not be pointers (e.g. callback functions), to avoid potentially identifying
    // someone via ASLR. Pointers in ASLR are randomized on boot, which means pointers may be
    // different between users but similar across a single user's sessions.
    // As a convenience, any value <= 4096 is also printed, to catch small or null pointer errors.
    std::unordered_set<uint64_t> printed_param_values{STREAM_RENDERER_PARAM_RENDERER_FLAGS,
                                                      STREAM_RENDERER_PARAM_WIN0_WIDTH,
                                                      STREAM_RENDERER_PARAM_WIN0_HEIGHT};

    // We may have unknown parameters, so this function is lenient.
    auto get_param_string = [&](uint64_t key) -> std::string {
        auto param_string = param_strings.find(key);
        if (param_string != param_strings.end()) {
            return param_string->second;
        } else {
            return "Unknown param with key=" + std::to_string(key);
        }
    };

    // Initialization data.
    uint32_t display_width = 0;
    uint32_t display_height = 0;
    void* renderer_cookie = nullptr;
    int renderer_flags = 0;
    virgl_renderer_callbacks virglrenderer_callbacks = {};

    // Iterate all parameters that we support.
    GFXS_LOG("Reading stream renderer parameters:");
    for (uint64_t i = 0; i < num_params; ++i) {
        stream_renderer_param& param = stream_renderer_params[i];

        // Print out parameter we are processing. See comment above `printed_param_values` before
        // adding new prints.
        if (printed_param_values.find(param.key) != printed_param_values.end() ||
            param.value <= 4096) {
            GFXS_LOG("%s - %llu", get_param_string(param.key).c_str(),
                     static_cast<unsigned long long>(param.value));
        } else {
            // If not full value, print that it was passed.
            GFXS_LOG("%s", get_param_string(param.key).c_str());
        }

        // Removing every param we process will leave required_params empty if all provided.
        required_params.erase(param.key);

        switch (param.key) {
            case STREAM_RENDERER_PARAM_USER_DATA: {
                renderer_cookie = reinterpret_cast<void*>(static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_RENDERER_FLAGS: {
                renderer_flags = static_cast<int>(param.value);
                break;
            }
            case STREAM_RENDERER_PARAM_WRITE_FENCE_CALLBACK: {
                virglrenderer_callbacks.write_fence =
                    reinterpret_cast<stream_renderer_param_write_fence_callback>(
                        static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_WRITE_CONTEXT_FENCE_CALLBACK: {
#ifdef VIRGL_RENDERER_UNSTABLE_APIS
                virglrenderer_callbacks.write_context_fence =
                    reinterpret_cast<stream_renderer_param_write_context_fence_callback>(
                        static_cast<uintptr_t>(param.value));
#else
                ERR("Cannot use WRITE_CONTEXT_FENCE_CALLBACK with unstable APIs OFF.");
#endif
                break;
            }
            case STREAM_RENDERER_PARAM_WIN0_WIDTH: {
                display_width = static_cast<uint32_t>(param.value);
                break;
            }
            case STREAM_RENDERER_PARAM_WIN0_HEIGHT: {
                display_height = static_cast<uint32_t>(param.value);
                break;
            }
            case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT: {
                MetricsLogger::add_instant_event_callback =
                    reinterpret_cast<stream_renderer_param_metrics_callback_add_instant_event>(
                        static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_DESCRIPTOR: {
                MetricsLogger::add_instant_event_with_descriptor_callback = reinterpret_cast<
                    stream_renderer_param_metrics_callback_add_instant_event_with_descriptor>(
                    static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_INSTANT_EVENT_WITH_METRIC: {
                MetricsLogger::add_instant_event_with_metric_callback = reinterpret_cast<
                    stream_renderer_param_metrics_callback_add_instant_event_with_metric>(
                    static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ADD_VULKAN_OUT_OF_MEMORY_EVENT: {
                MetricsLogger::add_vulkan_out_of_memory_event = reinterpret_cast<
                    stream_renderer_param_metrics_callback_add_vulkan_out_of_memory_event>(
                    static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_METRICS_CALLBACK_SET_ANNOTATION: {
                MetricsLogger::set_crash_annotation_callback =
                    reinterpret_cast<stream_renderer_param_metrics_callback_set_annotation>(
                        static_cast<uintptr_t>(param.value));
                break;
            }
            case STREAM_RENDERER_PARAM_METRICS_CALLBACK_ABORT: {
                emugl::setDieFunction(
                    reinterpret_cast<stream_renderer_param_metrics_callback_abort>(
                        static_cast<uintptr_t>(param.value)));
                break;
            }
            default: {
                // We skip any parameters we don't recognize.
                ERR("Skipping unknown parameter key: %llu. May need to upgrade gfxstream.",
                    static_cast<unsigned long long>(param.key));
                break;
            }
        }
    }
    GFXS_LOG("Finished reading parameters");

    // Some required params not found.
    if (required_params.size() > 0) {
        ERR("Missing required parameters:");
        for (uint64_t param : required_params) {
            ERR("%s", get_param_string(param).c_str());
        }
        ERR("Failing initialization intentionally");
        return -1;
    }

    // Set non-product-specific callbacks.
    gfxstream::vk::vk_util::setVkCheckCallbacks(
        std::make_unique<gfxstream::vk::vk_util::VkCheckCallbacks>(
            gfxstream::vk::vk_util::VkCheckCallbacks{
                .onVkErrorOutOfMemory =
                    [](VkResult result, const char* function, int line) {
                        auto fb = gfxstream::FrameBuffer::getFB();
                        if (!fb) {
                            ERR("FrameBuffer not yet initialized. Dropping out of memory event");
                            return;
                        }
                        fb->logVulkanOutOfMemory(result, function, line);
                    },
                .onVkErrorOutOfMemoryOnAllocation =
                    [](VkResult result, const char* function, int line,
                       std::optional<uint64_t> allocationSize) {
                        auto fb = gfxstream::FrameBuffer::getFB();
                        if (!fb) {
                            ERR("FrameBuffer not yet initialized. Dropping out of memory event");
                            return;
                        }
                        fb->logVulkanOutOfMemory(result, function, line, allocationSize);
                    }}));

    GFXS_LOG("start. display dimensions: width %u height %u, renderer flags: 0x%x", display_width,
             display_height, renderer_flags);

    // Flags processing

    // TODO: hook up "gfxstream egl" to the renderer flags
    // GFXSTREAM_RENDERER_FLAGS_USE_EGL_BIT in crosvm
    // as it's specified from launch_cvd.
    // At the moment, use ANDROID_GFXSTREAM_EGL=1
    // For test on GCE
    if (android::base::getEnvironmentVariable("ANDROID_GFXSTREAM_EGL") == "1") {
        android::base::setEnvironmentVariable("ANDROID_EGL_ON_EGL", "1");
        android::base::setEnvironmentVariable("ANDROID_EMUGL_LOG_PRINT", "1");
        android::base::setEnvironmentVariable("ANDROID_EMUGL_VERBOSE", "1");
    }
    // end for test on GCE

    android::base::setEnvironmentVariable("ANDROID_EMU_HEADLESS", "1");
    bool enableVk = !(renderer_flags & GFXSTREAM_RENDERER_FLAGS_NO_VK_BIT);

    bool egl2eglByEnv = android::base::getEnvironmentVariable("ANDROID_EGL_ON_EGL") == "1";
    bool egl2eglByFlag = renderer_flags & GFXSTREAM_RENDERER_FLAGS_USE_EGL_BIT;
    bool enable_egl2egl = egl2eglByFlag || egl2eglByEnv;
    if (enable_egl2egl) {
        android::base::setEnvironmentVariable("ANDROID_GFXSTREAM_EGL", "1");
        android::base::setEnvironmentVariable("ANDROID_EGL_ON_EGL", "1");
    }

    bool surfaceless = renderer_flags & GFXSTREAM_RENDERER_FLAGS_USE_SURFACELESS_BIT;
    bool enableGlEs31Flag = renderer_flags & GFXSTREAM_RENDERER_FLAGS_ENABLE_GLES31_BIT;
    bool useExternalBlob = renderer_flags & GFXSTREAM_RENDERER_FLAGS_USE_EXTERNAL_BLOB;
    bool useSystemBlob = renderer_flags & GFXSTREAM_RENDERER_FLAGS_USE_SYSTEM_BLOB;
    bool guestUsesAngle = renderer_flags & GFXSTREAM_RENDERER_FLAGS_GUEST_USES_ANGLE;
    bool useVulkanNativeSwapchain =
        renderer_flags & GFXSTREAM_RENDERER_FLAGS_VULKAN_NATIVE_SWAPCHAIN_BIT;

    GFXS_LOG("Vulkan enabled? %d", enableVk);
    GFXS_LOG("egl2egl enabled? %d", enable_egl2egl);
    GFXS_LOG("surfaceless? %d", surfaceless);
    GFXS_LOG("OpenGL ES 3.1 enabled? %d", enableGlEs31Flag);
    GFXS_LOG("use external blob? %d", useExternalBlob);
    GFXS_LOG("use system blob? %d", useSystemBlob);
    GFXS_LOG("guest using ANGLE? %d", guestUsesAngle);
    GFXS_LOG("use Vulkan native swapchain on the host? %d", useVulkanNativeSwapchain);

    if (useSystemBlob) {
        if (!useExternalBlob) {
            GFXS_LOG("USE_EXTERNAL_BLOB must be on with USE_SYSTEM_BLOB");
            return -2;
        }

#ifndef _WIN32
        GFXS_LOG("Warning: USE_SYSTEM_BLOB has only been tested on Windows");
#endif
    }

    // Need to manually set the GLES backend paths in the gfxstream environment
    // because the library search paths are not automatically set to include
    // the directory in which the GLES backend resides.
#if defined(__linux__)
#define GFXSTREAM_LIB_SUFFIX ".so"
#elif defined(__APPLE__)
#define GFXSTREAM_LIB_SUFFIX ".dylib"
#else  // Windows
#define GFXSTREAM_LIB_SUFFIX ".dll"
#endif

    feature_set_enabled_override(kFeature_GLPipeChecksum, false);
    feature_set_enabled_override(kFeature_GLESDynamicVersion, true);
    feature_set_enabled_override(kFeature_PlayStoreImage, !enableGlEs31Flag);
    feature_set_enabled_override(kFeature_GLDMA, false);
    feature_set_enabled_override(kFeature_GLAsyncSwap, false);
    feature_set_enabled_override(kFeature_RefCountPipe, false);
    feature_set_enabled_override(kFeature_NoDelayCloseColorBuffer, true);
    feature_set_enabled_override(kFeature_NativeTextureDecompression, false);
    feature_set_enabled_override(kFeature_GLDirectMem, false);
    feature_set_enabled_override(kFeature_Vulkan, enableVk);
    feature_set_enabled_override(kFeature_VulkanSnapshots, false);
    feature_set_enabled_override(kFeature_VulkanNullOptionalStrings, true);
    feature_set_enabled_override(kFeature_VulkanShaderFloat16Int8, true);
    feature_set_enabled_override(kFeature_HostComposition, true);
    feature_set_enabled_override(kFeature_VulkanIgnoredHandles, true);
    feature_set_enabled_override(kFeature_VirtioGpuNext, true);
    feature_set_enabled_override(kFeature_VirtioGpuNativeSync, true);
    feature_set_enabled_override(kFeature_GuestUsesAngle, guestUsesAngle);
    feature_set_enabled_override(kFeature_VulkanQueueSubmitWithCommands, true);
    feature_set_enabled_override(kFeature_VulkanNativeSwapchain, useVulkanNativeSwapchain);
    feature_set_enabled_override(kFeature_VulkanBatchedDescriptorSetUpdate, true);
    // TODO: Strictly speaking, renderer_flags check is insufficient because
    // fence contexts require us to be running a new-enough guest kernel.
    feature_set_enabled_override(kFeature_VirtioGpuFenceContexts,
                                 (renderer_flags & GFXSTREAM_RENDERER_FLAGS_ASYNC_FENCE_CB));
    feature_set_enabled_override(kFeature_ExternalBlob, useExternalBlob);
    feature_set_enabled_override(kFeature_SystemBlob, useSystemBlob);

    android::featurecontrol::productFeatureOverride();

    if (useVulkanNativeSwapchain && !enableVk) {
        GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
            << "can't enable vulkan native swapchain, Vulkan is disabled";
    }

    gfxstream::vk::vkDispatch(false /* don't use test ICD */);

    auto androidHw = aemu_get_android_hw();

    androidHw->hw_gltransport_asg_writeBufferSize = 1048576;
    androidHw->hw_gltransport_asg_writeStepSize = 262144;
    androidHw->hw_gltransport_asg_dataRingSize = 524288;
    androidHw->hw_gltransport_drawFlushInterval = 10000;

    EmuglConfig config;

    // Make all the console agents available.
    android::emulation::injectGraphicsAgents(android::emulation::GfxStreamGraphicsAgentFactory());

    emuglConfig_init(&config, true /* gpu enabled */, "auto",
                     enable_egl2egl ? "swiftshader_indirect" : "host", 64, /* bitness */
                     surfaceless, /* no window */
                     false, /* blocklisted */
                     false, /* has guest renderer */
                     WINSYS_GLESBACKEND_PREFERENCE_AUTO, true /* force host gpu vulkan */);

    emuglConfig_setupEnv(&config);

    android_prepareOpenglesEmulation();

    {
        static gfxstream::RenderLibPtr renderLibPtr = gfxstream::initLibrary();
        android_setOpenglesEmulation(renderLibPtr.get(), nullptr, nullptr);
    }

    int maj;
    int min;
    android_startOpenglesRenderer(display_width, display_height, 1, 28, getGraphicsAgents()->vm,
                                  getGraphicsAgents()->emu, getGraphicsAgents()->multi_display,
                                  &maj, &min);

    char* vendor = nullptr;
    char* renderer = nullptr;
    char* version = nullptr;

    android_getOpenglesHardwareStrings(&vendor, &renderer, &version);

    GFXS_LOG("GL strings; [%s] [%s] [%s].\n", vendor, renderer, version);

    auto openglesRenderer = android_getOpenglesRenderer();

    if (!openglesRenderer) {
        GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "No renderer started, fatal";
    }

    address_space_set_vm_operations(getGraphicsAgents()->vm);
    android_init_opengles_pipe();
    android_opengles_pipe_set_recv_mode(2 /* virtio-gpu */);
    android_init_refcount_pipe();

    pipe_virgl_renderer_init(renderer_cookie, renderer_flags, &virglrenderer_callbacks);

    gfxstream::FrameBuffer::waitUntilInitialized();

    GFXS_LOG("Started renderer");

    return 0;
}
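// Illustrative sketch (not part of this file): a minimal way a VMM might call
// stream_renderer_init(). The cookie and the write_fence body are hypothetical
// placeholders, assuming the usual virgl write_fence callback signature.
//
//   static void my_write_fence(void* cookie, uint32_t fence) { /* signal fence */ }
//
//   struct stream_renderer_param params[] = {
//       {STREAM_RENDERER_PARAM_USER_DATA, (uint64_t)(uintptr_t)my_cookie},
//       {STREAM_RENDERER_PARAM_RENDERER_FLAGS, GFXSTREAM_RENDERER_FLAGS_USE_SURFACELESS_BIT},
//       {STREAM_RENDERER_PARAM_WRITE_FENCE_CALLBACK, (uint64_t)(uintptr_t)my_write_fence},
//       {STREAM_RENDERER_PARAM_WIN0_WIDTH, 1280},
//       {STREAM_RENDERER_PARAM_WIN0_HEIGHT, 720},
//   };
//   stream_renderer_init(params, sizeof(params) / sizeof(params[0]));
// (end of illustrative sketch)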
VG_EXPORT void gfxstream_backend_setup_window(void* native_window_handle, int32_t window_x,
                                              int32_t window_y, int32_t window_width,
                                              int32_t window_height, int32_t fb_width,
                                              int32_t fb_height) {
    android_showOpenglesWindow(native_window_handle, window_x, window_y, window_width,
                               window_height, fb_width, fb_height, 1.0f, 0, false, false);
}

VG_EXPORT void gfxstream_backend_teardown() {
    android_finishOpenglesRenderer();
    android_hideOpenglesWindow();
    android_stopOpenglesRenderer(true);
}

VG_EXPORT void gfxstream_backend_set_screen_mask(int width, int height,
                                                 const unsigned char* rgbaData) {
    android_setOpenglesScreenMask(width, height, rgbaData);
}

const GoldfishPipeServiceOps* goldfish_pipe_get_service_ops() { return &goldfish_pipe_service_ops; }

#define VIRGLRENDERER_API_PIPE_STRUCT_DEF(api) pipe_##api,

static struct virgl_renderer_virtio_interface s_virtio_interface = {
    LIST_VIRGLRENDERER_API(VIRGLRENDERER_API_PIPE_STRUCT_DEF)};

struct virgl_renderer_virtio_interface* get_goldfish_pipe_virgl_renderer_virtio_interface(void) {
    return &s_virtio_interface;
}

static_assert(sizeof(struct stream_renderer_device_id) == 32,
              "stream_renderer_device_id must be 32 bytes");
static_assert(offsetof(struct stream_renderer_device_id, device_uuid) == 0,
              "stream_renderer_device_id.device_uuid must be at offset 0");
static_assert(offsetof(struct stream_renderer_device_id, driver_uuid) == 16,
              "stream_renderer_device_id.driver_uuid must be at offset 16");

static_assert(sizeof(struct stream_renderer_vulkan_info) == 36,
              "stream_renderer_vulkan_info must be 36 bytes");
static_assert(offsetof(struct stream_renderer_vulkan_info, memory_index) == 0,
              "stream_renderer_vulkan_info.memory_index must be at offset 0");
static_assert(offsetof(struct stream_renderer_vulkan_info, device_id) == 4,
              "stream_renderer_vulkan_info.device_id must be at offset 4");

static_assert(sizeof(struct stream_renderer_param_host_visible_memory_mask_entry) == 36,
              "stream_renderer_param_host_visible_memory_mask_entry must be 36 bytes");
static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask_entry, device_id) == 0,
              "stream_renderer_param_host_visible_memory_mask_entry.device_id must be at offset 0");
static_assert(
    offsetof(struct stream_renderer_param_host_visible_memory_mask_entry, memory_type_mask) == 32,
    "stream_renderer_param_host_visible_memory_mask_entry.memory_type_mask must be at offset 32");

static_assert(sizeof(struct stream_renderer_param_host_visible_memory_mask) == 16,
              "stream_renderer_param_host_visible_memory_mask must be 16 bytes");
static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask, entries) == 0,
              "stream_renderer_param_host_visible_memory_mask.entries must be at offset 0");
static_assert(offsetof(struct stream_renderer_param_host_visible_memory_mask, num_entries) == 8,
              "stream_renderer_param_host_visible_memory_mask.num_entries must be at offset 8");

static_assert(sizeof(struct stream_renderer_param) == 16, "stream_renderer_param must be 16 bytes");
static_assert(offsetof(struct stream_renderer_param, key) == 0,
              "stream_renderer_param.key must be at offset 0");
static_assert(offsetof(struct stream_renderer_param, value) == 8,
              "stream_renderer_param.value must be at offset 8");
} // extern "C"