1 // Copyright 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include <vulkan/vulkan.h>
15
16 #include <deque>
17 #include <unordered_map>
18
19 #include "VirtioGpuTimelines.h"
20 #include "base/AlignedBuf.h"
21 #include "base/Lock.h"
22 #include "base/Tracing.h"
23 #include "host-common/AddressSpaceService.h"
24 #include "host-common/HostmemIdMapping.h"
25 #include "host-common/address_space_device.h"
26 #include "host-common/android_pipe_common.h"
27 #include "host-common/GfxstreamFatalError.h"
28 #include "host-common/opengles.h"
29 #include "host-common/vm_operations.h"
30 #include "host-common/linux_types.h"
31
32 extern "C" {
33 #include "virtio-gpu-gfxstream-renderer.h"
34 #include "drm_fourcc.h"
35 #include "virgl_hw.h"
36 #include "host-common/goldfish_pipe.h"
37 } // extern "C"
38
39 #define DEBUG_VIRTIO_GOLDFISH_PIPE 0
40
41 #if DEBUG_VIRTIO_GOLDFISH_PIPE
42
43 #define VGPLOG(fmt,...) \
44 fprintf(stderr, "%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
45
46 #else
47 #define VGPLOG(fmt,...)
48 #endif
49
50 #define VGP_FATAL() \
51 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << \
52 "virtio-goldfish-pipe fatal error: "
53
54 #ifdef VIRTIO_GOLDFISH_EXPORT_API
55
56 #ifdef _WIN32
57 #define VG_EXPORT __declspec(dllexport)
58 #else
59 #define VG_EXPORT __attribute__((visibility("default")))
60 #endif
61
62 #else
63
64 #define VG_EXPORT
65
66 #endif // !VIRTIO_GOLDFISH_EXPORT_API
67
68 // Virtio Goldfish Pipe: Overview-----------------------------------------------
69 //
70 // Virtio Goldfish Pipe is meant for running goldfish pipe services with a
71 // stock Linux kernel that is already capable of virtio-gpu. It runs DRM
72 // VIRTGPU ioctls on top of a custom implementation of virglrenderer on the
73 // host side that doesn't (directly) do any rendering, but instead talks to
74 // host-side pipe services.
75 //
76 // This is mainly used for graphics at the moment, though it's possible to run
77 // other pipe services over virtio-gpu as well. virtio-gpu is selected over
78 // other devices primarily because of the existence of an API (virglrenderer)
79 // that is already somewhat separate from virtio-gpu, and not needing to create
80 // a new virtio device to handle goldfish pipe.
81 //
82 // How it works: existing virglrenderer APIs are remapped to perform pipe
83 // operations. First of all, pipe operations consist of the following:
84 //
85 // - open() / close(): Starts or stops an instance of a pipe service.
86 //
87 // - write(const void* buf, size_t len) / read(const void* buf, size_t len):
88 // Sends or receives data over the pipe. The first write() is the name of the
89 // pipe service. After the pipe service is determined, the host calls
90 // resetPipe() to replace the host-side pipe instance with an instance of the
91 // pipe service.
92 //
93 // - reset(void* initialPipe, void* actualPipe): the operation that replaces an
94 // initial pipe with an instance of a pipe service.
95 //
96 // Next, here's how the pipe operations map to virglrenderer commands:
97 //
98 // - open() -> virgl_renderer_context_create(),
99 // virgl_renderer_resource_create(),
100 // virgl_renderer_resource_attach_iov()
101 //
102 // The open() corresponds to a guest-side open of a rendernode, which triggers
103 // context creation. Each pipe corresponds 1:1 with a drm virtgpu context id.
104 // We also associate an R8 resource with each pipe as the backing data for
105 // write/read.
106 //
107 // - close() -> virgl_renderer_resource_unref(),
108 // virgl_renderer_context_destroy()
109 //
110 // The close() corresponds to undoing the operations of open().
111 //
112 // - write() -> virgl_renderer_transfer_write_iov() OR
113 // virgl_renderer_submit_cmd()
114 //
115 // Pipe write() operation corresponds to performing a TRANSFER_TO_HOST ioctl on
116 // the resource created alongside open(), OR an EXECBUFFER ioctl.
117 //
118 // - read() -> virgl_renderer_transfer_read_iov()
119 //
120 // Pipe read() operation corresponds to performing a TRANSFER_FROM_HOST ioctl on
121 // the resource created alongside open().
122 //
123 // A note on synchronization----------------------------------------------------
124 //
125 // Unlike goldfish-pipe which handles write/read/open/close on the vcpu thread
126 // that triggered the particular operation, virtio-gpu handles the
127 // corresponding virgl operations in a bottom half that is triggered off the
128 // vcpu thread on a timer. This means that in the guest, if we want to ensure
129 // that a particular operation such as TRANSFER_TO_HOST completed on the host,
130 // we need to call VIRTGPU_WAIT, which ends up polling fences here. This is why
131 // we insert a fence after every operation in this code.
132 //
133 // Details on transfer mechanism: mapping 2D transfer to 1D ones----------------
134 //
135 // Resource objects are typically 2D textures, while we want to transmit
136 // 1D buffers to the pipe services on the host. DRM VIRTGPU uses the concept
137 // of a 'box' to represent transfers that do not involve an entire resource
138 // object. Each box has a x, y, width and height parameter to define the
139 // extent of the transfer for a 2D texture. In our use case, we only use the x
140 // and width parameters. We've also created the resource with R8 format
141 // (byte-by-byte) with width equal to the total size of the transfer buffer we
142 // want (around 1 MB).
143 //
144 // The resource object itself is currently backed via plain guest RAM, which
145 // can be physically non-contiguous from the guest's point of view, and therefore
146 // corresponds to a possibly-long list of pointers and sizes (iov) on the host
147 // side. The sync_iov helper function converts the list of pointers
148 // to one contiguous buffer on the host (or vice versa), at the cost of a copy.
149 // (TODO: see if we can use host coherent memory to do away with the copy).
150 //
151 // We can see this abstraction in use via the implementation of
152 // transferWriteIov and transferReadIov below, which sync the iovec to/from a
153 // linear buffer if necessary, and then perform a corresponding pipe operation
154 // based on the box parameter's x and width values.
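//
// As a rough illustration (not the actual guest driver code), a guest-side
// pipe write of `len` bytes at offset `off` within the R8 staging resource
// would be expressed as a 1D box before issuing TRANSFER_TO_HOST:
//
//     virgl_box box = {};
//     box.x = off;   // byte offset into the R8 (byte-per-texel) resource
//     box.y = 0;
//     box.w = len;   // number of bytes to transfer
//     box.h = 1;     // single "row", so the transfer stays one-dimensional
//
// On the host, transferWriteIov() below copies box.w bytes starting at box.x
// from the iovec-backed guest pages into the linear buffer and hands them to
// the pipe service.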
155
156 using android::base::AutoLock;
157 using android::base::Lock;
158 using android::emulation::HostmemIdMapping;
159 using emugl::ABORT_REASON_OTHER;
160 using emugl::FatalError;
161
162 using VirtioGpuResId = uint32_t;
163
164 static constexpr int kPipeTryAgain = -2;
165
166 struct VirtioGpuCmd {
167 uint32_t op;
168 uint32_t cmdSize;
169 unsigned char buf[0];
170 } __attribute__((packed));
171
172 struct PipeCtxEntry {
173 VirtioGpuCtxId ctxId;
174 GoldfishHostPipe* hostPipe;
175 int fence;
176 uint32_t addressSpaceHandle;
177 bool hasAddressSpaceHandle;
178 };
179
180 struct PipeResEntry {
181 virgl_renderer_resource_create_args args;
182 iovec* iov;
183 uint32_t numIovs;
184 void* linear;
185 size_t linearSize;
186 GoldfishHostPipe* hostPipe;
187 VirtioGpuCtxId ctxId;
188 uint64_t hva;
189 uint64_t hvaSize;
190 uint64_t hvaId;
191 uint32_t hvSlot;
192 uint32_t caching;
193 };
194
195 static inline uint32_t align_up(uint32_t n, uint32_t a) {
196 return ((n + a - 1) / a) * a;
197 }
198
199 static inline uint32_t align_up_power_of_2(uint32_t n, uint32_t a) {
200 return (n + (a - 1)) & ~(a - 1);
201 }
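
// For example, align_up(10, 4) and align_up_power_of_2(10, 4) both return 12.
// Note that align_up_power_of_2 is only correct when 'a' is a power of two,
// while align_up works for any non-zero alignment.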
202
203 #define VIRGL_FORMAT_NV12 166
204 #define VIRGL_FORMAT_YV12 163
205 #define VIRGL_FORMAT_P010 314
206
207 const uint32_t kGlBgra = 0x80e1;
208 const uint32_t kGlRgba = 0x1908;
209 const uint32_t kGlRgba16f = 0x881A;
210 const uint32_t kGlRgb565 = 0x8d62;
211 const uint32_t kGlRgba1010102 = 0x8059;
212 const uint32_t kGlR8 = 0x8229;
213 const uint32_t kGlR16 = 0x822A;
214 const uint32_t kGlRg8 = 0x822b;
215 const uint32_t kGlLuminance = 0x1909;
216 const uint32_t kGlLuminanceAlpha = 0x190a;
217 const uint32_t kGlUnsignedByte = 0x1401;
218 const uint32_t kGlUnsignedShort565 = 0x8363;
219
220 constexpr uint32_t kFwkFormatGlCompat = 0;
221 constexpr uint32_t kFwkFormatYV12 = 1;
222 // constexpr uint32_t kFwkFormatYUV420888 = 2;
223 constexpr uint32_t kFwkFormatNV12 = 3;
224 constexpr uint32_t kFwkFormatP010 = 4;
225
226 static inline bool virgl_format_is_yuv(uint32_t format) {
227 switch (format) {
228 case VIRGL_FORMAT_B8G8R8X8_UNORM:
229 case VIRGL_FORMAT_B8G8R8A8_UNORM:
230 case VIRGL_FORMAT_R8G8B8X8_UNORM:
231 case VIRGL_FORMAT_R8G8B8A8_UNORM:
232 case VIRGL_FORMAT_B5G6R5_UNORM:
233 case VIRGL_FORMAT_R8_UNORM:
234 case VIRGL_FORMAT_R16_UNORM:
235 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
236 case VIRGL_FORMAT_R8G8_UNORM:
237 case VIRGL_FORMAT_R10G10B10A2_UNORM:
238 return false;
239 case VIRGL_FORMAT_NV12:
240 case VIRGL_FORMAT_P010:
241 case VIRGL_FORMAT_YV12:
242 return true;
243 default:
244 VGP_FATAL() << "Unknown virgl format 0x" << std::hex << format;
245 return false;
246 }
247 }
248
249 static inline uint32_t virgl_format_to_gl(uint32_t virgl_format) {
250 switch (virgl_format) {
251 case VIRGL_FORMAT_B8G8R8X8_UNORM:
252 case VIRGL_FORMAT_B8G8R8A8_UNORM:
253 return kGlBgra;
254 case VIRGL_FORMAT_R8G8B8X8_UNORM:
255 case VIRGL_FORMAT_R8G8B8A8_UNORM:
256 return kGlRgba;
257 case VIRGL_FORMAT_B5G6R5_UNORM:
258 return kGlRgb565;
259 case VIRGL_FORMAT_R16_UNORM:
260 return kGlR16;
261 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
262 return kGlRgba16f;
263 case VIRGL_FORMAT_R8_UNORM:
264 return kGlR8;
265 case VIRGL_FORMAT_R8G8_UNORM:
266 return kGlRg8;
267 case VIRGL_FORMAT_NV12:
268 case VIRGL_FORMAT_P010:
269 case VIRGL_FORMAT_YV12:
270 // emulated as RGBA8888
271 return kGlRgba;
272 case VIRGL_FORMAT_R10G10B10A2_UNORM:
273 return kGlRgba1010102;
274 default:
275 return kGlRgba;
276 }
277 }
278
279 static inline uint32_t virgl_format_to_fwk_format(uint32_t virgl_format) {
280 switch (virgl_format) {
281 case VIRGL_FORMAT_NV12:
282 return kFwkFormatNV12;
283 case VIRGL_FORMAT_P010:
284 return kFwkFormatP010;
285 case VIRGL_FORMAT_YV12:
286 return kFwkFormatYV12;
287 case VIRGL_FORMAT_R8_UNORM:
288 case VIRGL_FORMAT_R16_UNORM:
289 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
290 case VIRGL_FORMAT_R8G8_UNORM:
291 case VIRGL_FORMAT_B8G8R8X8_UNORM:
292 case VIRGL_FORMAT_B8G8R8A8_UNORM:
293 case VIRGL_FORMAT_R8G8B8X8_UNORM:
294 case VIRGL_FORMAT_R8G8B8A8_UNORM:
295 case VIRGL_FORMAT_B5G6R5_UNORM:
296 case VIRGL_FORMAT_R10G10B10A2_UNORM:
297 default: // kFwkFormatGlCompat: No extra conversions needed
298 return kFwkFormatGlCompat;
299 }
300 }
301
302 static inline uint32_t gl_format_to_natural_type(uint32_t format) {
303 switch (format) {
304 case kGlBgra:
305 case kGlRgba:
306 case kGlLuminance:
307 case kGlLuminanceAlpha:
308 return kGlUnsignedByte;
309 case kGlRgb565:
310 return kGlUnsignedShort565;
311 default:
312 return kGlUnsignedByte;
313 }
314 }
315
316 static inline size_t virgl_format_to_linear_base(
317 uint32_t format,
318 uint32_t totalWidth, uint32_t totalHeight,
319 uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
320 if (virgl_format_is_yuv(format)) {
321 return 0;
322 } else {
323 uint32_t bpp = 4;
324 switch (format) {
325 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
326 bpp = 8;
327 break;
328 case VIRGL_FORMAT_B8G8R8X8_UNORM:
329 case VIRGL_FORMAT_B8G8R8A8_UNORM:
330 case VIRGL_FORMAT_R8G8B8X8_UNORM:
331 case VIRGL_FORMAT_R8G8B8A8_UNORM:
332 case VIRGL_FORMAT_R10G10B10A2_UNORM:
333 bpp = 4;
334 break;
335 case VIRGL_FORMAT_B5G6R5_UNORM:
336 case VIRGL_FORMAT_R8G8_UNORM:
337 case VIRGL_FORMAT_R16_UNORM:
338 bpp = 2;
339 break;
340 case VIRGL_FORMAT_R8_UNORM:
341 bpp = 1;
342 break;
343 default:
344 VGP_FATAL() << "Unknown format: 0x" << std::hex << format;
345 }
346
347 uint32_t stride = totalWidth * bpp;
348 return y * stride + x * bpp;
349 }
350 return 0;
351 }
352
353 static inline size_t virgl_format_to_total_xfer_len(
354 uint32_t format,
355 uint32_t totalWidth, uint32_t totalHeight,
356 uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
357 if (virgl_format_is_yuv(format)) {
358 uint32_t bpp = format == VIRGL_FORMAT_P010 ? 2 : 1;
359 uint32_t yAlign = (format == VIRGL_FORMAT_YV12) ? 32 : 16;
360 uint32_t yWidth = totalWidth;
361 uint32_t yHeight = totalHeight;
362 uint32_t yStride = align_up_power_of_2(yWidth, yAlign) * bpp;
363 uint32_t ySize = yStride * yHeight;
364
365 uint32_t uvAlign = 16;
366 uint32_t uvWidth;
367 uint32_t uvPlaneCount;
368 if (format == VIRGL_FORMAT_NV12) {
369 uvWidth = totalWidth;
370 uvPlaneCount = 1;
371 } else if (format == VIRGL_FORMAT_P010) {
372 uvWidth = totalWidth;
373 uvPlaneCount = 1;
374 } else if (format == VIRGL_FORMAT_YV12) {
375 uvWidth = totalWidth / 2;
376 uvPlaneCount = 2;
377 } else {
378 VGP_FATAL() << "Unknown yuv virgl format: 0x" << std::hex << format;
379 }
380 uint32_t uvHeight = totalHeight / 2;
381 uint32_t uvStride = align_up_power_of_2(uvWidth, uvAlign) * bpp;
382 uint32_t uvSize = uvStride * uvHeight * uvPlaneCount;
383
384 uint32_t dataSize = ySize + uvSize;
385 return dataSize;
386 } else {
387 uint32_t bpp = 4;
388 switch (format) {
389 case VIRGL_FORMAT_R16G16B16A16_FLOAT:
390 bpp = 8;
391 break;
392 case VIRGL_FORMAT_B8G8R8X8_UNORM:
393 case VIRGL_FORMAT_B8G8R8A8_UNORM:
394 case VIRGL_FORMAT_R8G8B8X8_UNORM:
395 case VIRGL_FORMAT_R8G8B8A8_UNORM:
396 case VIRGL_FORMAT_R10G10B10A2_UNORM:
397 bpp = 4;
398 break;
399 case VIRGL_FORMAT_B5G6R5_UNORM:
400 case VIRGL_FORMAT_R16_UNORM:
401 case VIRGL_FORMAT_R8G8_UNORM:
402 bpp = 2;
403 break;
404 case VIRGL_FORMAT_R8_UNORM:
405 bpp = 1;
406 break;
407 default:
408 VGP_FATAL() << "Unknown format: 0x" << std::hex << format;
409 }
410
411 uint32_t stride = totalWidth * bpp;
412 return (h - 1U) * stride + w * bpp;
413 }
414 return 0;
415 }
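
// Worked example (illustrative): a 640x480 NV12 resource has bpp = 1,
// yStride = align_up_power_of_2(640, 16) = 640, so ySize = 640 * 480 = 307200.
// Its single interleaved UV plane has uvStride = 640 and uvHeight = 240,
// so uvSize = 640 * 240 = 153600, for a total transfer length of 460800 bytes.
// For RGB formats the length is simply (h - 1) * stride + w * bpp, i.e. the
// last row is not padded out to the full stride.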
416
417
418 enum IovSyncDir {
419 IOV_TO_LINEAR = 0,
420 LINEAR_TO_IOV = 1,
421 };
422
423 static int sync_iov(PipeResEntry* res, uint64_t offset, const virgl_box* box, IovSyncDir dir) {
424 VGPLOG("offset: 0x%llx box: %u %u %u %u size %u x %u iovs %u linearSize %zu",
425 (unsigned long long)offset,
426 box->x, box->y, box->w, box->h,
427 res->args.width, res->args.height,
428 res->numIovs,
429 res->linearSize);
430
431 if (box->x > res->args.width || box->y > res->args.height) {
432 VGP_FATAL() << "Box out of range of resource";
433 }
434 if (box->w == 0U || box->h == 0U) {
435 VGP_FATAL() << "Empty transfer";
436 }
437 if (box->x + box->w > res->args.width) {
438 VGP_FATAL() << "Box overflows resource width";
439 }
440
441 size_t linearBase = virgl_format_to_linear_base(
442 res->args.format,
443 res->args.width,
444 res->args.height,
445 box->x, box->y, box->w, box->h);
446 size_t start = linearBase;
447 // height - 1 in order to treat the (w * bpp) row specially
448 // (i.e., the last row does not occupy the full stride)
449 size_t length = virgl_format_to_total_xfer_len(
450 res->args.format,
451 res->args.width,
452 res->args.height,
453 box->x, box->y, box->w, box->h);
454 size_t end = start + length;
455
456 if (end > res->linearSize) {
457 VGP_FATAL() << "start + length overflows! linearSize "
458 << res->linearSize << " start " << start << " length " << length << " (wanted "
459 << start + length << ")";
460 }
461
462 uint32_t iovIndex = 0;
463 size_t iovOffset = 0;
464 size_t written = 0;
465 char* linear = static_cast<char*>(res->linear);
466
467 while (written < length) {
468
469 if (iovIndex >= res->numIovs) {
470 VGP_FATAL() << "write request overflowed numIovs";
471 }
472
473 const char* iovBase_const = static_cast<const char*>(res->iov[iovIndex].iov_base);
474 char* iovBase = static_cast<char*>(res->iov[iovIndex].iov_base);
475 size_t iovLen = res->iov[iovIndex].iov_len;
476 size_t iovOffsetEnd = iovOffset + iovLen;
477
478 auto lower_intersect = std::max(iovOffset, start);
479 auto upper_intersect = std::min(iovOffsetEnd, end);
480 if (lower_intersect < upper_intersect) {
481 size_t toWrite = upper_intersect - lower_intersect;
482 switch (dir) {
483 case IOV_TO_LINEAR:
484 memcpy(linear + lower_intersect,
485 iovBase_const + lower_intersect - iovOffset,
486 toWrite);
487 break;
488 case LINEAR_TO_IOV:
489 memcpy(iovBase + lower_intersect - iovOffset,
490 linear + lower_intersect,
491 toWrite);
492 break;
493 default:
494 VGP_FATAL() << "Invalid sync dir " << dir;
495 }
496 written += toWrite;
497 }
498 ++iovIndex;
499 iovOffset += iovLen;
500 }
501
502 return 0;
503 }
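
// Example (illustrative): an R8 resource backed by two 4096-byte iov entries,
// with a transfer box of x = 4000, w = 200, h = 1, yields start = 4000 and
// length = 200. The loop above then copies 96 bytes to/from the first iov
// entry and the remaining 104 bytes to/from the second, keeping the linear
// buffer and the scattered guest pages in sync.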
504
505 static uint64_t convert32to64(uint32_t lo, uint32_t hi) {
506 return ((uint64_t)lo) | (((uint64_t)hi) << 32);
507 }
508
509 // Commands for address space device
510 // kVirtioGpuAddressSpaceContextCreateWithSubdevice | subdeviceType
511 const uint32_t kVirtioGpuAddressSpaceContextCreateWithSubdevice = 0x1001;
512
513 // kVirtioGpuAddressSpacePing | phys_addr_lo | phys_addr_hi | size_lo | size_hi | metadata_lo | metadata_hi | wait_phys_addr_lo | wait_phys_addr_hi | wait_flags | direction
514 // no output
515 const uint32_t kVirtioGpuAddressSpacePing = 0x1002;
516
517 // kVirtioGpuAddressSpacePingWithResponse | resp_resid | phys_addr_lo | phys_addr_hi | size_lo | size_hi | metadata_lo | metadata_hi | wait_phys_addr_lo | wait_phys_addr_hi | wait_flags | direction
518 // out: the same fields, as updated by the ping, written back to the first iov page of resource resp_resid
519 const uint32_t kVirtioGpuAddressSpacePingWithResponse = 0x1003;
520
521 // Commands for native sync fd
522 const uint32_t kVirtioGpuNativeSyncCreateExportFd = 0x9000;
523 const uint32_t kVirtioGpuNativeSyncCreateImportFd = 0x9001;
524
525 const uint32_t kVirtioGpuNativeSyncVulkanCreateExportFd = 0xa000;
526 const uint32_t kVirtioGpuNativeSyncVulkanCreateImportFd = 0xa001;
527
528 const uint32_t kVirtioGpuNativeSyncVulkanQsriExport = 0xa002;
529 // Reserved for internal use. Do not reuse the same opcode for other execbuf
530 // commands.
531 const uint32_t kVirtioGpuReserved = 0xa003;
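
// A minimal sketch (assumed guest-side packing, mirroring addressSpaceProcessCmd()
// below) of an EXECBUFFER payload for kVirtioGpuAddressSpacePing:
//
//     uint32_t dwords[11] = {
//         kVirtioGpuAddressSpacePing,
//         (uint32_t)phys_addr,      (uint32_t)(phys_addr >> 32),
//         (uint32_t)size,           (uint32_t)(size >> 32),
//         (uint32_t)metadata,       (uint32_t)(metadata >> 32),
//         (uint32_t)wait_phys_addr, (uint32_t)(wait_phys_addr >> 32),
//         wait_flags, direction,
//     };
//
// The host splits each 64-bit field back out with convert32to64().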
532
533 class PipeVirglRenderer {
534 public:
535 PipeVirglRenderer() = default;
536
537 int init(void* cookie, int flags, const struct virgl_renderer_callbacks* callbacks) {
538 VGPLOG("cookie: %p", cookie);
539 mCookie = cookie;
540 mVirglRendererCallbacks = *callbacks;
541 mVirtioGpuOps = android_getVirtioGpuOps();
542 if (!mVirtioGpuOps) {
543 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "Could not get virtio gpu ops!";
544 }
545 mReadPixelsFunc = android_getReadPixelsFunc();
546 if (!mReadPixelsFunc) {
547 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
548 << "Could not get read pixels func!";
549 }
550 mAddressSpaceDeviceControlOps = get_address_space_device_control_ops();
551 if (!mAddressSpaceDeviceControlOps) {
552 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
553 << "Could not get address space device control ops!";
554 }
555 if (flags & GFXSTREAM_RENDERER_FLAGS_ASYNC_FENCE_CB) {
556 VGPLOG("Using async fence cb.");
557 mVirtioGpuTimelines = std::make_unique<VirtioGpuTimelines>();
558 } else {
559 VGPLOG("Not using async fence cb.");
560 mVirtioGpuTimelines = nullptr;
561 }
562 VGPLOG("done");
563 return 0;
564 }
565
566 void resetPipe(GoldfishHwPipe* hwPipe, GoldfishHostPipe* hostPipe) {
567 VGPLOG("Want to reset hwpipe %p to hostpipe %p", hwPipe, hostPipe);
568 VirtioGpuCtxId asCtxId = (VirtioGpuCtxId)(uintptr_t)hwPipe;
569 auto it = mContexts.find(asCtxId);
570 if (it == mContexts.end()) {
571 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
572 << "fatal: pipe id " << asCtxId << " not found";
573 }
574
575 auto& entry = it->second;
576 VGPLOG("ctxid: %u prev hostpipe: %p", asCtxId, entry.hostPipe);
577 entry.hostPipe = hostPipe;
578 VGPLOG("ctxid: %u next hostpipe: %p", asCtxId, entry.hostPipe);
579
580 // Also update any resources associated with it
581 auto resourcesIt = mContextResources.find(asCtxId);
582
583 if (resourcesIt == mContextResources.end()) return;
584
585 const auto& resIds = resourcesIt->second;
586
587 for (auto resId : resIds) {
588 auto resEntryIt = mResources.find(resId);
589 if (resEntryIt == mResources.end()) {
590 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
591 << "res id " << resId << " entry not found";
592 }
593
594 auto& resEntry = resEntryIt->second;
595 resEntry.hostPipe = hostPipe;
596 }
597 }
598
599 int createContext(VirtioGpuCtxId ctx_id, uint32_t nlen, const char* name,
600 uint32_t context_init) {
601 AutoLock lock(mLock);
602 VGPLOG("ctxid: %u len: %u name: %s", ctx_id, nlen, name);
603 auto ops = ensureAndGetServiceOps();
604 auto hostPipe = ops->guest_open_with_flags(
605 reinterpret_cast<GoldfishHwPipe*>(ctx_id),
606 0x1 /* is virtio */);
607
608 if (!hostPipe) {
609 fprintf(stderr, "%s: failed to create hw pipe!\n", __func__);
610 return -1;
611 }
612
613 PipeCtxEntry res = {
614 ctx_id, // ctxId
615 hostPipe, // hostPipe
616 0, // fence
617 0, // AS handle
618 false, // does not have an AS handle
619 };
620
621 VGPLOG("initial host pipe for ctxid %u: %p", ctx_id, hostPipe);
622 mContexts[ctx_id] = res;
623 return 0;
624 }
625
626 int destroyContext(VirtioGpuCtxId handle) {
627 AutoLock lock(mLock);
628 VGPLOG("ctxid: %u", handle);
629
630 auto it = mContexts.find(handle);
631 if (it == mContexts.end()) {
632 fprintf(stderr, "%s: could not find context handle %u\n", __func__, handle);
633 return -1;
634 }
635
636 if (it->second.hasAddressSpaceHandle) {
637 mAddressSpaceDeviceControlOps->destroy_handle(
638 it->second.addressSpaceHandle);
639 }
640
641 auto ops = ensureAndGetServiceOps();
642 auto hostPipe = it->second.hostPipe;
643
644 if (!hostPipe) {
645 fprintf(stderr, "%s: 0 is not a valid hostpipe\n", __func__);
646 return -1;
647 }
648
649 ops->guest_close(hostPipe, GOLDFISH_PIPE_CLOSE_GRACEFUL);
650
651 mContexts.erase(it);
652 return 0;
653 }
654
655 void setContextAddressSpaceHandleLocked(VirtioGpuCtxId ctxId, uint32_t handle) {
656 auto ctxIt = mContexts.find(ctxId);
657 if (ctxIt == mContexts.end()) {
658 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
659 << "ctx id " << ctxId << " not found";
660 }
661
662 auto& ctxEntry = ctxIt->second;
663 ctxEntry.addressSpaceHandle = handle;
664 ctxEntry.hasAddressSpaceHandle = true;
665 }
666
667 uint32_t getAddressSpaceHandleLocked(VirtioGpuCtxId ctxId) {
668 auto ctxIt = mContexts.find(ctxId);
669 if (ctxIt == mContexts.end()) {
670 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
671 << "ctx id " << ctxId << " not found ";
672 }
673
674 auto& ctxEntry = ctxIt->second;
675
676 if (!ctxEntry.hasAddressSpaceHandle) {
677 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
678 << "ctx id " << ctxId << " doesn't have address space handle";
679 }
680
681 return ctxEntry.addressSpaceHandle;
682 }
683
684 void writeWordsToFirstIovPageLocked(uint32_t* dwords, size_t dwordCount, uint32_t resId) {
685
686 auto resEntryIt = mResources.find(resId);
687 if (resEntryIt == mResources.end()) {
688 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
689 << " resid " << resId << " not found";
690 }
691
692 auto& resEntry = resEntryIt->second;
693
694 if (!resEntry.iov) {
695 GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER))
696 << "resid " << resId << " has empty iov ";
697 }
698
699 uint32_t* iovWords = (uint32_t*)(resEntry.iov[0].iov_base);
700 memcpy(iovWords, dwords, sizeof(uint32_t) * dwordCount);
701 }
702
703 void addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dwords, int dwordCount) {
704 uint32_t opcode = dwords[0];
705
706 switch (opcode) {
707 case kVirtioGpuAddressSpaceContextCreateWithSubdevice: {
708 uint32_t subdevice_type = dwords[1];
709
710 uint32_t handle = mAddressSpaceDeviceControlOps->gen_handle();
711
712 struct android::emulation::AddressSpaceDevicePingInfo pingInfo = {
713 .metadata = (uint64_t)subdevice_type,
714 };
715
716 mAddressSpaceDeviceControlOps->ping_at_hva(handle, &pingInfo);
717
718 AutoLock lock(mLock);
719 setContextAddressSpaceHandleLocked(ctxId, handle);
720 break;
721 }
722 case kVirtioGpuAddressSpacePing: {
723 uint32_t phys_addr_lo = dwords[1];
724 uint32_t phys_addr_hi = dwords[2];
725
726 uint32_t size_lo = dwords[3];
727 uint32_t size_hi = dwords[4];
728
729 uint32_t metadata_lo = dwords[5];
730 uint32_t metadata_hi = dwords[6];
731
732 uint32_t wait_phys_addr_lo = dwords[7];
733 uint32_t wait_phys_addr_hi = dwords[8];
734
735 uint32_t wait_flags = dwords[9];
736 uint32_t direction = dwords[10];
737
738 struct android::emulation::AddressSpaceDevicePingInfo pingInfo = {
739 .phys_addr = convert32to64(phys_addr_lo, phys_addr_hi),
740 .size = convert32to64(size_lo, size_hi),
741 .metadata = convert32to64(metadata_lo, metadata_hi),
742 .wait_phys_addr = convert32to64(wait_phys_addr_lo, wait_phys_addr_hi),
743 .wait_flags = wait_flags,
744 .direction = direction,
745 };
746
747 AutoLock lock(mLock);
748 mAddressSpaceDeviceControlOps->ping_at_hva(
749 getAddressSpaceHandleLocked(ctxId),
750 &pingInfo);
751 break;
752 }
753 case kVirtioGpuAddressSpacePingWithResponse: {
754 uint32_t resp_resid = dwords[1];
755 uint32_t phys_addr_lo = dwords[2];
756 uint32_t phys_addr_hi = dwords[3];
757
758 uint32_t size_lo = dwords[4];
759 uint32_t size_hi = dwords[5];
760
761 uint32_t metadata_lo = dwords[6];
762 uint32_t metadata_hi = dwords[7];
763
764 uint32_t wait_phys_addr_lo = dwords[8];
765 uint32_t wait_phys_addr_hi = dwords[9];
766
767 uint32_t wait_flags = dwords[10];
768 uint32_t direction = dwords[11];
769
770 struct android::emulation::AddressSpaceDevicePingInfo pingInfo = {
771 .phys_addr = convert32to64(phys_addr_lo, phys_addr_hi),
772 .size = convert32to64(size_lo, size_hi),
773 .metadata = convert32to64(metadata_lo, metadata_hi),
774 .wait_phys_addr = convert32to64(wait_phys_addr_lo, wait_phys_addr_hi),
775 .wait_flags = wait_flags,
776 .direction = direction,
777 };
778
779 AutoLock lock(mLock);
780 mAddressSpaceDeviceControlOps->ping_at_hva(
781 getAddressSpaceHandleLocked(ctxId),
782 &pingInfo);
783
784 phys_addr_lo = (uint32_t)pingInfo.phys_addr;
785 phys_addr_hi = (uint32_t)(pingInfo.phys_addr >> 32);
786 size_lo = (uint32_t)(pingInfo.size >> 0);
787 size_hi = (uint32_t)(pingInfo.size >> 32);
788 metadata_lo = (uint32_t)(pingInfo.metadata >> 0);
789 metadata_hi = (uint32_t)(pingInfo.metadata >> 32);
790 wait_phys_addr_lo = (uint32_t)(pingInfo.wait_phys_addr >> 0);
791 wait_phys_addr_hi = (uint32_t)(pingInfo.wait_phys_addr >> 32);
792 wait_flags = (uint32_t)(pingInfo.wait_flags >> 0);
793 direction = (uint32_t)(pingInfo.direction >> 0);
794
795 uint32_t response[] = {
796 phys_addr_lo, phys_addr_hi,
797 size_lo, size_hi,
798 metadata_lo, metadata_hi,
799 wait_phys_addr_lo, wait_phys_addr_hi,
800 wait_flags, direction,
801 };
802
803 writeWordsToFirstIovPageLocked(
804 response,
805 sizeof(response) / sizeof(uint32_t),
806 resp_resid);
807 break;
808 }
809 default:
810 break;
811 }
812 }
813
814 int submitCmd(VirtioGpuCtxId ctxId, void* buffer, int dwordCount) {
815 VGPLOG("ctxid: %u buffer: %p dwords: %d", ctxId, buffer, dwordCount);
816
817 if (!buffer) {
818 fprintf(stderr, "%s: error: buffer null\n", __func__);
819 return -1;
820 }
821
822 // Parse command from buffer
823 uint32_t* dwords = (uint32_t*)buffer;
824
825 if (dwordCount < 1) {
826 fprintf(stderr, "%s: error: not enough dwords (got %d)\n", __func__, dwordCount);
827 return -1;
828 }
829
830 uint32_t opcode = dwords[0];
831
832 switch (opcode) {
833 case kVirtioGpuAddressSpaceContextCreateWithSubdevice:
834 case kVirtioGpuAddressSpacePing:
835 case kVirtioGpuAddressSpacePingWithResponse:
836 addressSpaceProcessCmd(ctxId, dwords, dwordCount);
837 break;
838 case kVirtioGpuNativeSyncCreateExportFd:
839 case kVirtioGpuNativeSyncCreateImportFd: {
840 uint32_t sync_handle_lo = dwords[1];
841 uint32_t sync_handle_hi = dwords[2];
842 uint64_t sync_handle = convert32to64(sync_handle_lo, sync_handle_hi);
843
844 VGPLOG("wait for gpu ctx id %u", ctxId);
845 if (mVirtioGpuTimelines) {
846 auto taskId = mVirtioGpuTimelines->enqueueTask(
847 static_cast<VirtioGpuTimelines::CtxId>(ctxId));
848 mVirtioGpuOps->async_wait_for_gpu_with_cb(
849 sync_handle, [this, ctxId, taskId] {
850 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
851 });
852 } else {
853 mVirtioGpuOps->wait_for_gpu(sync_handle);
854 }
855 break;
856 }
857 case kVirtioGpuNativeSyncVulkanCreateExportFd:
858 case kVirtioGpuNativeSyncVulkanCreateImportFd: {
859 uint32_t device_handle_lo = dwords[1];
860 uint32_t device_handle_hi = dwords[2];
861 uint64_t device_handle = convert32to64(device_handle_lo, device_handle_hi);
862
863 uint32_t fence_handle_lo = dwords[3];
864 uint32_t fence_handle_hi = dwords[4];
865 uint64_t fence_handle = convert32to64(fence_handle_lo, fence_handle_hi);
866
867 VGPLOG("wait for gpu vk ctx id %u", ctxId);
868 if (mVirtioGpuTimelines) {
869 auto taskId = mVirtioGpuTimelines->enqueueTask(
870 static_cast<VirtioGpuTimelines::CtxId>(ctxId));
871 mVirtioGpuOps->async_wait_for_gpu_vulkan_with_cb(
872 device_handle, fence_handle, [this, ctxId, taskId] {
873 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
874 });
875 } else {
876 mVirtioGpuOps->wait_for_gpu_vulkan(device_handle, fence_handle);
877 }
878 break;
879 }
880 case kVirtioGpuNativeSyncVulkanQsriExport: {
881 uint64_t image_handle_lo = dwords[1];
882 uint64_t image_handle_hi = dwords[2];
883 uint64_t image_handle = convert32to64(image_handle_lo, image_handle_hi);
884 VGPLOG("wait for gpu vk qsri id %u image 0x%llx", ctxId, (unsigned long long)image_handle);
885 if (mVirtioGpuTimelines) {
886 auto taskId = mVirtioGpuTimelines->enqueueTask(
887 static_cast<VirtioGpuTimelines::CtxId>(ctxId));
888 mVirtioGpuOps->async_wait_for_gpu_vulkan_qsri_with_cb(
889 image_handle, [this, ctxId, taskId] {
890 mVirtioGpuTimelines->notifyTaskCompletion(taskId);
891 });
892 } else {
893 mVirtioGpuOps->wait_for_gpu_vulkan_qsri(image_handle);
894 }
895 break;
896 }
897 default:
898 return -1;
899 }
900
901 return 0;
902 }
903
904 enum VirtioGpuFenceType {
905 Global,
906 ContextFence,
907 };
908
909 enum CtxSyncingType {
910 SyncSignal,
911 AsyncSignal,
912 };
913
914 struct CtxPendingFence {
915 VirtioGpuFenceType fenceType;
916 CtxSyncingType syncType;
917 uint64_t fence_value;
918 };
919
920 int createFence(int client_fence_id, uint32_t ctx_id) {
921 AutoLock lock(mLock);
922 VGPLOG("fenceid: %u cmdtype: %u", client_fence_id, ctx_id);
923 if (mVirtioGpuTimelines) {
924 VGPLOG("create fence using async fence cb");
925 if (0 == ctx_id) {
926 VGPLOG("is 0 ctx id, signal right away as everything's serialized to this point");
927 mVirglRendererCallbacks.write_fence(mCookie, (uint32_t)client_fence_id);
928 } else {
929 VGPLOG("is Not 0 ctx id (%u), do not signal right away if async signal on top.. the client fence id was %d", ctx_id, client_fence_id);
930 mVirtioGpuTimelines->enqueueFence(
931 0,
932 static_cast<VirtioGpuTimelines::FenceId>(client_fence_id),
933 [this, client_fence_id]() {
934 mVirglRendererCallbacks.write_fence(
935 mCookie, static_cast<uint32_t>(client_fence_id));
936 });
937 }
938 } else {
939 VGPLOG("create fence without async fence cb");
940 mFenceDeque.push_back((uint64_t)client_fence_id);
941 }
942 return 0;
943 }
944
945 int contextCreateFence(uint64_t fence_id, uint32_t ctx_id, uint8_t ring_idx) {
946 AutoLock lock(mLock);
947 VGPLOG("fenceid: %llu cmdtype: %u ring_idx: %u", (unsigned long long)fence_id, ctx_id, ring_idx);
948 if (mVirtioGpuTimelines) {
949 VGPLOG("create fence using async fence cb");
950 if (0 == ctx_id) {
951 VGPLOG("is 0 ctx id, signal right away as everything's serialized to this point");
952 mVirglRendererCallbacks.write_fence(mCookie, (uint32_t)fence_id);
953 } else {
954 VGPLOG("is Not 0 ctx id (%u), do not signal right away if async signal on top.. the client fence id was %llu",
955 ctx_id, (unsigned long long)fence_id);
956 #ifdef VIRGL_RENDERER_UNSTABLE_APIS
957 mVirtioGpuTimelines->enqueueFence(
958 static_cast<VirtioGpuTimelines::CtxId>(ctx_id),
959 static_cast<VirtioGpuTimelines::FenceId>(fence_id),
960 [this, fence_id, ctx_id, ring_idx]() {
961 mVirglRendererCallbacks.write_context_fence(
962 mCookie, fence_id, ctx_id, ring_idx);
963 });
964 #else
965 VGPLOG("enable unstable apis for this feature");
966 return -EINVAL;
967 #endif
968 }
969 } else {
970 fprintf(stderr, "%s: create fence without async fence cb\n", __func__);
971 mFenceDeque.push_back(fence_id);
972 }
973 return 0;
974 }
975
976 void poll() {
977 VGPLOG("start");
978 AutoLock lock(mLock);
979 for (auto fence : mFenceDeque) {
980 VGPLOG("write fence: %llu", (unsigned long long)fence);
981 mVirglRendererCallbacks.write_fence(mCookie, (uint32_t)fence);
982 VGPLOG("write fence: %llu (done with callback)", (unsigned long long)fence);
983 }
984 mFenceDeque.clear();
985 VGPLOG("end");
986 }
987
988 enum pipe_texture_target {
989 PIPE_BUFFER,
990 PIPE_TEXTURE_1D,
991 PIPE_TEXTURE_2D,
992 PIPE_TEXTURE_3D,
993 PIPE_TEXTURE_CUBE,
994 PIPE_TEXTURE_RECT,
995 PIPE_TEXTURE_1D_ARRAY,
996 PIPE_TEXTURE_2D_ARRAY,
997 PIPE_TEXTURE_CUBE_ARRAY,
998 PIPE_MAX_TEXTURE_TYPES,
999 };
1000
1001 /**
1002  * Resource binding flags -- state tracker must specify in advance all
1003  * the ways a resource might be used.
1004  */
1005 #define PIPE_BIND_DEPTH_STENCIL (1 << 0) /* create_surface */
1006 #define PIPE_BIND_RENDER_TARGET (1 << 1) /* create_surface */
1007 #define PIPE_BIND_BLENDABLE (1 << 2) /* create_surface */
1008 #define PIPE_BIND_SAMPLER_VIEW (1 << 3) /* create_sampler_view */
1009 #define PIPE_BIND_VERTEX_BUFFER (1 << 4) /* set_vertex_buffers */
1010 #define PIPE_BIND_INDEX_BUFFER (1 << 5) /* draw_elements */
1011 #define PIPE_BIND_CONSTANT_BUFFER (1 << 6) /* set_constant_buffer */
1012 #define PIPE_BIND_DISPLAY_TARGET (1 << 7) /* flush_front_buffer */
1013 /* gap */
1014 #define PIPE_BIND_STREAM_OUTPUT (1 << 10) /* set_stream_output_buffers */
1015 #define PIPE_BIND_CURSOR (1 << 11) /* mouse cursor */
1016 #define PIPE_BIND_CUSTOM (1 << 12) /* state-tracker/winsys usages */
1017 #define PIPE_BIND_GLOBAL (1 << 13) /* set_global_binding */
1018 #define PIPE_BIND_SHADER_BUFFER (1 << 14) /* set_shader_buffers */
1019 #define PIPE_BIND_SHADER_IMAGE (1 << 15) /* set_shader_images */
1020 #define PIPE_BIND_COMPUTE_RESOURCE (1 << 16) /* set_compute_resources */
1021 #define PIPE_BIND_COMMAND_ARGS_BUFFER (1 << 17) /* pipe_draw_info.indirect */
1022 #define PIPE_BIND_QUERY_BUFFER (1 << 18) /* get_query_result_resource */
1023
1024
1025 void handleCreateResourceGraphicsUsage(
1026 struct virgl_renderer_resource_create_args *args,
1027 struct iovec *iov, uint32_t num_iovs) {
1028
1029 if (args->target == PIPE_BUFFER) {
1030 // Nothing to handle; this is generic pipe usage.
1031 return;
1032 }
1033
1034 // corresponds to allocation of gralloc buffer in minigbm
1035 VGPLOG("w h %u %u resid %u -> rcCreateColorBufferWithHandle",
1036 args->width, args->height, args->handle);
1037 uint32_t glformat = virgl_format_to_gl(args->format);
1038 uint32_t fwkformat = virgl_format_to_fwk_format(args->format);
1039 mVirtioGpuOps->create_color_buffer_with_handle(
1040 args->width, args->height, glformat, fwkformat, args->handle);
1041 mVirtioGpuOps->set_guest_managed_color_buffer_lifetime(true /* guest manages lifetime */);
1042 mVirtioGpuOps->open_color_buffer(
1043 args->handle);
1044 }
1045
1046 int createResource(
1047 struct virgl_renderer_resource_create_args *args,
1048 struct iovec *iov, uint32_t num_iovs) {
1049
1050 VGPLOG("handle: %u. num iovs: %u", args->handle, num_iovs);
1051
1052 handleCreateResourceGraphicsUsage(args, iov, num_iovs);
1053
1054 PipeResEntry e;
1055 e.args = *args;
1056 e.linear = 0;
1057 e.hostPipe = 0;
1058 e.hva = 0;
1059 e.hvaSize = 0;
1060 e.hvaId = 0;
1061 e.hvSlot = 0;
1062 allocResource(e, iov, num_iovs);
1063
1064 AutoLock lock(mLock);
1065 mResources[args->handle] = e;
1066 return 0;
1067 }
1068
1069 void handleUnrefResourceGraphicsUsage(PipeResEntry* res, uint32_t resId) {
1070 if (res->args.target == PIPE_BUFFER) return;
1071 mVirtioGpuOps->close_color_buffer(resId);
1072 }
1073
1074 void unrefResource(uint32_t toUnrefId) {
1075 AutoLock lock(mLock);
1076 VGPLOG("handle: %u", toUnrefId);
1077
1078 auto it = mResources.find(toUnrefId);
1079 if (it == mResources.end()) return;
1080
1081 auto contextsIt = mResourceContexts.find(toUnrefId);
1082 if (contextsIt != mResourceContexts.end()) {
1083 mResourceContexts.erase(contextsIt->first);
1084 }
1085
1086 for (auto& ctxIdResources : mContextResources) {
1087 detachResourceLocked(ctxIdResources.first, toUnrefId);
1088 }
1089
1090 auto& entry = it->second;
1091
1092 handleUnrefResourceGraphicsUsage(&entry, toUnrefId);
1093
1094 if (entry.linear) {
1095 free(entry.linear);
1096 entry.linear = nullptr;
1097 }
1098
1099 if (entry.iov) {
1100 free(entry.iov);
1101 entry.iov = nullptr;
1102 entry.numIovs = 0;
1103 }
1104
1105 if (entry.hvaId) {
1106 // gfxstream manages when to actually remove the hostmem id and storage
1107 //
1108 // fprintf(stderr, "%s: unref a hostmem resource. hostmem id: 0x%llx\n", __func__,
1109 // (unsigned long long)(entry.hvaId));
1110 // HostmemIdMapping::get()->remove(entry.hvaId);
1111 // auto ownedIt = mOwnedHostmemIdBuffers.find(entry.hvaId);
1112 // if (ownedIt != mOwnedHostmemIdBuffers.end()) {
1113 // // android::aligned_buf_free(ownedIt->second);
1114 // }
1115 }
1116
1117 entry.hva = 0;
1118 entry.hvaSize = 0;
1119 entry.hvaId = 0;
1120 entry.hvSlot = 0;
1121
1122 mResources.erase(it);
1123 }
1124
1125 int attachIov(int resId, iovec* iov, int num_iovs) {
1126 AutoLock lock(mLock);
1127
1128 VGPLOG("resid: %d numiovs: %d", resId, num_iovs);
1129
1130 auto it = mResources.find(resId);
1131 if (it == mResources.end()) return ENOENT;
1132
1133 auto& entry = it->second;
1134 VGPLOG("res linear: %p", entry.linear);
1135 if (!entry.linear) allocResource(entry, iov, num_iovs);
1136
1137 VGPLOG("done");
1138 return 0;
1139 }
1140
1141 void detachIov(int resId, iovec** iov, int* num_iovs) {
1142 AutoLock lock(mLock);
1143
1144 auto it = mResources.find(resId);
1145 if (it == mResources.end()) return;
1146
1147 auto& entry = it->second;
1148
1149 if (num_iovs) {
1150 *num_iovs = entry.numIovs;
1151 VGPLOG("resid: %d numIovs: %d", resId, *num_iovs);
1152 } else {
1153 VGPLOG("resid: %d numIovs: 0", resId);
1154 }
1155
1156 entry.numIovs = 0;
1157
1158 if (entry.iov) free(entry.iov);
1159 entry.iov = nullptr;
1160
1161 if (iov) {
1162 *iov = entry.iov;
1163 }
1164
1165 allocResource(entry, entry.iov, entry.numIovs);
1166 VGPLOG("done");
1167 }
1168
1169 bool handleTransferReadGraphicsUsage(
1170 PipeResEntry* res, uint64_t offset, virgl_box* box) {
1171 // PIPE_BUFFER: Generic pipe usage
1172 if (res->args.target == PIPE_BUFFER) return true;
1173
1174 // Others: Gralloc transfer read operation
1175 auto glformat = virgl_format_to_gl(res->args.format);
1176 auto gltype = gl_format_to_natural_type(glformat);
1177
1178 // We always xfer the whole thing again from GL
1179 // since it's fiddly to calc / copy-out subregions
1180 if (virgl_format_is_yuv(res->args.format)) {
1181 mVirtioGpuOps->read_color_buffer_yuv(
1182 res->args.handle,
1183 0, 0,
1184 res->args.width, res->args.height,
1185 res->linear, res->linearSize);
1186 } else {
1187 mVirtioGpuOps->read_color_buffer(
1188 res->args.handle,
1189 0, 0,
1190 res->args.width, res->args.height,
1191 glformat,
1192 gltype,
1193 res->linear);
1194 }
1195
1196 return false;
1197 }
1198
1199 bool handleTransferWriteGraphicsUsage(
1200 PipeResEntry* res, uint64_t offset, virgl_box* box) {
1201 // PIPE_BUFFER: Generic pipe usage
1202 if (res->args.target == PIPE_BUFFER) return true;
1203
1204 // Others: Gralloc transfer write operation
1205 auto glformat = virgl_format_to_gl(res->args.format);
1206 auto gltype = gl_format_to_natural_type(glformat);
1207
1208 // We always xfer the whole thing again to GL
1209 // since it's fiddly to calc / copy-out subregions
1210 mVirtioGpuOps->update_color_buffer(
1211 res->args.handle,
1212 0, 0,
1213 res->args.width, res->args.height,
1214 glformat,
1215 gltype,
1216 res->linear);
1217
1218 return false;
1219 }
1220
1221 int transferReadIov(int resId, uint64_t offset, virgl_box* box, struct iovec* iov, int iovec_cnt) {
1222 AutoLock lock(mLock);
1223
1224 VGPLOG("resid: %d offset: 0x%llx. box: %u %u %u %u", resId,
1225 (unsigned long long)offset,
1226 box->x,
1227 box->y,
1228 box->w,
1229 box->h);
1230
1231 auto it = mResources.find(resId);
1232 if (it == mResources.end()) return EINVAL;
1233
1234 auto& entry = it->second;
1235
1236 if (handleTransferReadGraphicsUsage(
1237 &entry, offset, box)) {
1238 // Do the pipe service op here, if there is an associated hostpipe.
1239 auto hostPipe = entry.hostPipe;
1240 if (!hostPipe) return -1;
1241
1242 auto ops = ensureAndGetServiceOps();
1243
1244 size_t readBytes = 0;
1245 size_t wantedBytes = readBytes + (size_t)box->w;
1246
1247 while (readBytes < wantedBytes) {
1248 GoldfishPipeBuffer buf = {
1249 ((char*)entry.linear) + box->x + readBytes,
1250 wantedBytes - readBytes,
1251 };
1252 auto status = ops->guest_recv(hostPipe, &buf, 1);
1253
1254 if (status > 0) {
1255 readBytes += status;
1256 } else if (status != kPipeTryAgain) {
1257 return EIO;
1258 }
1259 }
1260 }
1261
1262 VGPLOG("Linear first word: %d", *(int*)(entry.linear));
1263
1264 int syncRes;
1265
1266 if (iovec_cnt) {
1267 PipeResEntry e = {
1268 entry.args,
1269 iov,
1270 (uint32_t)iovec_cnt,
1271 entry.linear,
1272 entry.linearSize,
1273 };
1274 syncRes =
1275 sync_iov(&e, offset, box, LINEAR_TO_IOV);
1276 } else {
1277 syncRes =
1278 sync_iov(&entry, offset, box, LINEAR_TO_IOV);
1279 }
1280
1281 VGPLOG("done");
1282
1283 return syncRes;
1284 }
1285
1286 int transferWriteIov(int resId, uint64_t offset, virgl_box* box, struct iovec* iov, int iovec_cnt) {
1287 AutoLock lock(mLock);
1288 VGPLOG("resid: %d offset: 0x%llx", resId,
1289 (unsigned long long)offset);
1290 auto it = mResources.find(resId);
1291 if (it == mResources.end()) return EINVAL;
1292
1293 auto& entry = it->second;
1294 int syncRes;
1295
1296 if (iovec_cnt) {
1297 PipeResEntry e = {
1298 entry.args,
1299 iov,
1300 (uint32_t)iovec_cnt,
1301 entry.linear,
1302 entry.linearSize,
1303 };
1304 syncRes = sync_iov(&e, offset, box, IOV_TO_LINEAR);
1305 } else {
1306 syncRes = sync_iov(&entry, offset, box, IOV_TO_LINEAR);
1307 }
1308
1309 if (handleTransferWriteGraphicsUsage(&entry, offset, box)) {
1310 // Do the pipe service op here, if there is an associated hostpipe.
1311 auto hostPipe = entry.hostPipe;
1312 if (!hostPipe) {
1313 VGPLOG("No hostPipe");
1314 return syncRes;
1315 }
1316
1317 VGPLOG("resid: %d offset: 0x%llx hostpipe: %p", resId,
1318 (unsigned long long)offset, hostPipe);
1319
1320 auto ops = ensureAndGetServiceOps();
1321
1322 size_t writtenBytes = 0;
1323 size_t wantedBytes = (size_t)box->w;
1324
1325 while (writtenBytes < wantedBytes) {
1326 GoldfishPipeBuffer buf = {
1327 ((char*)entry.linear) + box->x + writtenBytes,
1328 wantedBytes - writtenBytes,
1329 };
1330
1331 // guest_send can now reallocate the pipe.
1332 void* hostPipeBefore = hostPipe;
1333 auto status = ops->guest_send(&hostPipe, &buf, 1);
1334 if (hostPipe != hostPipeBefore) {
1335 resetPipe((GoldfishHwPipe*)(uintptr_t)(entry.ctxId), hostPipe);
1336 it = mResources.find(resId);
1337 entry = it->second;
1338 }
1339
1340 if (status > 0) {
1341 writtenBytes += status;
1342 } else if (status != kPipeTryAgain) {
1343 return EIO;
1344 }
1345 }
1346 }
1347
1348 VGPLOG("done");
1349 return syncRes;
1350 }
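
// To summarize the write path (illustrative): the guest fills the iovec-backed
// pages and issues TRANSFER_TO_HOST with a 1D box; sync_iov() copies box->w
// bytes at box->x into the linear staging buffer; guest_send() then forwards
// those bytes to the host pipe service, retrying on kPipeTryAgain and
// re-resolving the pipe if the service replaced it during the send.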
1351
1352 void attachResource(uint32_t ctxId, uint32_t resId) {
1353 AutoLock lock(mLock);
1354 VGPLOG("ctxid: %u resid: %u", ctxId, resId);
1355
1356 auto resourcesIt = mContextResources.find(ctxId);
1357
1358 if (resourcesIt == mContextResources.end()) {
1359 std::vector<VirtioGpuResId> ids;
1360 ids.push_back(resId);
1361 mContextResources[ctxId] = ids;
1362 } else {
1363 auto& ids = resourcesIt->second;
1364 auto idIt = std::find(ids.begin(), ids.end(), resId);
1365 if (idIt == ids.end())
1366 ids.push_back(resId);
1367 }
1368
1369 auto contextsIt = mResourceContexts.find(resId);
1370
1371 if (contextsIt == mResourceContexts.end()) {
1372 std::vector<VirtioGpuCtxId> ids;
1373 ids.push_back(ctxId);
1374 mResourceContexts[resId] = ids;
1375 } else {
1376 auto& ids = contextsIt->second;
1377 auto idIt = std::find(ids.begin(), ids.end(), ctxId);
1378 if (idIt == ids.end())
1379 ids.push_back(ctxId);
1380 }
1381
1382 // Associate the host pipe of the resource entry with the host pipe of
1383 // the context entry. That is, the last context to call attachResource
1384 // wins if there is any conflict.
1385 auto ctxEntryIt = mContexts.find(ctxId); auto resEntryIt =
1386 mResources.find(resId);
1387
1388 if (ctxEntryIt == mContexts.end() ||
1389 resEntryIt == mResources.end()) return;
1390
1391 VGPLOG("hostPipe: %p", ctxEntryIt->second.hostPipe);
1392 resEntryIt->second.hostPipe = ctxEntryIt->second.hostPipe;
1393 resEntryIt->second.ctxId = ctxId;
1394 }
1395
1396 void detachResource(uint32_t ctxId, uint32_t toUnrefId) {
1397 AutoLock lock(mLock);
1398 VGPLOG("ctxid: %u resid: %u", ctxId, toUnrefId);
1399 detachResourceLocked(ctxId, toUnrefId);
1400 }
1401
1402 int getResourceInfo(uint32_t resId, struct virgl_renderer_resource_info *info) {
1403 VGPLOG("resid: %u", resId);
1404 if (!info)
1405 return EINVAL;
1406
1407 AutoLock lock(mLock);
1408 auto it = mResources.find(resId);
1409 if (it == mResources.end())
1410 return ENOENT;
1411
1412 auto& entry = it->second;
1413
1414 uint32_t bpp = 4U;
1415 switch (entry.args.format) {
1416 case VIRGL_FORMAT_B8G8R8A8_UNORM:
1417 info->drm_fourcc = DRM_FORMAT_ARGB8888;
1418 break;
1419 case VIRGL_FORMAT_B5G6R5_UNORM:
1420 info->drm_fourcc = DRM_FORMAT_RGB565;
1421 bpp = 2U;
1422 break;
1423 case VIRGL_FORMAT_R8G8B8A8_UNORM:
1424 info->drm_fourcc = DRM_FORMAT_ABGR8888;
1425 break;
1426 case VIRGL_FORMAT_R8G8B8X8_UNORM:
1427 info->drm_fourcc = DRM_FORMAT_XBGR8888;
1428 break;
1429 case VIRGL_FORMAT_R8_UNORM:
1430 info->drm_fourcc = DRM_FORMAT_R8;
1431 bpp = 1U;
1432 break;
1433 default:
1434 return EINVAL;
1435 }
1436
1437 info->stride = align_up(entry.args.width * bpp, 16U);
1438 info->virgl_format = entry.args.format;
1439 info->handle = entry.args.handle;
1440 info->height = entry.args.height;
1441 info->width = entry.args.width;
1442 info->depth = entry.args.depth;
1443 info->flags = entry.args.flags;
1444 info->tex_id = 0;
1445 return 0;
1446 }
1447
1448 void flushResourceAndReadback(
1449 uint32_t res_handle, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
1450 void* pixels, uint32_t max_bytes) {
1451 (void)x;
1452 (void)y;
1453 (void)width;
1454 (void)height;
1455 //TODO: displayId > 0 ?
1456 uint32_t displayId = 0;
1457 mVirtioGpuOps->post_color_buffer(res_handle);
1458 if (pixels) {
1459 mReadPixelsFunc(pixels, max_bytes, displayId);
1460 }
1461 }
1462
1463 void createResourceV2(uint32_t res_handle, uint64_t hvaId) {
1464 PipeResEntry e;
1465 struct virgl_renderer_resource_create_args args = {
1466 res_handle,
1467 PIPE_BUFFER,
1468 VIRGL_FORMAT_R8_UNORM,
1469 PIPE_BIND_COMMAND_ARGS_BUFFER,
1470 0, 1, 1,
1471 0, 0, 0, 0
1472 };
1473 e.args = args;
1474 e.hostPipe = 0;
1475
1476 auto entry = HostmemIdMapping::get()->get(hvaId);
1477
1478 e.hva = entry.hva;
1479 e.hvaSize = entry.size;
1480 e.args.width = entry.size;
1481 e.caching = entry.caching;
1482 e.hvaId = hvaId;
1483 e.hvSlot = 0;
1484 e.iov = nullptr;
1485 e.numIovs = 0;
1486 e.linear = 0;
1487 e.linearSize = 0;
1488
1489 AutoLock lock(mLock);
1490 mResources[res_handle] = e;
1491 }
1492
1493 int resourceMap(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1494 AutoLock lock(mLock);
1495 auto it = mResources.find(res_handle);
1496 if (it == mResources.end()) {
1497 if (hvaOut) *hvaOut = nullptr;
1498 if (sizeOut) *sizeOut = 0;
1499 return -1;
1500 }
1501
1502 const auto& entry = it->second;
1503
1504 static const uint64_t kPageSizeforBlob = 4096;
1505 static const uint64_t kPageMaskForBlob = ~(0xfff);
1506
1507 uint64_t alignedHva =
1508 entry.hva & kPageMaskForBlob;
1509
1510 uint64_t alignedSize =
1511 kPageSizeforBlob *
1512 ((entry.hvaSize + kPageSizeforBlob - 1) / kPageSizeforBlob);
1513
1514 if (hvaOut) *hvaOut = (void*)(uintptr_t)alignedHva;
1515 if (sizeOut) *sizeOut = alignedSize;
1516 return 0;
1517 }
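
// Example (illustrative): for a blob with hva = 0x7f0012345678 and
// hvaSize = 0x1800, the mapping exposed to the caller is
// alignedHva = 0x7f0012345000 and alignedSize = 0x2000, i.e. the host virtual
// address rounded down to a page boundary and the size rounded up to a whole
// number of 4 KiB pages.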
1518
1519 int resourceUnmap(uint32_t res_handle) {
1520 AutoLock lock(mLock);
1521 auto it = mResources.find(res_handle);
1522 if (it == mResources.end()) {
1523 return -1;
1524 }
1525
1526 // TODO(lfy): Good place to run any registered cleanup callbacks.
1527 // No-op for now.
1528 return 0;
1529 }
1530
1531 int platformImportResource(int res_handle, int res_type, void* resource) {
1532 AutoLock lock(mLock);
1533 auto it = mResources.find(res_handle);
1534 if (it == mResources.end()) return -1;
1535 bool success =
1536 mVirtioGpuOps->platform_import_resource(res_handle, res_type, resource);
1537 return success ? 0 : -1;
1538 }
1539
1540 int platformResourceInfo(int res_handle, int* width, int* height, int* internal_format) {
1541 AutoLock lock(mLock);
1542 auto it = mResources.find(res_handle);
1543 if (it == mResources.end()) return -1;
1544 bool success =
1545 mVirtioGpuOps->platform_resource_info(res_handle, width, height, internal_format);
1546 return success ? 0 : -1;
1547 }
1548
1549 void* platformCreateSharedEglContext() {
1550 return mVirtioGpuOps->platform_create_shared_egl_context();
1551 }
1552
1553 int platformDestroySharedEglContext(void* context) {
1554 bool success = mVirtioGpuOps->platform_destroy_shared_egl_context(context);
1555 return success ? 0 : -1;
1556 }
1557
1558 int resourceMapInfo(uint32_t res_handle, uint32_t *map_info) {
1559 AutoLock lock(mLock);
1560 auto it = mResources.find(res_handle);
1561 if (it == mResources.end()) return -1;
1562
1563 const auto& entry = it->second;
1564 *map_info = entry.caching;
1565 return 0;
1566 }
1567
1568 private:
1569 void allocResource(PipeResEntry& entry, iovec* iov, int num_iovs) {
1570 VGPLOG("entry linear: %p", entry.linear);
1571 if (entry.linear) free(entry.linear);
1572
1573 size_t linearSize = 0;
1574 for (uint32_t i = 0; i < num_iovs; ++i) {
1575 VGPLOG("iov base: %p", iov[i].iov_base);
1576 linearSize += iov[i].iov_len;
1577 VGPLOG("has iov of %zu. linearSize current: %zu",
1578 iov[i].iov_len, linearSize);
1579 }
1580 VGPLOG("final linearSize: %zu", linearSize);
1581
1582 void* linear = nullptr;
1583
1584 if (linearSize) linear = malloc(linearSize);
1585
1586 entry.iov = (iovec*)malloc(sizeof(*iov) * num_iovs);
1587 entry.numIovs = num_iovs;
1588 memcpy(entry.iov, iov, num_iovs * sizeof(*iov));
1589 entry.linear = linear;
1590 entry.linearSize = linearSize;
1591
1592 virgl_box initbox;
1593 initbox.x = 0;
1594 initbox.y = 0;
1595 initbox.w = (uint32_t)linearSize;
1596 initbox.h = 1;
1597 }
1598
1599 void detachResourceLocked(uint32_t ctxId, uint32_t toUnrefId) {
1600 VGPLOG("ctxid: %u resid: %u", ctxId, toUnrefId);
1601
1602 auto it = mContextResources.find(ctxId);
1603 if (it == mContextResources.end()) return;
1604
1605 std::vector<VirtioGpuResId> withoutRes;
1606 for (auto resId : it->second) {
1607 if (resId != toUnrefId) {
1608 withoutRes.push_back(resId);
1609 }
1610 }
1611 mContextResources[ctxId] = withoutRes;
1612
1613 auto resIt = mResources.find(toUnrefId);
1614 if (resIt == mResources.end()) return;
1615
1616 resIt->second.hostPipe = 0;
1617 resIt->second.ctxId = 0;
1618 }
1619
1620 inline const GoldfishPipeServiceOps* ensureAndGetServiceOps() {
1621 if (mServiceOps) return mServiceOps;
1622 mServiceOps = goldfish_pipe_get_service_ops();
1623 return mServiceOps;
1624 }
1625
1626 Lock mLock;
1627
1628 void* mCookie = nullptr;
1629 virgl_renderer_callbacks mVirglRendererCallbacks;
1630 AndroidVirtioGpuOps* mVirtioGpuOps = nullptr;
1631 ReadPixelsFunc mReadPixelsFunc = nullptr;
1632 struct address_space_device_control_ops* mAddressSpaceDeviceControlOps =
1633 nullptr;
1634
1635 const GoldfishPipeServiceOps* mServiceOps = nullptr;
1636
1637 std::unordered_map<VirtioGpuCtxId, PipeCtxEntry> mContexts;
1638 std::unordered_map<VirtioGpuResId, PipeResEntry> mResources;
1639 std::unordered_map<VirtioGpuCtxId, std::vector<VirtioGpuResId>> mContextResources;
1640 std::unordered_map<VirtioGpuResId, std::vector<VirtioGpuCtxId>> mResourceContexts;
1641
1642 // For use with the async fence cb.
1643 // When we wait for gpu or wait for gpu vulkan, the next (and subsequent)
1644 // fences created for that context should not be signaled immediately.
1645 // Rather, they should get in line.
1646 std::unique_ptr<VirtioGpuTimelines> mVirtioGpuTimelines = nullptr;
1647
1648 // For use without the async fence cb.
1649 std::deque<uint64_t> mFenceDeque;
1650 };
1651
1652 static PipeVirglRenderer* sRenderer() {
1653 static PipeVirglRenderer* p = new PipeVirglRenderer;
1654 return p;
1655 }
1656
1657 extern "C" {
1658
1659 VG_EXPORT int pipe_virgl_renderer_init(
1660 void *cookie, int flags, struct virgl_renderer_callbacks *cb) {
1661 sRenderer()->init(cookie, flags, cb);
1662 return 0;
1663 }
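
// A minimal sketch of how a VMM might drive this API (hypothetical caller, not
// part of this file): init once, then per-context create/submit/fence, and
// polling when the async fence callback is not in use.
//
//     pipe_virgl_renderer_init(cookie, 0 /* flags */, &callbacks);
//     pipe_virgl_renderer_context_create(ctx_id, strlen("ctx"), "ctx");
//     pipe_virgl_renderer_resource_create(&args, iovs, num_iovs);
//     pipe_virgl_renderer_ctx_attach_resource(ctx_id, args.handle);
//     pipe_virgl_renderer_submit_cmd(cmd_buf, ctx_id, cmd_dwords);
//     pipe_virgl_renderer_create_fence(fence_id, ctx_id);
//     pipe_virgl_renderer_poll();  // signals pending fences via write_fence()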
1664
1665 VG_EXPORT void pipe_virgl_renderer_poll(void) {
1666 sRenderer()->poll();
1667 }
1668
1669 VG_EXPORT void* pipe_virgl_renderer_get_cursor_data(
1670 uint32_t resource_id, uint32_t *width, uint32_t *height) {
1671 return 0;
1672 }
1673
1674 VG_EXPORT int pipe_virgl_renderer_resource_create(
1675 struct virgl_renderer_resource_create_args *args,
1676 struct iovec *iov, uint32_t num_iovs) {
1677
1678 return sRenderer()->createResource(args, iov, num_iovs);
1679 }
1680
pipe_virgl_renderer_resource_unref(uint32_t res_handle)1681 VG_EXPORT void pipe_virgl_renderer_resource_unref(uint32_t res_handle) {
1682 sRenderer()->unrefResource(res_handle);
1683 }
1684
pipe_virgl_renderer_context_create(uint32_t handle,uint32_t nlen,const char * name)1685 VG_EXPORT int pipe_virgl_renderer_context_create(
1686 uint32_t handle, uint32_t nlen, const char *name) {
1687 return sRenderer()->createContext(handle, nlen, name, 0);
1688 }
1689
pipe_virgl_renderer_context_destroy(uint32_t handle)1690 VG_EXPORT void pipe_virgl_renderer_context_destroy(uint32_t handle) {
1691 sRenderer()->destroyContext(handle);
1692 }
1693
pipe_virgl_renderer_submit_cmd(void * buffer,int ctx_id,int dwordCount)1694 VG_EXPORT int pipe_virgl_renderer_submit_cmd(void *buffer,
1695 int ctx_id,
1696 int dwordCount) {
1697 return sRenderer()->submitCmd(ctx_id, buffer, dwordCount);
1698 }
1699
pipe_virgl_renderer_transfer_read_iov(uint32_t handle,uint32_t ctx_id,uint32_t level,uint32_t stride,uint32_t layer_stride,struct virgl_box * box,uint64_t offset,struct iovec * iov,int iovec_cnt)1700 VG_EXPORT int pipe_virgl_renderer_transfer_read_iov(
1701 uint32_t handle, uint32_t ctx_id,
1702 uint32_t level, uint32_t stride,
1703 uint32_t layer_stride,
1704 struct virgl_box *box,
1705 uint64_t offset, struct iovec *iov,
1706 int iovec_cnt) {
1707 return sRenderer()->transferReadIov(handle, offset, box, iov, iovec_cnt);
1708 }
1709
pipe_virgl_renderer_transfer_write_iov(uint32_t handle,uint32_t ctx_id,int level,uint32_t stride,uint32_t layer_stride,struct virgl_box * box,uint64_t offset,struct iovec * iovec,unsigned int iovec_cnt)1710 VG_EXPORT int pipe_virgl_renderer_transfer_write_iov(
1711 uint32_t handle,
1712 uint32_t ctx_id,
1713 int level,
1714 uint32_t stride,
1715 uint32_t layer_stride,
1716 struct virgl_box *box,
1717 uint64_t offset,
1718 struct iovec *iovec,
1719 unsigned int iovec_cnt) {
1720 return sRenderer()->transferWriteIov(handle, offset, box, iovec, iovec_cnt);
1721 }

// Not implemented
VG_EXPORT void pipe_virgl_renderer_get_cap_set(uint32_t, uint32_t*, uint32_t*) { }
VG_EXPORT void pipe_virgl_renderer_fill_caps(uint32_t, uint32_t, void *caps) { }

VG_EXPORT int pipe_virgl_renderer_resource_attach_iov(
    int res_handle, struct iovec *iov,
    int num_iovs) {
    return sRenderer()->attachIov(res_handle, iov, num_iovs);
}

VG_EXPORT void pipe_virgl_renderer_resource_detach_iov(
    int res_handle, struct iovec **iov, int *num_iovs) {
    return sRenderer()->detachIov(res_handle, iov, num_iovs);
}

VG_EXPORT int pipe_virgl_renderer_create_fence(
    int client_fence_id, uint32_t ctx_id) {
    sRenderer()->createFence(client_fence_id, ctx_id);
    return 0;
}

VG_EXPORT void pipe_virgl_renderer_force_ctx_0(void) {
    VGPLOG("call");
}

VG_EXPORT void pipe_virgl_renderer_ctx_attach_resource(
    int ctx_id, int res_handle) {
    sRenderer()->attachResource(ctx_id, res_handle);
}

VG_EXPORT void pipe_virgl_renderer_ctx_detach_resource(
    int ctx_id, int res_handle) {
    sRenderer()->detachResource(ctx_id, res_handle);
}

VG_EXPORT int pipe_virgl_renderer_resource_get_info(
    int res_handle,
    struct virgl_renderer_resource_info *info) {
    return sRenderer()->getResourceInfo(res_handle, info);
}

VG_EXPORT int pipe_virgl_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
    return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
}

VG_EXPORT int pipe_virgl_renderer_resource_unmap(uint32_t res_handle) {
    return sRenderer()->resourceUnmap(res_handle);
}

VG_EXPORT void stream_renderer_flush_resource_and_readback(
    uint32_t res_handle, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
    void* pixels, uint32_t max_bytes) {
    sRenderer()->flushResourceAndReadback(res_handle, x, y, width, height, pixels, max_bytes);
}

VG_EXPORT int stream_renderer_create_blob(uint32_t ctx_id, uint32_t res_handle,
                                          const struct stream_renderer_create_blob* create_blob,
                                          const struct iovec* iovecs, uint32_t num_iovs,
                                          const struct stream_renderer_handle* handle) {
    sRenderer()->createResourceV2(res_handle, create_blob->blob_id);
    return 0;
}

VG_EXPORT int stream_renderer_export_blob(uint32_t res_handle,
                                          struct stream_renderer_handle* handle) {
    // Unimplemented for now.
    return -EINVAL;
}

VG_EXPORT int stream_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
    return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
}

VG_EXPORT int stream_renderer_resource_unmap(uint32_t res_handle) {
    return sRenderer()->resourceUnmap(res_handle);
}

VG_EXPORT int stream_renderer_create_context(uint32_t ctx_id, uint32_t nlen, const char *name,
                                             uint32_t context_init) {
    return sRenderer()->createContext(ctx_id, nlen, name, context_init);
}

VG_EXPORT int stream_renderer_context_create_fence(
    uint64_t fence_id, uint32_t ctx_id, uint8_t ring_idx) {
    sRenderer()->contextCreateFence(fence_id, ctx_id, ring_idx);
    return 0;
}

VG_EXPORT int stream_renderer_platform_import_resource(int res_handle, int res_type, void* resource) {
    return sRenderer()->platformImportResource(res_handle, res_type, resource);
}

VG_EXPORT int stream_renderer_platform_resource_info(int res_handle, int* width, int* height, int* internal_format) {
    return sRenderer()->platformResourceInfo(res_handle, width, height, internal_format);
}

VG_EXPORT void* stream_renderer_platform_create_shared_egl_context() {
    return sRenderer()->platformCreateSharedEglContext();
}

VG_EXPORT int stream_renderer_platform_destroy_shared_egl_context(void* context) {
    return sRenderer()->platformDestroySharedEglContext(context);
}

VG_EXPORT int stream_renderer_resource_map_info(uint32_t res_handle, uint32_t *map_info) {
    return sRenderer()->resourceMapInfo(res_handle, map_info);
}

#define VIRGLRENDERER_API_PIPE_STRUCT_DEF(api) pipe_##api,

static struct virgl_renderer_virtio_interface s_virtio_interface = {
    LIST_VIRGLRENDERER_API(VIRGLRENDERER_API_PIPE_STRUCT_DEF)
};
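
// Note on the table above: LIST_VIRGLRENDERER_API is an X-macro list of entry
// point names; applying VIRGLRENDERER_API_PIPE_STRUCT_DEF maps each name to
// its pipe_-prefixed wrapper defined in this file. Purely as an illustration
// (the actual member order comes from the list's definition), the expansion
// is shaped like:
//
//     static struct virgl_renderer_virtio_interface s_virtio_interface = {
//         pipe_virgl_renderer_init,
//         pipe_virgl_renderer_poll,
//         pipe_virgl_renderer_get_cursor_data,
//         // ... one entry per name in LIST_VIRGLRENDERER_API ...
//     };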

struct virgl_renderer_virtio_interface*
get_goldfish_pipe_virgl_renderer_virtio_interface(void) {
    return &s_virtio_interface;
}

void virtio_goldfish_pipe_reset(void *pipe, void *host_pipe) {
    sRenderer()->resetPipe((GoldfishHwPipe*)pipe, (GoldfishHostPipe*)host_pipe);
}

} // extern "C"
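
// Illustrative sketch of how a host VMM might drive these entry points
// (hypothetical caller code, not part of this file; cbs, cmdBuf and cmdDwords
// are placeholders, and error handling is omitted):
//
//     struct virgl_renderer_callbacks cbs = { /* version, write_fence, ... */ };
//     pipe_virgl_renderer_init(/*cookie=*/nullptr, /*flags=*/0, &cbs);
//     pipe_virgl_renderer_context_create(/*handle=*/1, /*nlen=*/3, "ctx");
//     pipe_virgl_renderer_submit_cmd(cmdBuf, /*ctx_id=*/1, cmdDwords);
//     pipe_virgl_renderer_create_fence(/*client_fence_id=*/1, /*ctx_id=*/1);
//     pipe_virgl_renderer_poll();  // lets the renderer report pending fence completions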