• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include "base/AlignedBuf.h"
15 #include "base/Lock.h"
16 #include "host-common/AddressSpaceService.h"
17 #include "host-common/address_space_device.h"
18 #include "host-common/android_pipe_common.h"
19 #include "host-common/HostmemIdMapping.h"
20 #include "host-common/opengles.h"
21 #include "host-common/vm_operations.h"
22 
23 #include <deque>
24 #include <string>
25 #include <unordered_map>
26 
27 extern "C" {
28 #include "virtio-gpu-gfxstream-renderer.h"
29 #include "drm_fourcc.h"
30 #include "virgl_hw.h"
31 #include "host-common/virtio_gpu.h"
32 #include "host-common/goldfish_pipe.h"
33 }  // extern "C"
34 
// Set to 1 to enable verbose logging for this backend.
#define DEBUG_VIRTIO_GOLDFISH_PIPE 0

#if DEBUG_VIRTIO_GOLDFISH_PIPE

// Debug logging with source location. Wrapped in do { } while (0) so that
// VGPLOG(...) behaves as a single statement: the previous expansion carried
// its own trailing ';', which broke use in unbraced if/else and produced an
// empty extra statement at every call site that also wrote ';'.
#define VGPLOG(fmt,...) do { \
    fprintf(stderr, "%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
} while (0)

#else
#define VGPLOG(fmt,...)
#endif
45 
// Log a fatal error with source location, then abort. The do { } while (0)
// wrapper (with no trailing ';') makes the macro usable as one ordinary
// statement, including in unbraced if/else; the message prefix also had a
// "virto" typo, fixed to "virtio".
#define VGP_FATAL(fmt,...) do { \
    fprintf(stderr, "virtio-goldfish-pipe fatal error: %s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
    abort(); \
} while (0)
50 
51 #ifdef VIRTIO_GOLDFISH_EXPORT_API
52 
53 #ifdef _WIN32
54 #define VG_EXPORT __declspec(dllexport)
55 #else
56 #define VG_EXPORT __attribute__((visibility("default")))
57 #endif
58 
59 #else
60 
61 #define VG_EXPORT
62 
63 #endif // !VIRTIO_GOLDFISH_EXPORT_API
64 
65 // Virtio Goldfish Pipe: Overview-----------------------------------------------
66 //
67 // Virtio Goldfish Pipe is meant for running goldfish pipe services with a
68 // stock Linux kernel that is already capable of virtio-gpu. It runs DRM
69 // VIRTGPU ioctls on top of a custom implementation of virglrenderer on the
70 // host side that doesn't (directly) do any rendering, but instead talks to
71 // host-side pipe services.
72 //
73 // This is mainly used for graphics at the moment, though it's possible to run
74 // other pipe services over virtio-gpu as well. virtio-gpu is selected over
75 // other devices primarily because of the existence of an API (virglrenderer)
76 // that is already somewhat separate from virtio-gpu, and not needing to create
77 // a new virtio device to handle goldfish pipe.
78 //
79 // How it works is, existing virglrenderer API are remapped to perform pipe
80 // operations. First of all, pipe operations consist of the following:
81 //
82 // - open() / close(): Starts or stops an instance of a pipe service.
83 //
84 // - write(const void* buf, size_t len) / read(const void* buf, size_t len):
85 // Sends or receives data over the pipe. The first write() is the name of the
86 // pipe service. After the pipe service is determined, the host calls
87 // resetPipe() to replace the host-side pipe instance with an instance of the
88 // pipe service.
89 //
90 // - reset(void* initialPipe, void* actualPipe): the operation that replaces an
91 // initial pipe with an instance of a pipe service.
92 //
93 // Next, here's how the pipe operations map to virglrenderer commands:
94 //
95 // - open() -> virgl_renderer_context_create(),
96 //             virgl_renderer_resource_create(),
97 //             virgl_renderer_resource_attach_iov()
98 //
99 // The open() corresponds to a guest-side open of a rendernode, which triggers
100 // context creation. Each pipe corresponds 1:1 with a drm virtgpu context id.
101 // We also associate an R8 resource with each pipe as the backing data for
102 // write/read.
103 //
104 // - close() -> virgl_renderer_resource_unref(),
105 //              virgl_renderer_context_destroy()
106 //
107 // The close() corresponds to undoing the operations of open().
108 //
109 // - write() -> virgl_renderer_transfer_write_iov() OR
110 //              virgl_renderer_submit_cmd()
111 //
112 // Pipe write() operation corresponds to performing a TRANSFER_TO_HOST ioctl on
113 // the resource created alongside open(), OR an EXECBUFFER ioctl.
114 //
115 // - read() -> virgl_renderer_transfer_read_iov()
116 //
117 // Pipe read() operation corresponds to performing a TRANSFER_FROM_HOST ioctl on
118 // the resource created alongside open().
119 //
120 // A note on synchronization----------------------------------------------------
121 //
122 // Unlike goldfish-pipe which handles write/read/open/close on the vcpu thread
123 // that triggered the particular operation, virtio-gpu handles the
124 // corresponding virgl operations in a bottom half that is triggered off the
125 // vcpu thread on a timer. This means that in the guest, if we want to ensure
126 // that a particular operation such as TRANSFER_TO_HOST completed on the host,
127 // we need to call VIRTGPU_WAIT, which ends up polling fences here. This is why
128 // we insert a fence after every operation in this code.
129 //
130 // Details on transfer mechanism: mapping 2D transfer to 1D ones----------------
131 //
132 // Resource objects are typically 2D textures, while we're wanting to transmit
133 // 1D buffers to the pipe services on the host.  DRM VIRTGPU uses the concept
134 // of a 'box' to represent transfers that do not involve an entire resource
135 // object.  Each box has a x, y, width and height parameter to define the
136 // extent of the transfer for a 2D texture.  In our use case, we only use the x
137 // and width parameters. We've also created the resource with R8 format
138 // (byte-by-byte) with width equal to the total size of the transfer buffer we
139 // want (around 1 MB).
140 //
141 // The resource object itself is currently backed via plain guest RAM, which
142 // can be physically not-contiguous from the guest POV, and therefore
143 // corresponds to a possibly-long list of pointers and sizes (iov) on the host
144 // side. The sync_iov helper function converts the list of pointers
145 // to one contiguous buffer on the host (or vice versa), at the cost of a copy.
146 // (TODO: see if we can use host coherent memory to do away with the copy).
147 //
148 // We can see this abstraction in use via the implementation of
149 // transferWriteIov and transferReadIov below, which sync the iovec to/from a
150 // linear buffer if necessary, and then perform a corresponding pipe operation
151 // based on the box parameter's x and width values.
152 
153 using android::base::AutoLock;
154 using android::base::Lock;
155 using android::emulation::HostmemIdMapping;
156 
157 using VirglCtxId = uint32_t;
158 using VirglResId = uint32_t;
159 
160 static constexpr int kPipeTryAgain = -2;
161 
// Wire layout of one guest-submitted command: a packed (op, size) header
// followed immediately by the payload bytes in buf[].
struct VirtioGpuCmd {
    uint32_t op;           // command opcode
    uint32_t cmdSize;      // total command size in bytes
    unsigned char buf[0];  // trailing payload (zero-length-array idiom, GNU extension)
} __attribute__((packed));
167 
168 struct PipeCtxEntry {
169     VirglCtxId ctxId;
170     GoldfishHostPipe* hostPipe;
171     int fence;
172     uint32_t addressSpaceHandle;
173     bool hasAddressSpaceHandle;
174 };
175 
176 struct PipeResEntry {
177     virgl_renderer_resource_create_args args;
178     iovec* iov;
179     uint32_t numIovs;
180     void* linear;
181     size_t linearSize;
182     GoldfishHostPipe* hostPipe;
183     VirglCtxId ctxId;
184     uint64_t hva;
185     uint64_t hvaSize;
186     uint64_t hvaId;
187     uint32_t hvSlot;
188 };
189 
// Round n up to the nearest multiple of a (a need not be a power of two).
static inline uint32_t align_up(uint32_t n, uint32_t a) {
    const uint32_t blocks = (n + a - 1) / a;
    return blocks * a;
}
193 
// Round n up to the nearest multiple of a; a must be a power of two.
static inline uint32_t align_up_power_of_2(uint32_t n, uint32_t a) {
    const uint32_t mask = a - 1;
    return (n + mask) & ~mask;
}
197 
// These two YUV formats are missing from the bundled virgl_hw.h, so they are
// defined locally — TODO(review): confirm values stay in sync with upstream
// virglrenderer.
198 #define VIRGL_FORMAT_NV12 166
199 #define VIRGL_FORMAT_YV12 163
200 
// GL enum values written as raw hex so this file does not depend on GL
// headers.
201 const uint32_t kGlBgra = 0x80e1;
202 const uint32_t kGlRgba = 0x1908;
203 const uint32_t kGlRgba16f = 0x881A;
204 const uint32_t kGlRgb565 = 0x8d62;
205 const uint32_t kGlR8 = 0x8229;
206 const uint32_t kGlR16 = 0x822A;
207 const uint32_t kGlRg8 = 0x822b;
208 const uint32_t kGlLuminance = 0x1909;
209 const uint32_t kGlLuminanceAlpha = 0x190a;
210 const uint32_t kGlUnsignedByte = 0x1401;
211 const uint32_t kGlUnsignedShort565 = 0x8363;
212 
// Framework ("fwk") format codes; see virgl_format_to_fwk_format() below.
213 constexpr uint32_t kFwkFormatGlCompat = 0;
214 constexpr uint32_t kFwkFormatYV12 = 1;
215 // constexpr uint32_t kFwkFormatYUV420888 = 2;
216 constexpr uint32_t kFwkFormatNV12 = 3;
217 
virgl_format_is_yuv(uint32_t format)218 static inline bool virgl_format_is_yuv(uint32_t format) {
219     switch (format) {
220         case VIRGL_FORMAT_B8G8R8X8_UNORM:
221         case VIRGL_FORMAT_B8G8R8A8_UNORM:
222         case VIRGL_FORMAT_R8G8B8X8_UNORM:
223         case VIRGL_FORMAT_R8G8B8A8_UNORM:
224         case VIRGL_FORMAT_B5G6R5_UNORM:
225         case VIRGL_FORMAT_R8_UNORM:
226         case VIRGL_FORMAT_R16_UNORM:
227         case VIRGL_FORMAT_R16G16B16A16_FLOAT:
228         case VIRGL_FORMAT_R8G8_UNORM:
229             return false;
230         case VIRGL_FORMAT_NV12:
231         case VIRGL_FORMAT_YV12:
232             return true;
233         default:
234             VGP_FATAL("Unknown virgl format: 0x%x", format);
235     }
236 }
237 
virgl_format_to_gl(uint32_t virgl_format)238 static inline uint32_t virgl_format_to_gl(uint32_t virgl_format) {
239     switch (virgl_format) {
240         case VIRGL_FORMAT_B8G8R8X8_UNORM:
241         case VIRGL_FORMAT_B8G8R8A8_UNORM:
242             return kGlBgra;
243         case VIRGL_FORMAT_R8G8B8X8_UNORM:
244         case VIRGL_FORMAT_R8G8B8A8_UNORM:
245             return kGlRgba;
246         case VIRGL_FORMAT_B5G6R5_UNORM:
247             return kGlRgb565;
248         case VIRGL_FORMAT_R16_UNORM:
249             return kGlR16;
250         case VIRGL_FORMAT_R16G16B16A16_FLOAT:
251             return kGlRgba16f;
252         case VIRGL_FORMAT_R8_UNORM:
253             return kGlR8;
254         case VIRGL_FORMAT_R8G8_UNORM:
255             return kGlRg8;
256         case VIRGL_FORMAT_NV12:
257         case VIRGL_FORMAT_YV12:
258             // emulated as RGBA8888
259             return kGlRgba;
260         default:
261             return kGlRgba;
262     }
263 }
264 
virgl_format_to_fwk_format(uint32_t virgl_format)265 static inline uint32_t virgl_format_to_fwk_format(uint32_t virgl_format) {
266     switch (virgl_format) {
267         case VIRGL_FORMAT_NV12:
268             return kFwkFormatNV12;
269         case VIRGL_FORMAT_YV12:
270             return kFwkFormatYV12;
271         case VIRGL_FORMAT_R8_UNORM:
272         case VIRGL_FORMAT_R16_UNORM:
273         case VIRGL_FORMAT_R16G16B16A16_FLOAT:
274         case VIRGL_FORMAT_R8G8_UNORM:
275         case VIRGL_FORMAT_B8G8R8X8_UNORM:
276         case VIRGL_FORMAT_B8G8R8A8_UNORM:
277         case VIRGL_FORMAT_R8G8B8X8_UNORM:
278         case VIRGL_FORMAT_R8G8B8A8_UNORM:
279         case VIRGL_FORMAT_B5G6R5_UNORM:
280         default: // kFwkFormatGlCompat: No extra conversions needed
281             return kFwkFormatGlCompat;
282     }
283 }
284 
gl_format_to_natural_type(uint32_t format)285 static inline uint32_t gl_format_to_natural_type(uint32_t format) {
286     switch (format) {
287         case kGlBgra:
288         case kGlRgba:
289         case kGlLuminance:
290         case kGlLuminanceAlpha:
291             return kGlUnsignedByte;
292         case kGlRgb565:
293             return kGlUnsignedShort565;
294         default:
295             return kGlUnsignedByte;
296     }
297 }
298 
virgl_format_to_linear_base(uint32_t format,uint32_t totalWidth,uint32_t totalHeight,uint32_t x,uint32_t y,uint32_t w,uint32_t h)299 static inline size_t virgl_format_to_linear_base(
300     uint32_t format,
301     uint32_t totalWidth, uint32_t totalHeight,
302     uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
303     if (virgl_format_is_yuv(format)) {
304         return 0;
305     } else {
306         uint32_t bpp = 4;
307         switch (format) {
308             case VIRGL_FORMAT_R16G16B16A16_FLOAT:
309                 bpp = 8;
310                 break;
311             case VIRGL_FORMAT_B8G8R8X8_UNORM:
312             case VIRGL_FORMAT_B8G8R8A8_UNORM:
313             case VIRGL_FORMAT_R8G8B8X8_UNORM:
314             case VIRGL_FORMAT_R8G8B8A8_UNORM:
315                 bpp = 4;
316                 break;
317             case VIRGL_FORMAT_B5G6R5_UNORM:
318             case VIRGL_FORMAT_R8G8_UNORM:
319             case VIRGL_FORMAT_R16_UNORM:
320                 bpp = 2;
321                 break;
322             case VIRGL_FORMAT_R8_UNORM:
323                 bpp = 1;
324                 break;
325             default:
326                 VGP_FATAL("Unknown format: 0x%x", format);
327         }
328 
329         uint32_t stride = totalWidth * bpp;
330         return y * stride + x * bpp;
331     }
332     return 0;
333 }
334 
virgl_format_to_total_xfer_len(uint32_t format,uint32_t totalWidth,uint32_t totalHeight,uint32_t x,uint32_t y,uint32_t w,uint32_t h)335 static inline size_t virgl_format_to_total_xfer_len(
336     uint32_t format,
337     uint32_t totalWidth, uint32_t totalHeight,
338     uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
339     if (virgl_format_is_yuv(format)) {
340         uint32_t yAlign = (format == VIRGL_FORMAT_YV12) ?  32 : 16;
341         uint32_t yWidth = totalWidth;
342         uint32_t yHeight = totalHeight;
343         uint32_t yStride = align_up_power_of_2(yWidth, yAlign);
344         uint32_t ySize = yStride * yHeight;
345 
346         uint32_t uvAlign = 16;
347         uint32_t uvWidth;
348         uint32_t uvPlaneCount;
349         if (format == VIRGL_FORMAT_NV12) {
350             uvWidth = totalWidth;
351             uvPlaneCount = 1;
352         } else if (format == VIRGL_FORMAT_YV12) {
353             uvWidth = totalWidth / 2;
354             uvPlaneCount = 2;
355         } else {
356             VGP_FATAL("Unknown yuv virgl format: 0x%x", format);
357         }
358         uint32_t uvHeight = totalHeight / 2;
359         uint32_t uvStride = align_up_power_of_2(uvWidth, uvAlign);
360         uint32_t uvSize = uvStride * uvHeight * uvPlaneCount;
361 
362         uint32_t dataSize = ySize + uvSize;
363         return dataSize;
364     } else {
365         uint32_t bpp = 4;
366         switch (format) {
367             case VIRGL_FORMAT_R16G16B16A16_FLOAT:
368                 bpp = 8;
369                 break;
370             case VIRGL_FORMAT_B8G8R8X8_UNORM:
371             case VIRGL_FORMAT_B8G8R8A8_UNORM:
372             case VIRGL_FORMAT_R8G8B8X8_UNORM:
373             case VIRGL_FORMAT_R8G8B8A8_UNORM:
374                 bpp = 4;
375                 break;
376             case VIRGL_FORMAT_B5G6R5_UNORM:
377             case VIRGL_FORMAT_R16_UNORM:
378             case VIRGL_FORMAT_R8G8_UNORM:
379                 bpp = 2;
380                 break;
381             case VIRGL_FORMAT_R8_UNORM:
382                 bpp = 1;
383                 break;
384             default:
385                 VGP_FATAL("Unknown format: 0x%x", format);
386         }
387 
388         uint32_t stride = totalWidth * bpp;
389         return (h - 1U) * stride + w * bpp;
390     }
391     return 0;
392 }
393 
394 
// Direction of a sync_iov() copy between the guest's scatter-gather pages
// and the host-side contiguous staging buffer.
enum IovSyncDir {
    IOV_TO_LINEAR = 0,  // gather: guest iov -> host linear buffer
    LINEAR_TO_IOV = 1,  // scatter: host linear buffer -> guest iov
};
399 
sync_iov(PipeResEntry * res,uint64_t offset,const virgl_box * box,IovSyncDir dir)400 static int sync_iov(PipeResEntry* res, uint64_t offset, const virgl_box* box, IovSyncDir dir) {
401     VGPLOG("offset: 0x%llx box: %u %u %u %u size %u x %u iovs %u linearSize %zu",
402             (unsigned long long)offset,
403             box->x, box->y, box->w, box->h,
404             res->args.width, res->args.height,
405             res->numIovs,
406             res->linearSize);
407 
408     if (box->x > res->args.width || box->y > res->args.height) {
409         VGP_FATAL("Box out of range of resource");
410     }
411     if (box->w == 0U || box->h == 0U) {
412         VGP_FATAL("Empty transfer");
413     }
414     if (box->x + box->w > res->args.width) {
415         VGP_FATAL("Box overflows resource width");
416     }
417 
418     size_t linearBase = virgl_format_to_linear_base(
419         res->args.format,
420         res->args.width,
421         res->args.height,
422         box->x, box->y, box->w, box->h);
423     size_t start = linearBase;
424     // height - 1 in order to treat the (w * bpp) row specially
425     // (i.e., the last row does not occupy the full stride)
426     size_t length = virgl_format_to_total_xfer_len(
427         res->args.format,
428         res->args.width,
429         res->args.height,
430         box->x, box->y, box->w, box->h);
431     size_t end = start + length;
432 
433     if (end > res->linearSize) {
434         VGP_FATAL("start + length overflows! linearSize %zu, start %zu length %zu (wanted %zu)",
435                   res->linearSize, start, length, start + length);
436     }
437 
438     uint32_t iovIndex = 0;
439     size_t iovOffset = 0;
440     size_t written = 0;
441     char* linear = static_cast<char*>(res->linear);
442 
443     while (written < length) {
444 
445         if (iovIndex >= res->numIovs) {
446             VGP_FATAL("write request overflowed numIovs");
447         }
448 
449         const char* iovBase_const = static_cast<const char*>(res->iov[iovIndex].iov_base);
450         char* iovBase = static_cast<char*>(res->iov[iovIndex].iov_base);
451         size_t iovLen = res->iov[iovIndex].iov_len;
452         size_t iovOffsetEnd = iovOffset + iovLen;
453 
454         auto lower_intersect = std::max(iovOffset, start);
455         auto upper_intersect = std::min(iovOffsetEnd, end);
456         if (lower_intersect < upper_intersect) {
457             size_t toWrite = upper_intersect - lower_intersect;
458             switch (dir) {
459                 case IOV_TO_LINEAR:
460                     memcpy(linear + lower_intersect,
461                            iovBase_const + lower_intersect - iovOffset,
462                            toWrite);
463                     break;
464                 case LINEAR_TO_IOV:
465                     memcpy(iovBase + lower_intersect - iovOffset,
466                            linear + lower_intersect,
467                            toWrite);
468                     break;
469                 default:
470                     VGP_FATAL("Invalid sync dir: %d", dir);
471             }
472             written += toWrite;
473         }
474         ++iovIndex;
475         iovOffset += iovLen;
476     }
477 
478     return 0;
479 }
480 
// Reassemble a 64-bit value from its low and high 32-bit halves.
static uint64_t convert32to64(uint32_t lo, uint32_t hi) {
    uint64_t result = hi;
    result <<= 32;
    result |= lo;
    return result;
}
484 
485 // Commands for address space device
486 // kVirtioGpuAddressSpaceContextCreateWithSubdevice | subdeviceType
487 const uint32_t kVirtioGpuAddressSpaceContextCreateWithSubdevice = 0x1001;
488 
489 // kVirtioGpuAddressSpacePing | offset_lo | offset_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
490 // no output
491 const uint32_t kVirtioGpuAddressSpacePing = 0x1002;
492 
493 // kVirtioGpuAddressSpacePingWithResponse | resp_resid | offset_lo | offset_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
494 // out: same as input then | out: error
495 const uint32_t kVirtioGpuAddressSpacePingWithResponse = 0x1003;
496 
497 // Commands for native sync fd
498 const uint32_t kVirtioGpuNativeSyncCreateExportFd = 0x9000;
499 const uint32_t kVirtioGpuNativeSyncCreateImportFd = 0x9001;
500 
501 const uint32_t kVirtioGpuNativeSyncVulkanCreateExportFd = 0xa000;
502 const uint32_t kVirtioGpuNativeSyncVulkanCreateImportFd = 0xa001;
503 
504 class PipeVirglRenderer {
505 public:
506     PipeVirglRenderer() = default;
507 
init(void * cookie,int flags,const struct virgl_renderer_callbacks * callbacks)508     int init(void* cookie, int flags, const struct virgl_renderer_callbacks* callbacks) {
509         VGPLOG("cookie: %p", cookie);
510         mCookie = cookie;
511         mVirglRendererCallbacks = *callbacks;
512         mVirtioGpuOps = android_getVirtioGpuOps();
513         if (!mVirtioGpuOps) {
514             VGP_FATAL("Could not get virtio gpu ops!");
515         }
516         mReadPixelsFunc = android_getReadPixelsFunc();
517         if (!mReadPixelsFunc) {
518             VGP_FATAL("Could not get read pixels func!");
519         }
520         mAddressSpaceDeviceControlOps = get_address_space_device_control_ops();
521         if (!mAddressSpaceDeviceControlOps) {
522             VGP_FATAL("Could not get address space device control ops!");
523         }
524         VGPLOG("done");
525         return 0;
526     }
527 
resetPipe(GoldfishHwPipe * hwPipe,GoldfishHostPipe * hostPipe)528     void resetPipe(GoldfishHwPipe* hwPipe, GoldfishHostPipe* hostPipe) {
529         VGPLOG("Want to reset hwpipe %p to hostpipe %p", hwPipe, hostPipe);
530         VirglCtxId asCtxId = (VirglCtxId)(uintptr_t)hwPipe;
531         auto it = mContexts.find(asCtxId);
532         if (it == mContexts.end()) {
533             fprintf(stderr, "%s: fatal: pipe id %u not found\n", __func__, asCtxId);
534             abort();
535         }
536 
537         auto& entry = it->second;
538         VGPLOG("ctxid: %u prev hostpipe: %p", asCtxId, entry.hostPipe);
539         entry.hostPipe = hostPipe;
540         VGPLOG("ctxid: %u next hostpipe: %p", asCtxId, entry.hostPipe);
541 
542         // Also update any resources associated with it
543         auto resourcesIt = mContextResources.find(asCtxId);
544 
545         if (resourcesIt == mContextResources.end()) return;
546 
547         const auto& resIds = resourcesIt->second;
548 
549         for (auto resId : resIds) {
550             auto resEntryIt = mResources.find(resId);
551             if (resEntryIt == mResources.end()) {
552                 fprintf(stderr, "%s: fatal: res id %u entry not found\n", __func__, resId);
553                 abort();
554             }
555 
556             auto& resEntry = resEntryIt->second;
557             resEntry.hostPipe = hostPipe;
558         }
559     }
560 
createContext(VirglCtxId handle,uint32_t nlen,const char * name)561     int createContext(VirglCtxId handle, uint32_t nlen, const char* name) {
562         AutoLock lock(mLock);
563         VGPLOG("ctxid: %u len: %u name: %s", handle, nlen, name);
564         auto ops = ensureAndGetServiceOps();
565         auto hostPipe = ops->guest_open_with_flags(
566             reinterpret_cast<GoldfishHwPipe*>(handle),
567             0x1 /* is virtio */);
568 
569         if (!hostPipe) {
570             fprintf(stderr, "%s: failed to create hw pipe!\n", __func__);
571             return -1;
572         }
573 
574         PipeCtxEntry res = {
575             handle, // ctxId
576             hostPipe, // hostPipe
577             0, // fence
578             0, // AS handle
579             false, // does not have an AS handle
580         };
581 
582         VGPLOG("initial host pipe for ctxid %u: %p", handle, hostPipe);
583         mContexts[handle] = res;
584         return 0;
585     }
586 
destroyContext(VirglCtxId handle)587     int destroyContext(VirglCtxId handle) {
588         AutoLock lock(mLock);
589         VGPLOG("ctxid: %u", handle);
590 
591         auto it = mContexts.find(handle);
592         if (it == mContexts.end()) {
593             fprintf(stderr, "%s: could not find context handle %u\n", __func__, handle);
594             return -1;
595         }
596 
597         if (it->second.hasAddressSpaceHandle) {
598             mAddressSpaceDeviceControlOps->destroy_handle(
599                 it->second.addressSpaceHandle);
600         }
601 
602         auto ops = ensureAndGetServiceOps();
603         auto hostPipe = it->second.hostPipe;
604 
605         if (!hostPipe) {
606             fprintf(stderr, "%s: 0 is not a valid hostpipe\n", __func__);
607             return -1;
608         }
609 
610         ops->guest_close(hostPipe, GOLDFISH_PIPE_CLOSE_GRACEFUL);
611 
612         mContexts.erase(it);
613         return 0;
614     }
615 
setContextAddressSpaceHandleLocked(VirglCtxId ctxId,uint32_t handle)616     void setContextAddressSpaceHandleLocked(VirglCtxId ctxId, uint32_t handle) {
617         auto ctxIt = mContexts.find(ctxId);
618         if (ctxIt == mContexts.end()) {
619             fprintf(stderr, "%s: fatal: ctx id %u not found\n", __func__,
620                     ctxId);
621             abort();
622         }
623 
624         auto& ctxEntry = ctxIt->second;
625         ctxEntry.addressSpaceHandle = handle;
626         ctxEntry.hasAddressSpaceHandle = true;
627     }
628 
getAddressSpaceHandleLocked(VirglCtxId ctxId)629     uint32_t getAddressSpaceHandleLocked(VirglCtxId ctxId) {
630         auto ctxIt = mContexts.find(ctxId);
631         if (ctxIt == mContexts.end()) {
632             fprintf(stderr, "%s: fatal: ctx id %u not found\n", __func__,
633                     ctxId);
634             abort();
635         }
636 
637         auto& ctxEntry = ctxIt->second;
638 
639         if (!ctxEntry.hasAddressSpaceHandle) {
640             fprintf(stderr, "%s: fatal: ctx id %u doesn't have address space handle\n", __func__,
641                     ctxId);
642             abort();
643         }
644 
645         return ctxEntry.addressSpaceHandle;
646     }
647 
writeWordsToFirstIovPageLocked(uint32_t * dwords,size_t dwordCount,uint32_t resId)648     void writeWordsToFirstIovPageLocked(uint32_t* dwords, size_t dwordCount, uint32_t resId) {
649 
650         auto resEntryIt = mResources.find(resId);
651         if (resEntryIt == mResources.end()) {
652             fprintf(stderr, "%s: fatal: resid %u not found\n", __func__, resId);
653             abort();
654         }
655 
656         auto& resEntry = resEntryIt->second;
657 
658         if (!resEntry.iov) {
659             fprintf(stderr, "%s: fatal:resid %u had empty iov\n", __func__, resId);
660             abort();
661         }
662 
663         uint32_t* iovWords = (uint32_t*)(resEntry.iov[0].iov_base);
664         memcpy(iovWords, dwords, sizeof(uint32_t) * dwordCount);
665     }
666 
addressSpaceProcessCmd(VirglCtxId ctxId,uint32_t * dwords,int dwordCount)667     void addressSpaceProcessCmd(VirglCtxId ctxId, uint32_t* dwords, int dwordCount) {
668         uint32_t opcode = dwords[0];
669 
670         switch (opcode) {
671             case kVirtioGpuAddressSpaceContextCreateWithSubdevice: {
672                 uint32_t subdevice_type = dwords[1];
673 
674                 uint32_t handle = mAddressSpaceDeviceControlOps->gen_handle();
675 
676                 struct android::emulation::AddressSpaceDevicePingInfo pingInfo = {
677                     .metadata = (uint64_t)subdevice_type,
678                 };
679 
680                 mAddressSpaceDeviceControlOps->ping_at_hva(handle, &pingInfo);
681 
682                 AutoLock lock(mLock);
683                 setContextAddressSpaceHandleLocked(ctxId, handle);
684                 break;
685             }
686             case kVirtioGpuAddressSpacePing: {
687                 uint32_t phys_addr_lo = dwords[1];
688                 uint32_t phys_addr_hi = dwords[2];
689 
690                 uint32_t size_lo = dwords[3];
691                 uint32_t size_hi = dwords[4];
692 
693                 uint32_t metadata_lo = dwords[5];
694                 uint32_t metadata_hi = dwords[6];
695 
696                 uint32_t wait_phys_addr_lo = dwords[7];
697                 uint32_t wait_phys_addr_hi = dwords[8];
698 
699                 uint32_t wait_flags = dwords[9];
700                 uint32_t direction = dwords[10];
701 
702                 struct android::emulation::AddressSpaceDevicePingInfo pingInfo = {
703                     .phys_addr = convert32to64(phys_addr_lo, phys_addr_hi),
704                     .size = convert32to64(size_lo, size_hi),
705                     .metadata = convert32to64(metadata_lo, metadata_hi),
706                     .wait_phys_addr = convert32to64(wait_phys_addr_lo, wait_phys_addr_hi),
707                     .wait_flags = wait_flags,
708                     .direction = direction,
709                 };
710 
711                 AutoLock lock(mLock);
712                 mAddressSpaceDeviceControlOps->ping_at_hva(
713                     getAddressSpaceHandleLocked(ctxId),
714                     &pingInfo);
715                 break;
716             }
717             case kVirtioGpuAddressSpacePingWithResponse: {
718                 uint32_t resp_resid = dwords[1];
719                 uint32_t phys_addr_lo = dwords[2];
720                 uint32_t phys_addr_hi = dwords[3];
721 
722                 uint32_t size_lo = dwords[4];
723                 uint32_t size_hi = dwords[5];
724 
725                 uint32_t metadata_lo = dwords[6];
726                 uint32_t metadata_hi = dwords[7];
727 
728                 uint32_t wait_phys_addr_lo = dwords[8];
729                 uint32_t wait_phys_addr_hi = dwords[9];
730 
731                 uint32_t wait_flags = dwords[10];
732                 uint32_t direction = dwords[11];
733 
734                 struct android::emulation::AddressSpaceDevicePingInfo pingInfo = {
735                     .phys_addr = convert32to64(phys_addr_lo, phys_addr_hi),
736                     .size = convert32to64(size_lo, size_hi),
737                     .metadata = convert32to64(metadata_lo, metadata_hi),
738                     .wait_phys_addr = convert32to64(wait_phys_addr_lo, wait_phys_addr_hi),
739                     .wait_flags = wait_flags,
740                     .direction = direction,
741                 };
742 
743                 AutoLock lock(mLock);
744                 mAddressSpaceDeviceControlOps->ping_at_hva(
745                     getAddressSpaceHandleLocked(ctxId),
746                     &pingInfo);
747 
748                 phys_addr_lo = (uint32_t)pingInfo.phys_addr;
749                 phys_addr_hi = (uint32_t)(pingInfo.phys_addr >> 32);
750                 size_lo = (uint32_t)(pingInfo.size >> 0);
751                 size_hi = (uint32_t)(pingInfo.size >> 32);
752                 metadata_lo = (uint32_t)(pingInfo.metadata >> 0);
753                 metadata_hi = (uint32_t)(pingInfo.metadata >> 32);
754                 wait_phys_addr_lo = (uint32_t)(pingInfo.wait_phys_addr >> 0);
755                 wait_phys_addr_hi = (uint32_t)(pingInfo.wait_phys_addr >> 32);
756                 wait_flags = (uint32_t)(pingInfo.wait_flags >> 0);
757                 direction = (uint32_t)(pingInfo.direction >> 0);
758 
759                 uint32_t response[] = {
760                     phys_addr_lo, phys_addr_hi,
761                     size_lo, size_hi,
762                     metadata_lo, metadata_hi,
763                     wait_phys_addr_lo, wait_phys_addr_hi,
764                     wait_flags, direction,
765                 };
766 
767                 writeWordsToFirstIovPageLocked(
768                     response,
769                     sizeof(response) / sizeof(uint32_t),
770                     resp_resid);
771                 break;
772             }
773             default:
774                 break;
775         }
776     }
777 
submitCmd(VirglCtxId ctxId,void * buffer,int dwordCount)778     int submitCmd(VirglCtxId ctxId, void* buffer, int dwordCount) {
779         VGPLOG("ctxid: %u buffer: %p dwords: %d", ctxId, buffer, dwordCount);
780 
781         if (!buffer) {
782             fprintf(stderr, "%s: error: buffer null\n", __func__);
783             return -1;
784         }
785 
786         // Parse command from buffer
787         uint32_t* dwords = (uint32_t*)buffer;
788 
789         if (dwordCount < 1) {
790             fprintf(stderr, "%s: error: not enough dwords (got %d)\n", __func__, dwordCount);
791             return -1;
792         }
793 
794         uint32_t opcode = dwords[0];
795 
796         switch (opcode) {
797             case kVirtioGpuAddressSpaceContextCreateWithSubdevice:
798             case kVirtioGpuAddressSpacePing:
799             case kVirtioGpuAddressSpacePingWithResponse:
800                 addressSpaceProcessCmd(ctxId, dwords, dwordCount);
801                 break;
802             case kVirtioGpuNativeSyncCreateExportFd:
803             case kVirtioGpuNativeSyncCreateImportFd: {
804                 uint32_t sync_handle_lo = dwords[1];
805                 uint32_t sync_handle_hi = dwords[2];
806                 uint64_t sync_handle = convert32to64(sync_handle_lo, sync_handle_hi);
807 
808                 mVirtioGpuOps->wait_for_gpu(sync_handle);
809                 break;
810             }
811             case kVirtioGpuNativeSyncVulkanCreateExportFd:
812             case kVirtioGpuNativeSyncVulkanCreateImportFd: {
813                 uint32_t device_handle_lo = dwords[1];
814                 uint32_t device_handle_hi = dwords[2];
815                 uint64_t device_handle = convert32to64(device_handle_lo, device_handle_hi);
816 
817                 uint32_t fence_handle_lo = dwords[3];
818                 uint32_t fence_handle_hi = dwords[4];
819                 uint64_t fence_handle = convert32to64(fence_handle_lo, fence_handle_hi);
820 
821                 mVirtioGpuOps->wait_for_gpu_vulkan(device_handle, fence_handle);
822                 break;
823             }
824             default:
825                 return -1;
826         }
827 
828         mLastSubmitCmdCtxExists = true;
829         mLastSubmitCmdCtx = ctxId;
830         return 0;
831     }
832 
createFence(int client_fence_id,uint32_t cmd_type)833     int createFence(int client_fence_id, uint32_t cmd_type) {
834         AutoLock lock(mLock);
835         VGPLOG("fenceid: %u cmdtype: %u", client_fence_id, cmd_type);
836         mFenceDeque.push_back(client_fence_id);
837         return 0;
838     }
839 
poll()840     void poll() {
841         VGPLOG("start");
842         AutoLock lock(mLock);
843         for (auto fence : mFenceDeque) {
844             VGPLOG("write fence: %u", fence);
845             mVirglRendererCallbacks.write_fence(mCookie, fence);
846             VGPLOG("write fence: %u (done with callback)", fence);
847         }
848         mFenceDeque.clear();
849         VGPLOG("end");
850     }
851 
    // Texture target of a resource. Matches the gallium enum of the same
    // name; declaration order matters because the values are implicit,
    // starting at PIPE_BUFFER = 0.
    enum pipe_texture_target {
        PIPE_BUFFER,
        PIPE_TEXTURE_1D,
        PIPE_TEXTURE_2D,
        PIPE_TEXTURE_3D,
        PIPE_TEXTURE_CUBE,
        PIPE_TEXTURE_RECT,
        PIPE_TEXTURE_1D_ARRAY,
        PIPE_TEXTURE_2D_ARRAY,
        PIPE_TEXTURE_CUBE_ARRAY,
        PIPE_MAX_TEXTURE_TYPES,
    };
864 
    /**
     * Resource binding flags -- the state tracker must specify in advance
     * all the ways a resource might be used.
     */
// Bind-flag bitmask values (subset of gallium's PIPE_BIND_*); the trailing
// comment on each names the API call that consumes the flag.
#define PIPE_BIND_DEPTH_STENCIL        (1 << 0) /* create_surface */
#define PIPE_BIND_RENDER_TARGET        (1 << 1) /* create_surface */
#define PIPE_BIND_BLENDABLE            (1 << 2) /* create_surface */
#define PIPE_BIND_SAMPLER_VIEW         (1 << 3) /* create_sampler_view */
#define PIPE_BIND_VERTEX_BUFFER        (1 << 4) /* set_vertex_buffers */
#define PIPE_BIND_INDEX_BUFFER         (1 << 5) /* draw_elements */
#define PIPE_BIND_CONSTANT_BUFFER      (1 << 6) /* set_constant_buffer */
#define PIPE_BIND_DISPLAY_TARGET       (1 << 7) /* flush_front_buffer */
    /* gap */
#define PIPE_BIND_STREAM_OUTPUT        (1 << 10) /* set_stream_output_buffers */
#define PIPE_BIND_CURSOR               (1 << 11) /* mouse cursor */
#define PIPE_BIND_CUSTOM               (1 << 12) /* state-tracker/winsys usages */
#define PIPE_BIND_GLOBAL               (1 << 13) /* set_global_binding */
#define PIPE_BIND_SHADER_BUFFER        (1 << 14) /* set_shader_buffers */
#define PIPE_BIND_SHADER_IMAGE         (1 << 15) /* set_shader_images */
#define PIPE_BIND_COMPUTE_RESOURCE     (1 << 16) /* set_compute_resources */
#define PIPE_BIND_COMMAND_ARGS_BUFFER  (1 << 17) /* pipe_draw_info.indirect */
#define PIPE_BIND_QUERY_BUFFER         (1 << 18) /* get_query_result_resource */
887 
888 
handleCreateResourceGraphicsUsage(struct virgl_renderer_resource_create_args * args,struct iovec * iov,uint32_t num_iovs)889     void handleCreateResourceGraphicsUsage(
890             struct virgl_renderer_resource_create_args *args,
891             struct iovec *iov, uint32_t num_iovs) {
892 
893         if (args->target == PIPE_BUFFER) {
894             // Nothing to handle; this is generic pipe usage.
895             return;
896         }
897 
898         // corresponds to allocation of gralloc buffer in minigbm
899         VGPLOG("w h %u %u resid %u -> rcCreateColorBufferWithHandle",
900                args->width, args->height, args->handle);
901         uint32_t glformat = virgl_format_to_gl(args->format);
902         uint32_t fwkformat = virgl_format_to_fwk_format(args->format);
903         mVirtioGpuOps->create_color_buffer_with_handle(
904             args->width, args->height, glformat, fwkformat, args->handle);
905         mVirtioGpuOps->set_guest_managed_color_buffer_lifetime(true /* guest manages lifetime */);
906         mVirtioGpuOps->open_color_buffer(
907             args->handle);
908     }
909 
createResource(struct virgl_renderer_resource_create_args * args,struct iovec * iov,uint32_t num_iovs)910     int createResource(
911             struct virgl_renderer_resource_create_args *args,
912             struct iovec *iov, uint32_t num_iovs) {
913 
914         VGPLOG("handle: %u. num iovs: %u", args->handle, num_iovs);
915 
916         handleCreateResourceGraphicsUsage(args, iov, num_iovs);
917 
918         PipeResEntry e;
919         e.args = *args;
920         e.linear = 0;
921         e.hostPipe = 0;
922         e.hva = 0;
923         e.hvaSize = 0;
924         e.hvaId = 0;
925         e.hvSlot = 0;
926         allocResource(e, iov, num_iovs);
927 
928         AutoLock lock(mLock);
929         mResources[args->handle] = e;
930         return 0;
931     }
932 
handleUnrefResourceGraphicsUsage(PipeResEntry * res,uint32_t resId)933     void handleUnrefResourceGraphicsUsage(PipeResEntry* res, uint32_t resId) {
934         if (res->args.target == PIPE_BUFFER) return;
935         mVirtioGpuOps->close_color_buffer(resId);
936     }
937 
    // Drop resource |toUnrefId|: detach it from every context, close its
    // color buffer (graphics resources only), free its linear shadow buffer
    // and iov table, then erase it from the resource map. Safe to call with
    // an unknown id (early return).
    void unrefResource(uint32_t toUnrefId) {
        AutoLock lock(mLock);
        VGPLOG("handle: %u", toUnrefId);

        auto it = mResources.find(toUnrefId);
        if (it == mResources.end()) return;

        // Remove the resource -> contexts mapping...
        auto contextsIt = mResourceContexts.find(toUnrefId);
        if (contextsIt != mResourceContexts.end()) {
            mResourceContexts.erase(contextsIt->first);
        }

        // ...and remove the resource from every context's resource list.
        for (auto& ctxIdResources : mContextResources) {
            detachResourceLocked(ctxIdResources.first, toUnrefId);
        }

        auto& entry = it->second;

        handleUnrefResourceGraphicsUsage(&entry, toUnrefId);

        if (entry.linear) {
            free(entry.linear);
            entry.linear = nullptr;
        }

        if (entry.iov) {
            free(entry.iov);
            entry.iov = nullptr;
            entry.numIovs = 0;
        }

        if (entry.hvaId) {
            // gfxstream manages when to actually remove the hostmem id and storage
            //
            // fprintf(stderr, "%s: unref a hostmem resource. hostmem id: 0x%llx\n", __func__,
            //         (unsigned long long)(entry.hvaId));
            // HostmemIdMapping::get()->remove(entry.hvaId);
            // auto ownedIt = mOwnedHostmemIdBuffers.find(entry.hvaId);
            // if (ownedIt != mOwnedHostmemIdBuffers.end()) {
            //      // android::aligned_buf_free(ownedIt->second);
            // }
        }

        entry.hva = 0;
        entry.hvaSize = 0;
        entry.hvaId = 0;
        entry.hvSlot = 0;

        mResources.erase(it);
    }
988 
attachIov(int resId,iovec * iov,int num_iovs)989     int attachIov(int resId, iovec* iov, int num_iovs) {
990         AutoLock lock(mLock);
991 
992         VGPLOG("resid: %d numiovs: %d", resId, num_iovs);
993 
994         auto it = mResources.find(resId);
995         if (it == mResources.end()) return ENOENT;
996 
997         auto& entry = it->second;
998         VGPLOG("res linear: %p", entry.linear);
999         if (!entry.linear) allocResource(entry, iov, num_iovs);
1000 
1001         VGPLOG("done");
1002         return 0;
1003     }
1004 
    // Detach guest iovecs from |resId|: report the previous iov count via
    // |num_iovs|, free the internally owned iov copy, and rebuild the (now
    // empty) resource state.
    // NOTE(review): *iov is assigned AFTER entry.iov is reset, so callers
    // always receive nullptr -- presumably intentional since the table was
    // an internal copy, but confirm callers do not expect the old pointer.
    void detachIov(int resId, iovec** iov, int* num_iovs) {
        AutoLock lock(mLock);

        auto it = mResources.find(resId);
        if (it == mResources.end()) return;

        auto& entry = it->second;

        if (num_iovs) {
            *num_iovs = entry.numIovs;
            VGPLOG("resid: %d numIovs: %d", resId, *num_iovs);
        } else {
            VGPLOG("resid: %d numIovs: 0", resId);
        }

        entry.numIovs = 0;

        if (entry.iov) free(entry.iov);
        entry.iov = nullptr;

        if (iov) {
            // entry.iov is nullptr at this point (see NOTE above).
            *iov = entry.iov;
        }

        // Re-run allocation with zero iovecs to reset the linear buffer.
        allocResource(entry, entry.iov, entry.numIovs);
        VGPLOG("done");
    }
1032 
handleTransferReadGraphicsUsage(PipeResEntry * res,uint64_t offset,virgl_box * box)1033     bool handleTransferReadGraphicsUsage(
1034         PipeResEntry* res, uint64_t offset, virgl_box* box) {
1035         // PIPE_BUFFER: Generic pipe usage
1036         if (res->args.target == PIPE_BUFFER) return true;
1037 
1038         // Others: Gralloc transfer read operation
1039         auto glformat = virgl_format_to_gl(res->args.format);
1040         auto gltype = gl_format_to_natural_type(glformat);
1041 
1042         // We always xfer the whole thing again from GL
1043         // since it's fiddly to calc / copy-out subregions
1044         if (virgl_format_is_yuv(res->args.format)) {
1045             mVirtioGpuOps->read_color_buffer_yuv(
1046                 res->args.handle,
1047                 0, 0,
1048                 res->args.width, res->args.height,
1049                 res->linear, res->linearSize);
1050         } else {
1051             mVirtioGpuOps->read_color_buffer(
1052                 res->args.handle,
1053                 0, 0,
1054                 res->args.width, res->args.height,
1055                 glformat,
1056                 gltype,
1057                 res->linear);
1058         }
1059 
1060         return false;
1061     }
1062 
handleTransferWriteGraphicsUsage(PipeResEntry * res,uint64_t offset,virgl_box * box)1063     bool handleTransferWriteGraphicsUsage(
1064         PipeResEntry* res, uint64_t offset, virgl_box* box) {
1065         // PIPE_BUFFER: Generic pipe usage
1066         if (res->args.target == PIPE_BUFFER) return true;
1067 
1068         // Others: Gralloc transfer read operation
1069         auto glformat = virgl_format_to_gl(res->args.format);
1070         auto gltype = gl_format_to_natural_type(glformat);
1071 
1072         // We always xfer the whole thing again to GL
1073         // since it's fiddly to calc / copy-out subregions
1074         mVirtioGpuOps->update_color_buffer(
1075             res->args.handle,
1076             0, 0,
1077             res->args.width, res->args.height,
1078             glformat,
1079             gltype,
1080             res->linear);
1081 
1082         return false;
1083     }
1084 
    // Transfer resource data host -> guest. Graphics resources are first
    // read back from GL into the linear buffer; PIPE_BUFFER resources with
    // a host pipe pull box->w bytes from the pipe service into the linear
    // buffer at offset box->x. The linear buffer is then synced out to the
    // caller-supplied iovecs (or the resource's attached iovecs when
    // iovec_cnt is 0). Returns the sync_iov result, EINVAL for an unknown
    // resource, -1 when no host pipe is attached, or EIO on pipe error.
    int transferReadIov(int resId, uint64_t offset, virgl_box* box, struct iovec* iov, int iovec_cnt) {
        AutoLock lock(mLock);

        VGPLOG("resid: %d offset: 0x%llx. box: %u %u %u %u", resId,
               (unsigned long long)offset,
               box->x,
               box->y,
               box->w,
               box->h);

        auto it = mResources.find(resId);
        if (it == mResources.end()) return EINVAL;

        auto& entry = it->second;

        if (handleTransferReadGraphicsUsage(
            &entry, offset, box)) {
            // Do the pipe service op here, if there is an associated hostpipe.
            auto hostPipe = entry.hostPipe;
            if (!hostPipe) return -1;

            auto ops = ensureAndGetServiceOps();

            size_t readBytes = 0;
            size_t wantedBytes = readBytes + (size_t)box->w;

            // Spin until the pipe delivers all wanted bytes; kPipeTryAgain
            // results retry, any other non-positive status is an error.
            while (readBytes < wantedBytes) {
                GoldfishPipeBuffer buf = {
                    ((char*)entry.linear) + box->x + readBytes,
                    wantedBytes - readBytes,
                };
                auto status = ops->guest_recv(hostPipe, &buf, 1);

                if (status > 0) {
                    readBytes += status;
                } else if (status != kPipeTryAgain) {
                    return EIO;
                }
            }
        }

        VGPLOG("Linear first word: %d", *(int*)(entry.linear));

        int syncRes;

        if (iovec_cnt) {
            // Sync into the caller's iovecs using a temporary entry that
            // borrows this resource's linear buffer.
            PipeResEntry e = {
                entry.args,
                iov,
                (uint32_t)iovec_cnt,
                entry.linear,
                entry.linearSize,
            };
            syncRes =
                sync_iov(&e, offset, box, LINEAR_TO_IOV);
        } else {
            syncRes =
                sync_iov(&entry, offset, box, LINEAR_TO_IOV);
        }

        mLastSubmitCmdCtxExists = true;
        mLastSubmitCmdCtx = entry.ctxId;

        VGPLOG("done");

        return syncRes;
    }
1152 
    // Transfer resource data guest -> host. First syncs the caller-supplied
    // iovecs (or the resource's attached iovecs when iovec_cnt is 0) into
    // the linear buffer; graphics resources then push the linear buffer to
    // GL, while PIPE_BUFFER resources with a host pipe send box->w bytes
    // from offset box->x into the pipe service. Returns the sync_iov
    // result, EINVAL for an unknown resource, or EIO on pipe error.
    int transferWriteIov(int resId, uint64_t offset, virgl_box* box, struct iovec* iov, int iovec_cnt) {
        AutoLock lock(mLock);
        VGPLOG("resid: %d offset: 0x%llx", resId,
               (unsigned long long)offset);
        auto it = mResources.find(resId);
        if (it == mResources.end()) return EINVAL;

        auto& entry = it->second;
        int syncRes;

        if (iovec_cnt) {
            // Sync from the caller's iovecs via a temporary entry that
            // borrows this resource's linear buffer.
            PipeResEntry e = {
                entry.args,
                iov,
                (uint32_t)iovec_cnt,
                entry.linear,
                entry.linearSize,
            };
            syncRes = sync_iov(&e, offset, box, IOV_TO_LINEAR);
        } else {
            syncRes = sync_iov(&entry, offset, box, IOV_TO_LINEAR);
        }

        if (handleTransferWriteGraphicsUsage(&entry, offset, box)) {
            // Do the pipe service op here, if there is an associated hostpipe.
            auto hostPipe = entry.hostPipe;
            if (!hostPipe) {
                VGPLOG("No hostPipe");
                return syncRes;
            }

            VGPLOG("resid: %d offset: 0x%llx hostpipe: %p", resId,
                   (unsigned long long)offset, hostPipe);

            auto ops = ensureAndGetServiceOps();

            size_t writtenBytes = 0;
            size_t wantedBytes = (size_t)box->w;

            while (writtenBytes < wantedBytes) {
                GoldfishPipeBuffer buf = {
                    ((char*)entry.linear) + box->x + writtenBytes,
                    wantedBytes - writtenBytes,
                };

                // guest_send can now reallocate the pipe.
                void* hostPipeBefore = hostPipe;
                auto status = ops->guest_send(&hostPipe, &buf, 1);
                if (hostPipe != hostPipeBefore) {
                    resetPipe((GoldfishHwPipe*)(uintptr_t)(entry.ctxId), hostPipe);
                    it = mResources.find(resId);
                    entry = it->second;
                    // NOTE(review): |entry| is a reference, so the line
                    // above copies the refreshed map value back over the
                    // old slot rather than rebinding; also |it| is not
                    // checked against end() here -- verify the resource
                    // cannot disappear during guest_send.
                }

                if (status > 0) {
                    writtenBytes += status;
                } else if (status != kPipeTryAgain) {
                    return EIO;
                }
            }
        }

        mLastSubmitCmdCtxExists = true;
        mLastSubmitCmdCtx = entry.ctxId;

        VGPLOG("done");
        return syncRes;
    }
1221 
attachResource(uint32_t ctxId,uint32_t resId)1222     void attachResource(uint32_t ctxId, uint32_t resId) {
1223         AutoLock lock(mLock);
1224         VGPLOG("ctxid: %u resid: %u", ctxId, resId);
1225 
1226         auto resourcesIt = mContextResources.find(ctxId);
1227 
1228         if (resourcesIt == mContextResources.end()) {
1229             std::vector<VirglResId> ids;
1230             ids.push_back(resId);
1231             mContextResources[ctxId] = ids;
1232         } else {
1233             auto& ids = resourcesIt->second;
1234             auto idIt = std::find(ids.begin(), ids.end(), resId);
1235             if (idIt == ids.end())
1236                 ids.push_back(resId);
1237         }
1238 
1239         auto contextsIt = mResourceContexts.find(resId);
1240 
1241         if (contextsIt == mResourceContexts.end()) {
1242             std::vector<VirglCtxId> ids;
1243             ids.push_back(ctxId);
1244             mResourceContexts[resId] = ids;
1245         } else {
1246             auto& ids = contextsIt->second;
1247             auto idIt = std::find(ids.begin(), ids.end(), resId);
1248             if (idIt == ids.end())
1249                 ids.push_back(ctxId);
1250         }
1251 
1252         // Associate the host pipe of the resource entry with the host pipe of
1253         // the context entry.  That is, the last context to call attachResource
1254         // wins if there is any conflict.
1255         auto ctxEntryIt = mContexts.find(ctxId); auto resEntryIt =
1256             mResources.find(resId);
1257 
1258         if (ctxEntryIt == mContexts.end() ||
1259             resEntryIt == mResources.end()) return;
1260 
1261         VGPLOG("hostPipe: %p", ctxEntryIt->second.hostPipe);
1262         resEntryIt->second.hostPipe = ctxEntryIt->second.hostPipe;
1263         resEntryIt->second.ctxId = ctxId;
1264     }
1265 
detachResource(uint32_t ctxId,uint32_t toUnrefId)1266     void detachResource(uint32_t ctxId, uint32_t toUnrefId) {
1267         AutoLock lock(mLock);
1268         VGPLOG("ctxid: %u resid: %u", ctxId, toUnrefId);
1269         detachResourceLocked(ctxId, toUnrefId);
1270     }
1271 
getResourceInfo(uint32_t resId,struct virgl_renderer_resource_info * info)1272     int getResourceInfo(uint32_t resId, struct virgl_renderer_resource_info *info) {
1273         VGPLOG("resid: %u", resId);
1274         if (!info)
1275             return EINVAL;
1276 
1277         AutoLock lock(mLock);
1278         auto it = mResources.find(resId);
1279         if (it == mResources.end())
1280             return ENOENT;
1281 
1282         auto& entry = it->second;
1283 
1284         uint32_t bpp = 4U;
1285         switch (entry.args.format) {
1286             case VIRGL_FORMAT_B8G8R8A8_UNORM:
1287                 info->drm_fourcc = DRM_FORMAT_BGRA8888;
1288                 break;
1289             case VIRGL_FORMAT_B5G6R5_UNORM:
1290                 info->drm_fourcc = DRM_FORMAT_BGR565;
1291                 bpp = 2U;
1292                 break;
1293             case VIRGL_FORMAT_R8G8B8A8_UNORM:
1294                 info->drm_fourcc = DRM_FORMAT_RGBA8888;
1295                 break;
1296             case VIRGL_FORMAT_R8G8B8X8_UNORM:
1297                 info->drm_fourcc = DRM_FORMAT_RGBX8888;
1298                 break;
1299             default:
1300                 return EINVAL;
1301         }
1302 
1303         info->stride = align_up(entry.args.width * bpp, 16U);
1304         info->virgl_format = entry.args.format;
1305         info->handle = entry.args.handle;
1306         info->height = entry.args.height;
1307         info->width = entry.args.width;
1308         info->depth = entry.args.depth;
1309         info->flags = entry.args.flags;
1310         info->tex_id = 0;
1311         return 0;
1312     }
1313 
flushResourceAndReadback(uint32_t res_handle,uint32_t x,uint32_t y,uint32_t width,uint32_t height,void * pixels,uint32_t max_bytes)1314     void flushResourceAndReadback(
1315         uint32_t res_handle, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
1316         void* pixels, uint32_t max_bytes) {
1317         (void)x;
1318         (void)y;
1319         (void)width;
1320         (void)height;
1321         //TODO: displayId > 0 ?
1322         uint32_t displayId = 0;
1323         mVirtioGpuOps->post_color_buffer(res_handle);
1324         if (pixels) {
1325             mReadPixelsFunc(pixels, max_bytes, displayId);
1326         }
1327     }
1328 
createResourceV2(uint32_t res_handle,uint64_t hvaId)1329     void createResourceV2(uint32_t res_handle, uint64_t hvaId) {
1330         PipeResEntry e;
1331         struct virgl_renderer_resource_create_args args = {
1332             res_handle,
1333             PIPE_BUFFER,
1334             VIRGL_FORMAT_R8_UNORM,
1335             PIPE_BIND_COMMAND_ARGS_BUFFER,
1336             0, 1, 1,
1337             0, 0, 0, 0
1338         };
1339         e.args = args;
1340         e.hostPipe = 0;
1341 
1342         auto entry = HostmemIdMapping::get()->get(hvaId);
1343 
1344         e.hva = entry.hva;
1345         e.hvaSize = entry.size;
1346         e.args.width = entry.size;
1347         e.hvaId = hvaId;
1348         e.hvSlot = 0;
1349         e.iov = nullptr;
1350         e.numIovs = 0;
1351         e.linear = 0;
1352         e.linearSize = 0;
1353 
1354         AutoLock lock(mLock);
1355         mResources[res_handle] = e;
1356     }
1357 
getResourceHva(uint32_t res_handle)1358     uint64_t getResourceHva(uint32_t res_handle) {
1359         AutoLock lock(mLock);
1360         auto it = mResources.find(res_handle);
1361         if (it == mResources.end()) return 0;
1362         const auto& entry = it->second;
1363         return entry.hva;
1364     }
1365 
getResourceHvaSize(uint32_t res_handle)1366     uint64_t getResourceHvaSize(uint32_t res_handle) {
1367         AutoLock lock(mLock);
1368         auto it = mResources.find(res_handle);
1369         if (it == mResources.end()) return 0;
1370         const auto& entry = it->second;
1371         return entry.hvaSize;
1372     }
1373 
resourceMap(uint32_t res_handle,void ** hvaOut,uint64_t * sizeOut)1374     int resourceMap(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1375         AutoLock lock(mLock);
1376         auto it = mResources.find(res_handle);
1377         if (it == mResources.end()) {
1378             if (hvaOut) *hvaOut = nullptr;
1379             if (sizeOut) *sizeOut = 0;
1380             return -1;
1381         }
1382 
1383         const auto& entry = it->second;
1384 
1385         static const uint64_t kPageSizeforBlob = 4096;
1386         static const uint64_t kPageMaskForBlob = ~(0xfff);
1387 
1388         uint64_t alignedHva =
1389             entry.hva & kPageMaskForBlob;
1390 
1391         uint64_t alignedSize =
1392             kPageSizeforBlob *
1393             ((entry.hvaSize + kPageSizeforBlob - 1) / kPageSizeforBlob);
1394 
1395         if (hvaOut) *hvaOut = (void*)(uintptr_t)alignedHva;
1396         if (sizeOut) *sizeOut = alignedSize;
1397         return 0;
1398     }
1399 
resourceUnmap(uint32_t res_handle)1400     int resourceUnmap(uint32_t res_handle) {
1401         AutoLock lock(mLock);
1402         auto it = mResources.find(res_handle);
1403         if (it == mResources.end()) {
1404             return -1;
1405         }
1406 
1407         // TODO(lfy): Good place to run any registered cleanup callbacks.
1408         // No-op for now.
1409         return 0;
1410     }
1411 
setResourceHvSlot(uint32_t res_handle,uint32_t slot)1412     void setResourceHvSlot(uint32_t res_handle, uint32_t slot) {
1413         AutoLock lock(mLock);
1414         auto it = mResources.find(res_handle);
1415         if (it == mResources.end()) return;
1416         auto& entry = it->second;
1417         entry.hvSlot = slot;
1418     }
1419 
getResourceHvSlot(uint32_t res_handle)1420     uint32_t getResourceHvSlot(uint32_t res_handle) {
1421         AutoLock lock(mLock);
1422         auto it = mResources.find(res_handle);
1423         if (it == mResources.end()) return 0;
1424         const auto& entry = it->second;
1425         return entry.hvSlot;
1426     }
1427 
1428 private:
allocResource(PipeResEntry & entry,iovec * iov,int num_iovs)1429     void allocResource(PipeResEntry& entry, iovec* iov, int num_iovs) {
1430         VGPLOG("entry linear: %p", entry.linear);
1431         if (entry.linear) free(entry.linear);
1432 
1433         size_t linearSize = 0;
1434         for (uint32_t i = 0; i < num_iovs; ++i) {
1435             VGPLOG("iov base: %p", iov[i].iov_base);
1436             linearSize += iov[i].iov_len;
1437             VGPLOG("has iov of %zu. linearSize current: %zu",
1438                    iov[i].iov_len, linearSize);
1439         }
1440         VGPLOG("final linearSize: %zu", linearSize);
1441 
1442         void* linear = nullptr;
1443 
1444         if (linearSize) linear = malloc(linearSize);
1445 
1446         entry.iov = (iovec*)malloc(sizeof(*iov) * num_iovs);
1447         entry.numIovs = num_iovs;
1448         memcpy(entry.iov, iov, num_iovs * sizeof(*iov));
1449         entry.linear = linear;
1450         entry.linearSize = linearSize;
1451 
1452         virgl_box initbox;
1453         initbox.x = 0;
1454         initbox.y = 0;
1455         initbox.w = (uint32_t)linearSize;
1456         initbox.h = 1;
1457     }
1458 
detachResourceLocked(uint32_t ctxId,uint32_t toUnrefId)1459     void detachResourceLocked(uint32_t ctxId, uint32_t toUnrefId) {
1460         VGPLOG("ctxid: %u resid: %u", ctxId, toUnrefId);
1461 
1462         auto it = mContextResources.find(ctxId);
1463         if (it == mContextResources.end()) return;
1464 
1465         std::vector<VirglResId> withoutRes;
1466         for (auto resId : it->second) {
1467             if (resId != toUnrefId) {
1468                 withoutRes.push_back(resId);
1469             }
1470         }
1471         mContextResources[ctxId] = withoutRes;
1472 
1473         auto resIt = mResources.find(toUnrefId);
1474         if (resIt == mResources.end()) return;
1475 
1476         resIt->second.hostPipe = 0;
1477         resIt->second.ctxId = 0;
1478     }
1479 
ensureAndGetServiceOps()1480     inline const GoldfishPipeServiceOps* ensureAndGetServiceOps() {
1481         if (mServiceOps) return mServiceOps;
1482         mServiceOps = goldfish_pipe_get_service_ops();
1483         return mServiceOps;
1484     }
1485 
    // Guards all the maps and the fence deque below.
    Lock mLock;

    // Opaque cookie handed back to mVirglRendererCallbacks (e.g. write_fence).
    void* mCookie = nullptr;
    // Host-side callbacks registered at init time.
    virgl_renderer_callbacks mVirglRendererCallbacks;
    // Color-buffer / GPU-wait operations table.
    AndroidVirtioGpuOps* mVirtioGpuOps = nullptr;
    // Used by flushResourceAndReadback to copy displayed pixels out.
    ReadPixelsFunc mReadPixelsFunc = nullptr;
    // Address-space device control ops (ping, allocation, etc.).
    struct address_space_device_control_ops* mAddressSpaceDeviceControlOps =
        nullptr;

    // Lazily fetched via ensureAndGetServiceOps().
    const GoldfishPipeServiceOps* mServiceOps = nullptr;

    std::unordered_map<VirglCtxId, PipeCtxEntry> mContexts;
    std::unordered_map<VirglResId, PipeResEntry> mResources;
    // Bidirectional context <-> resource association maps.
    std::unordered_map<VirglCtxId, std::vector<VirglResId>> mContextResources;
    std::unordered_map<VirglResId, std::vector<VirglCtxId>> mResourceContexts;
    bool mLastSubmitCmdCtxExists = false;
    uint32_t mLastSubmitCmdCtx = 0;
    // Other fences that aren't related to the fence covering a pipe buffer
    // submission.
    std::deque<int> mFenceDeque;
1506 };
1507 
sRenderer()1508 static PipeVirglRenderer* sRenderer() {
1509     static PipeVirglRenderer* p = new PipeVirglRenderer;
1510     return p;
1511 }
1512 
1513 extern "C" {
1514 
pipe_virgl_renderer_init(void * cookie,int flags,struct virgl_renderer_callbacks * cb)1515 VG_EXPORT int pipe_virgl_renderer_init(
1516     void *cookie, int flags, struct virgl_renderer_callbacks *cb) {
1517     sRenderer()->init(cookie, flags, cb);
1518     return 0;
1519 }
1520 
pipe_virgl_renderer_poll(void)1521 VG_EXPORT void pipe_virgl_renderer_poll(void) {
1522     sRenderer()->poll();
1523 }
1524 
pipe_virgl_renderer_get_cursor_data(uint32_t resource_id,uint32_t * width,uint32_t * height)1525 VG_EXPORT void* pipe_virgl_renderer_get_cursor_data(
1526     uint32_t resource_id, uint32_t *width, uint32_t *height) {
1527     return 0;
1528 }
1529 
pipe_virgl_renderer_resource_create(struct virgl_renderer_resource_create_args * args,struct iovec * iov,uint32_t num_iovs)1530 VG_EXPORT int pipe_virgl_renderer_resource_create(
1531     struct virgl_renderer_resource_create_args *args,
1532     struct iovec *iov, uint32_t num_iovs) {
1533 
1534     return sRenderer()->createResource(args, iov, num_iovs);
1535 }
1536 
pipe_virgl_renderer_resource_unref(uint32_t res_handle)1537 VG_EXPORT void pipe_virgl_renderer_resource_unref(uint32_t res_handle) {
1538     sRenderer()->unrefResource(res_handle);
1539 }
1540 
pipe_virgl_renderer_context_create(uint32_t handle,uint32_t nlen,const char * name)1541 VG_EXPORT int pipe_virgl_renderer_context_create(
1542     uint32_t handle, uint32_t nlen, const char *name) {
1543     return sRenderer()->createContext(handle, nlen, name);
1544 }
1545 
pipe_virgl_renderer_context_destroy(uint32_t handle)1546 VG_EXPORT void pipe_virgl_renderer_context_destroy(uint32_t handle) {
1547     sRenderer()->destroyContext(handle);
1548 }
1549 
pipe_virgl_renderer_submit_cmd(void * buffer,int ctx_id,int dwordCount)1550 VG_EXPORT int pipe_virgl_renderer_submit_cmd(void *buffer,
1551                                           int ctx_id,
1552                                           int dwordCount) {
1553     return sRenderer()->submitCmd(ctx_id, buffer, dwordCount);
1554 }
1555 
pipe_virgl_renderer_transfer_read_iov(uint32_t handle,uint32_t ctx_id,uint32_t level,uint32_t stride,uint32_t layer_stride,struct virgl_box * box,uint64_t offset,struct iovec * iov,int iovec_cnt)1556 VG_EXPORT int pipe_virgl_renderer_transfer_read_iov(
1557     uint32_t handle, uint32_t ctx_id,
1558     uint32_t level, uint32_t stride,
1559     uint32_t layer_stride,
1560     struct virgl_box *box,
1561     uint64_t offset, struct iovec *iov,
1562     int iovec_cnt) {
1563     return sRenderer()->transferReadIov(handle, offset, box, iov, iovec_cnt);
1564 }
1565 
pipe_virgl_renderer_transfer_write_iov(uint32_t handle,uint32_t ctx_id,int level,uint32_t stride,uint32_t layer_stride,struct virgl_box * box,uint64_t offset,struct iovec * iovec,unsigned int iovec_cnt)1566 VG_EXPORT int pipe_virgl_renderer_transfer_write_iov(
1567     uint32_t handle,
1568     uint32_t ctx_id,
1569     int level,
1570     uint32_t stride,
1571     uint32_t layer_stride,
1572     struct virgl_box *box,
1573     uint64_t offset,
1574     struct iovec *iovec,
1575     unsigned int iovec_cnt) {
1576     return sRenderer()->transferWriteIov(handle, offset, box, iovec, iovec_cnt);
1577 }
1578 
1579 // Not implemented
pipe_virgl_renderer_get_cap_set(uint32_t,uint32_t *,uint32_t *)1580 VG_EXPORT void pipe_virgl_renderer_get_cap_set(uint32_t, uint32_t*, uint32_t*) { }
pipe_virgl_renderer_fill_caps(uint32_t,uint32_t,void * caps)1581 VG_EXPORT void pipe_virgl_renderer_fill_caps(uint32_t, uint32_t, void *caps) { }
1582 
pipe_virgl_renderer_resource_attach_iov(int res_handle,struct iovec * iov,int num_iovs)1583 VG_EXPORT int pipe_virgl_renderer_resource_attach_iov(
1584     int res_handle, struct iovec *iov,
1585     int num_iovs) {
1586     return sRenderer()->attachIov(res_handle, iov, num_iovs);
1587 }
1588 
pipe_virgl_renderer_resource_detach_iov(int res_handle,struct iovec ** iov,int * num_iovs)1589 VG_EXPORT void pipe_virgl_renderer_resource_detach_iov(
1590     int res_handle, struct iovec **iov, int *num_iovs) {
1591     return sRenderer()->detachIov(res_handle, iov, num_iovs);
1592 }
1593 
pipe_virgl_renderer_create_fence(int client_fence_id,uint32_t cmd_type)1594 VG_EXPORT int pipe_virgl_renderer_create_fence(
1595     int client_fence_id, uint32_t cmd_type) {
1596     sRenderer()->createFence(client_fence_id, cmd_type);
1597     return 0;
1598 }
1599 
// Forcing context 0 is a no-op for this renderer; only emits a debug trace
// (VGPLOG compiles to nothing unless DEBUG_VIRTIO_GOLDFISH_PIPE is set).
VG_EXPORT void pipe_virgl_renderer_force_ctx_0(void) {
    VGPLOG("call");
}
1603 
pipe_virgl_renderer_ctx_attach_resource(int ctx_id,int res_handle)1604 VG_EXPORT void pipe_virgl_renderer_ctx_attach_resource(
1605     int ctx_id, int res_handle) {
1606     sRenderer()->attachResource(ctx_id, res_handle);
1607 }
1608 
pipe_virgl_renderer_ctx_detach_resource(int ctx_id,int res_handle)1609 VG_EXPORT void pipe_virgl_renderer_ctx_detach_resource(
1610     int ctx_id, int res_handle) {
1611     sRenderer()->detachResource(ctx_id, res_handle);
1612 }
1613 
pipe_virgl_renderer_resource_get_info(int res_handle,struct virgl_renderer_resource_info * info)1614 VG_EXPORT int pipe_virgl_renderer_resource_get_info(
1615     int res_handle,
1616     struct virgl_renderer_resource_info *info) {
1617     return sRenderer()->getResourceInfo(res_handle, info);
1618 }
1619 
pipe_virgl_renderer_resource_create_v2(uint32_t res_handle,uint64_t hvaId)1620 VG_EXPORT int pipe_virgl_renderer_resource_create_v2(uint32_t res_handle, uint64_t hvaId) {
1621     sRenderer()->createResourceV2(res_handle, hvaId);
1622     return 0;
1623 }
1624 
pipe_virgl_renderer_resource_map(uint32_t res_handle,void ** hvaOut,uint64_t * sizeOut)1625 VG_EXPORT int pipe_virgl_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1626     return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
1627 }
1628 
pipe_virgl_renderer_resource_unmap(uint32_t res_handle)1629 VG_EXPORT int pipe_virgl_renderer_resource_unmap(uint32_t res_handle) {
1630     return sRenderer()->resourceUnmap(res_handle);
1631 }
1632 
stream_renderer_flush_resource_and_readback(uint32_t res_handle,uint32_t x,uint32_t y,uint32_t width,uint32_t height,void * pixels,uint32_t max_bytes)1633 VG_EXPORT void stream_renderer_flush_resource_and_readback(
1634     uint32_t res_handle, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
1635     void* pixels, uint32_t max_bytes) {
1636     sRenderer()->flushResourceAndReadback(res_handle, x, y, width, height, pixels, max_bytes);
1637 }
1638 
stream_renderer_resource_create_v2(uint32_t res_handle,uint64_t hvaId)1639 VG_EXPORT void stream_renderer_resource_create_v2(
1640     uint32_t res_handle, uint64_t hvaId) {
1641     sRenderer()->createResourceV2(res_handle, hvaId);
1642 }
1643 
stream_renderer_resource_get_hva(uint32_t res_handle)1644 VG_EXPORT uint64_t stream_renderer_resource_get_hva(uint32_t res_handle) {
1645     return sRenderer()->getResourceHva(res_handle);
1646 }
1647 
stream_renderer_resource_get_hva_size(uint32_t res_handle)1648 VG_EXPORT uint64_t stream_renderer_resource_get_hva_size(uint32_t res_handle) {
1649     return sRenderer()->getResourceHvaSize(res_handle);
1650 }
1651 
stream_renderer_resource_set_hv_slot(uint32_t res_handle,uint32_t slot)1652 VG_EXPORT void stream_renderer_resource_set_hv_slot(uint32_t res_handle, uint32_t slot) {
1653     sRenderer()->setResourceHvSlot(res_handle, slot);
1654 }
1655 
stream_renderer_resource_get_hv_slot(uint32_t res_handle)1656 VG_EXPORT uint32_t stream_renderer_resource_get_hv_slot(uint32_t res_handle) {
1657     return sRenderer()->getResourceHvSlot(res_handle);
1658 }
1659 
stream_renderer_resource_map(uint32_t res_handle,void ** hvaOut,uint64_t * sizeOut)1660 VG_EXPORT int stream_renderer_resource_map(uint32_t res_handle, void** hvaOut, uint64_t* sizeOut) {
1661     return sRenderer()->resourceMap(res_handle, hvaOut, sizeOut);
1662 }
1663 
stream_renderer_resource_unmap(uint32_t res_handle)1664 VG_EXPORT int stream_renderer_resource_unmap(uint32_t res_handle) {
1665     return sRenderer()->resourceUnmap(res_handle);
1666 }
1667 
1668 
// X-macro adapter: for each api name in LIST_VIRGLRENDERER_API, emit the
// address of the corresponding pipe_-prefixed wrapper defined above,
// followed by a comma, to build the interface struct initializer.
#define VIRGLRENDERER_API_PIPE_STRUCT_DEF(api) pipe_##api,

// Function table handed to the virtio-gpu device; populated entirely from
// the pipe_* wrappers in this file via the X-macro list, so the struct
// field order must match LIST_VIRGLRENDERER_API.
static struct virgl_renderer_virtio_interface s_virtio_interface = {
    LIST_VIRGLRENDERER_API(VIRGLRENDERER_API_PIPE_STRUCT_DEF)
};
1674 
// Accessor for the file-static interface table above. The returned pointer
// refers to static storage and must not be freed by the caller.
struct virgl_renderer_virtio_interface*
get_goldfish_pipe_virgl_renderer_virtio_interface(void) {
    return &s_virtio_interface;
}
1679 
virtio_goldfish_pipe_reset(void * pipe,void * host_pipe)1680 void virtio_goldfish_pipe_reset(void *pipe, void *host_pipe) {
1681     sRenderer()->resetPipe((GoldfishHwPipe*)pipe, (GoldfishHostPipe*)host_pipe);
1682 }
1683 
1684 } // extern "C"
1685