/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "HostConnection.h"

#include "aemu/base/threads/AndroidThread.h"
#include "aemu/base/AndroidHealthMonitor.h"
#include "aemu/base/AndroidHealthMonitorConsumerBasic.h"
#include "cutils/properties.h"
#include "renderControl_types.h"

#ifdef HOST_BUILD
#include "aemu/base/Tracing.h"
#endif
#include "aemu/base/Process.h"

#define DEBUG_HOSTCONNECTION 0

#if DEBUG_HOSTCONNECTION
#define DPRINT(fmt,...) ALOGD("%s: " fmt, __FUNCTION__, ##__VA_ARGS__);
#else
#define DPRINT(...)
#endif

using android::base::guest::CreateHealthMonitor;
using android::base::guest::HealthMonitor;
using android::base::guest::HealthMonitorConsumerBasic;

#ifdef GOLDFISH_NO_GL
struct gl_client_context_t {
    int placeholder;
};
class GLEncoder : public gl_client_context_t {
public:
    GLEncoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl_client_context_t *()) { }
};
struct gl2_client_context_t {
    int placeholder;
};
class GL2Encoder : public gl2_client_context_t {
public:
    GL2Encoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl2_client_context_t *()) { }
    void setNoHostError(bool) { }
    void setDrawCallFlushInterval(uint32_t) { }
    void setHasAsyncUnmapBuffer(int) { }
    void setHasSyncBufferData(int) { }
};
#else
#include "GLEncoder.h"
#include "GL2Encoder.h"
#endif

#ifdef GFXSTREAM
#include "VkEncoder.h"
#include "AddressSpaceStream.h"
#else
namespace gfxstream {
namespace vk {
struct VkEncoder {
    VkEncoder(IOStream* stream, HealthMonitor<>* healthMonitor = nullptr) { }
    void decRef() { }
    int placeholder;
};
}  // namespace vk
}  // namespace gfxstream
class QemuPipeStream;
typedef QemuPipeStream AddressSpaceStream;
AddressSpaceStream* createAddressSpaceStream(size_t bufSize, HealthMonitor<>* healthMonitor) {
    ALOGE("%s: FATAL: Trying to create ASG stream in unsupported build\n", __func__);
    abort();
}
AddressSpaceStream* createVirtioGpuAddressSpaceStream(HealthMonitor<>* healthMonitor) {
    ALOGE("%s: FATAL: Trying to create VirtioGpu ASG stream in unsupported build\n", __func__);
    abort();
}
#endif

using gfxstream::vk::VkEncoder;

#include "ProcessPipe.h"
#include "QemuPipeStream.h"
#include "TcpStream.h"
#include "ThreadInfo.h"
#include <gralloc_cb_bp.h>
#include <unistd.h>

using android::base::guest::getCurrentThreadId;

#ifdef VIRTIO_GPU

#include "VirtGpu.h"
#include "VirtioGpuPipeStream.h"
#include "virtgpu_drm.h"

#include <cros_gralloc_handle.h>
#include <xf86drm.h>

#endif

#if defined(__linux__) || defined(__ANDROID__)
#include <fstream>
#include <string>
#endif

#undef LOG_TAG
#define LOG_TAG "HostConnection"
#if PLATFORM_SDK_VERSION < 26
#include <cutils/log.h>
#else
#include <log/log.h>
#endif

#define STREAM_BUFFER_SIZE  (4*1024*1024)
#define STREAM_PORT_NUM     22468

HealthMonitor<>* getGlobalHealthMonitor() {
    // Initialize HealthMonitor
    // Rather than inject as a construct arg, we keep it as a static variable in the .cpp
    // to avoid setting up dependencies in other repos (external/qemu)
    static HealthMonitorConsumerBasic sHealthMonitorConsumerBasic;
    static std::unique_ptr<HealthMonitor<>> sHealthMonitor = CreateHealthMonitor(sHealthMonitorConsumerBasic);
    return sHealthMonitor.get();
}

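// Selects the transport used to reach the host renderer. On Android and host
// builds this consults the ro.boot.*gltransport* properties and falls back to
// the QEMU pipe when the value is unset or unrecognized; Fuchsia always uses
// the address space device.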
static HostConnectionType getConnectionTypeFromProperty() {
#ifdef __Fuchsia__
    return HOST_CONNECTION_ADDRESS_SPACE;
#elif defined(__ANDROID__) || defined(HOST_BUILD)
    char transportValue[PROPERTY_VALUE_MAX] = "";

    do {
        property_get("ro.boot.qemu.gltransport.name", transportValue, "");
        if (transportValue[0]) { break; }

        property_get("ro.boot.qemu.gltransport", transportValue, "");
        if (transportValue[0]) { break; }

        property_get("ro.boot.hardware.gltransport", transportValue, "");
    } while (false);

    if (!transportValue[0]) return HOST_CONNECTION_QEMU_PIPE;

    if (!strcmp("tcp", transportValue)) return HOST_CONNECTION_TCP;
    if (!strcmp("pipe", transportValue)) return HOST_CONNECTION_QEMU_PIPE;
    if (!strcmp("asg", transportValue)) return HOST_CONNECTION_ADDRESS_SPACE;
    if (!strcmp("virtio-gpu-pipe", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_PIPE;
    if (!strcmp("virtio-gpu-asg", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;

    return HOST_CONNECTION_QEMU_PIPE;
#else
    return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
#endif
}

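// Reads ro.boot.qemu.gltransport.drawFlushInterval to decide how many draw
// calls the GLES2 encoder batches before flushing; missing or non-positive
// values fall back to the default of 800.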
static uint32_t getDrawCallFlushIntervalFromProperty() {
    constexpr uint32_t kDefaultValue = 800;

    char flushValue[PROPERTY_VALUE_MAX] = "";
    property_get("ro.boot.qemu.gltransport.drawFlushInterval", flushValue, "");
    if (!flushValue[0]) return kDefaultValue;

    const long interval = strtol(flushValue, 0, 10);
    return (interval > 0) ? uint32_t(interval) : kDefaultValue;
}

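// Maps the ro.hardware.gralloc property to a GrallocType; anything other than
// "minigbm" (including an unset property) is treated as the ranchu (goldfish)
// gralloc.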
static GrallocType getGrallocTypeFromProperty() {
    char value[PROPERTY_VALUE_MAX] = "";
    property_get("ro.hardware.gralloc", value, "");

    if (!value[0]) return GRALLOC_TYPE_RANCHU;

    if (!strcmp("ranchu", value)) return GRALLOC_TYPE_RANCHU;
    if (!strcmp("minigbm", value)) return GRALLOC_TYPE_MINIGBM;
    return GRALLOC_TYPE_RANCHU;
}

class GoldfishGralloc : public Gralloc
{
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc,
        int width, int height, uint32_t glformat) {
        return rcEnc->rcCreateColorBuffer(
            rcEnc, width, height, glformat);
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle)
    {
        return cb_handle_t::from(handle)->hostHandle;
    }

    virtual int getFormat(native_handle_t const* handle)
    {
        return cb_handle_t::from(handle)->format;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle)
    {
        return static_cast<size_t>(cb_handle_t::from(handle)->allocatedSize());
    }

    virtual bool treatBlobAsImage() { return true; }
};

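// Rounds n up to the next multiple of a; used below to page-align the size of
// the virtio-gpu resource backing a color buffer.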
static inline uint32_t align_up(uint32_t n, uint32_t a) {
    return ((n + a - 1) / a) * a;
}

#if defined(VIRTIO_GPU)

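// Gralloc implementation backed by minigbm buffers: host handles, formats,
// and sizes are resolved through DRM ioctls on the virtio-gpu rendernode fd
// provided via setFd().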
class MinigbmGralloc : public Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext*,
        int width, int height, uint32_t glformat) {

        // Only supported format for pbuffers in gfxstream
        // should be RGBA8
        const uint32_t kGlRGB = 0x1907;
        const uint32_t kGlRGBA = 0x1908;
        const uint32_t kVirglFormatRGBA = 67; // VIRGL_FORMAT_R8G8B8A8_UNORM;
        uint32_t virtgpu_format = 0;
        uint32_t bpp = 0;
        switch (glformat) {
            case kGlRGB:
                DPRINT("Note: egl wanted GL_RGB, still using RGBA");
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
            case kGlRGBA:
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
            default:
                DPRINT("Note: egl wanted 0x%x, still using RGBA", glformat);
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
        }
        const uint32_t kPipeTexture2D = 2; // PIPE_TEXTURE_2D
        const uint32_t kBindRenderTarget = 1 << 1; // VIRGL_BIND_RENDER_TARGET
        struct drm_virtgpu_resource_create res_create;
        memset(&res_create, 0, sizeof(res_create));
        res_create.target = kPipeTexture2D;
        res_create.format = virtgpu_format;
        res_create.bind = kBindRenderTarget;
        res_create.width = width;
        res_create.height = height;
        res_create.depth = 1;
        res_create.array_size = 1;
        res_create.last_level = 0;
        res_create.nr_samples = 0;
        res_create.stride = bpp * width;
        res_create.size = align_up(bpp * width * height, PAGE_SIZE);

        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s (%d)\n", __func__,
                  strerror(errno), errno);
            abort();
        }

        return res_create.res_handle;
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle) {
        struct drm_virtgpu_resource_info info;
        if (!getResInfo(handle, &info)) {
            ALOGE("%s: failed to get resource info\n", __func__);
            return 0;
        }

        return info.res_handle;
    }

    virtual int getFormat(native_handle_t const* handle) {
        return ((cros_gralloc_handle *)handle)->droid_format;
    }

    virtual uint32_t getFormatDrmFourcc(native_handle_t const* handle) override {
        return ((cros_gralloc_handle *)handle)->format;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle) {
        struct drm_virtgpu_resource_info info;
        if (!getResInfo(handle, &info)) {
            ALOGE("%s: failed to get resource info\n", __func__);
            return 0;
        }

        return info.size;
    }

    void setFd(int fd) { m_fd = fd; }

private:

    bool getResInfo(native_handle_t const* handle,
                    struct drm_virtgpu_resource_info* info) {
        memset(info, 0x0, sizeof(*info));
        if (m_fd < 0) {
            ALOGE("%s: Error, rendernode fd missing\n", __func__);
            return false;
        }

        struct drm_gem_close gem_close;
        memset(&gem_close, 0x0, sizeof(gem_close));

        cros_gralloc_handle const* cros_handle =
            reinterpret_cast<cros_gralloc_handle const*>(handle);

        uint32_t prime_handle;
        int ret = drmPrimeFDToHandle(m_fd, cros_handle->fds[0], &prime_handle);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_PRIME_FD_TO_HANDLE failed: %s (errno %d)\n",
                  __func__, strerror(errno), errno);
            return false;
        }
        struct ManagedDrmGem {
            ManagedDrmGem(const ManagedDrmGem&) = delete;
            ~ManagedDrmGem() {
                struct drm_gem_close gem_close {
                    .handle = m_prime_handle,
                    .pad = 0,
                };
                int ret = drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
                if (ret) {
                    ALOGE("%s: DRM_IOCTL_GEM_CLOSE failed on handle %" PRIu32 ": %s(%d).",
                          __func__, m_prime_handle, strerror(errno), errno);
                }
            }

            int m_fd;
            uint32_t m_prime_handle;
        } managed_prime_handle{
            .m_fd = m_fd,
            .m_prime_handle = prime_handle,
        };

        info->bo_handle = managed_prime_handle.m_prime_handle;

        struct drm_virtgpu_3d_wait virtgpuWait{
            .handle = managed_prime_handle.m_prime_handle,
            .flags = 0,
        };
        // This only works for host resources created by the VIRTGPU_RESOURCE_CREATE
        // ioctl. We need to use a different mechanism to synchronize with the host
        // if the minigbm gralloc switches to the virtio-gpu blob or cross-domain
        // backend.
        ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_WAIT, &virtgpuWait);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_WAIT failed: %s(%d)", __func__, strerror(errno), errno);
            return false;
        }

        ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, info);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed: %s (errno %d)\n",
                  __func__, strerror(errno), errno);
            return false;
        }

        return true;
    }

    int m_fd = -1;
};

#else

class MinigbmGralloc : public Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext*,
        int width, int height, uint32_t glformat) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual int getFormat(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    void setFd(int fd) { m_fd = fd; }

private:

    int m_fd = -1;
};

#endif

class GoldfishProcessPipe : public ProcessPipe
{
public:
    bool processPipeInit(int stream_handle, HostConnectionType connType, renderControl_encoder_context_t *rcEnc)
    {
        return ::processPipeInit(stream_handle, connType, rcEnc);
    }

};

static GoldfishGralloc m_goldfishGralloc;
static GoldfishProcessPipe m_goldfishProcessPipe;

HostConnection::HostConnection() :
    exitUncleanly(false),
    m_checksumHelper(),
    m_hostExtensions(),
    m_grallocOnly(true),
    m_noHostError(true),
    m_rendernodeFd(-1) {
#ifdef HOST_BUILD
    android::base::initializeTracing();
#endif
}

HostConnection::~HostConnection()
{
    // round-trip to ensure that queued commands have been processed
    // before process pipe closure is detected.
    if (m_rcEnc && !exitUncleanly) {
        (void)m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    }

    if (m_grallocType == GRALLOC_TYPE_MINIGBM) {
        delete m_grallocHelper;
    }

    if (m_vkEnc) {
        m_vkEnc->decRef();
    }

    if (m_stream) {
        m_stream->decRef();
    }
}

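// Factory: picks the transport from the system property, creates and connects
// the corresponding stream, selects the gralloc helper and process pipe, then
// sends the initial zero clientFlags word and (on Linux/Android) registers the
// process name with the host via rcSetProcessMetadata.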
// static
std::unique_ptr<HostConnection> HostConnection::connect(uint32_t capset_id) {
    const enum HostConnectionType connType = getConnectionTypeFromProperty();

    // Use "new" to access a non-public constructor.
    auto con = std::unique_ptr<HostConnection>(new HostConnection);

    switch (connType) {
        case HOST_CONNECTION_ADDRESS_SPACE: {
            auto stream = createAddressSpaceStream(STREAM_BUFFER_SIZE, getGlobalHealthMonitor());
            if (!stream) {
                ALOGE("Failed to create AddressSpaceStream for host connection\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_ADDRESS_SPACE;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = stream;
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_QEMU_PIPE: {
            auto stream = new QemuPipeStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create QemuPipeStream for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (QemuPipeStream)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_QEMU_PIPE;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = stream;
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_TCP: {
#ifndef __ANDROID__
            ALOGE("Failed to create TCP connection on non-Android guest\n");
            return nullptr;
            break;
#else
            auto stream = new TcpStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create TcpStream for host connection\n");
                return nullptr;
            }

            if (stream->connect("10.0.2.2", STREAM_PORT_NUM) < 0) {
                ALOGE("Failed to connect to host (TcpStream)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_TCP;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = stream;
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
#endif
        }
#if defined(VIRTIO_GPU) && !defined(HOST_BUILD)
        case HOST_CONNECTION_VIRTIO_GPU_PIPE: {
            auto stream = new VirtioGpuPipeStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create VirtioGpu for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (VirtioGpu)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_PIPE;
            con->m_grallocType = getGrallocTypeFromProperty();
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_stream = stream;
            con->m_rendernodeFd = rendernodeFd;
            switch (con->m_grallocType) {
                case GRALLOC_TYPE_RANCHU:
                    con->m_grallocHelper = &m_goldfishGralloc;
                    break;
                case GRALLOC_TYPE_MINIGBM: {
                    MinigbmGralloc* m = new MinigbmGralloc;
                    m->setFd(rendernodeFd);
                    con->m_grallocHelper = m;
                    break;
                }
                default:
                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
                    abort();
            }
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
            VirtGpuDevice& instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)capset_id);
            auto deviceHandle = instance.getDeviceHandle();
            auto stream = createVirtioGpuAddressSpaceStream(getGlobalHealthMonitor());
            if (!stream) {
                ALOGE("Failed to create virtgpu AddressSpaceStream\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
            con->m_grallocType = getGrallocTypeFromProperty();
            con->m_stream = stream;
            con->m_rendernodeFd = deviceHandle;
            switch (con->m_grallocType) {
                case GRALLOC_TYPE_RANCHU:
                    con->m_grallocHelper = &m_goldfishGralloc;
                    break;
                case GRALLOC_TYPE_MINIGBM: {
                    MinigbmGralloc* m = new MinigbmGralloc;
                    m->setFd(deviceHandle);
                    con->m_grallocHelper = m;
                    break;
                }
                default:
                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
                    abort();
            }
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
#endif // VIRTIO_GPU && !HOST_BUILD
        default:
            break;
    }

    // send zero 'clientFlags' to the host.
    unsigned int *pClientFlags =
            (unsigned int *)con->m_stream->allocBuffer(sizeof(unsigned int));
    *pClientFlags = 0;
    con->m_stream->commitBuffer(sizeof(unsigned int));

#if defined(__linux__) || defined(__ANDROID__)
    auto rcEnc = con->rcEncoder();
    if (rcEnc != nullptr) {
        auto processName = android::base::guest::getProcessName();
        if (!processName.empty()) {
            rcEnc->rcSetProcessMetadata(
                rcEnc, const_cast<char*>("process_name"),
                const_cast<RenderControlByte*>(processName.c_str()),
                strlen(processName.c_str())+ 1);
        }
    }
#endif

    return con;
}

HostConnection *HostConnection::get() {
    return getWithThreadInfo(getEGLThreadInfo(), VIRTIO_GPU_CAPSET_NONE);
}

HostConnection *HostConnection::getOrCreate(uint32_t capset_id) {
    return getWithThreadInfo(getEGLThreadInfo(), capset_id);
}

HostConnection *HostConnection::getWithThreadInfo(EGLThreadInfo* tinfo, uint32_t capset_id) {
    // Get thread info
    if (!tinfo) {
        return NULL;
    }

    if (tinfo->hostConn == NULL) {
        tinfo->hostConn = HostConnection::createUnique(capset_id);
    }

    return tinfo->hostConn.get();
}

void HostConnection::exit() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn.reset();
}

void HostConnection::exitUnclean() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn->exitUncleanly = true;
    tinfo->hostConn.reset();
}

// static
std::unique_ptr<HostConnection> HostConnection::createUnique(uint32_t capset_id) {
    return connect(capset_id);
}

GLEncoder *HostConnection::glEncoder()
{
    if (!m_glEnc) {
        m_glEnc = std::make_unique<GLEncoder>(m_stream, checksumHelper());
        DBG("HostConnection::glEncoder new encoder %p, tid %lu", m_glEnc, getCurrentThreadId());
        m_glEnc->setContextAccessor(s_getGLContext);
    }
    return m_glEnc.get();
}

GL2Encoder *HostConnection::gl2Encoder()
{
    if (!m_gl2Enc) {
        m_gl2Enc =
            std::make_unique<GL2Encoder>(m_stream, checksumHelper());
        DBG("HostConnection::gl2Encoder new encoder %p, tid %lu", m_gl2Enc, getCurrentThreadId());
        m_gl2Enc->setContextAccessor(s_getGL2Context);
        m_gl2Enc->setNoHostError(m_noHostError);
        m_gl2Enc->setDrawCallFlushInterval(
            getDrawCallFlushIntervalFromProperty());
        m_gl2Enc->setHasAsyncUnmapBuffer(m_rcEnc->hasAsyncUnmapBuffer());
        m_gl2Enc->setHasSyncBufferData(m_rcEnc->hasSyncBufferData());
    }
    return m_gl2Enc.get();
}

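// The Vulkan encoder depends on the feature flags negotiated by the
// render-control encoder, so make sure that encoder exists first.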
VkEncoder *HostConnection::vkEncoder()
{
    rcEncoder();
    if (!m_vkEnc) {
        m_vkEnc = new VkEncoder(m_stream, getGlobalHealthMonitor());
    }
    return m_vkEnc;
}

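// Lazily creates the render-control encoder. On first use this negotiates the
// checksum protocol, queries the host extension string to populate the feature
// flags below, and then initializes the process pipe for this connection.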
ExtendedRCEncoderContext *HostConnection::rcEncoder()
{
    if (!m_rcEnc) {
        m_rcEnc = std::make_unique<ExtendedRCEncoderContext>(m_stream,
                                                             checksumHelper());

        ExtendedRCEncoderContext* rcEnc = m_rcEnc.get();
        setChecksumHelper(rcEnc);
        queryAndSetSyncImpl(rcEnc);
        queryAndSetDmaImpl(rcEnc);
        queryAndSetGLESMaxVersion(rcEnc);
        queryAndSetNoErrorState(rcEnc);
        queryAndSetHostCompositionImpl(rcEnc);
        queryAndSetDirectMemSupport(rcEnc);
        queryAndSetVulkanSupport(rcEnc);
        queryAndSetDeferredVulkanCommandsSupport(rcEnc);
        queryAndSetVulkanNullOptionalStringsSupport(rcEnc);
        queryAndSetVulkanCreateResourcesWithRequirementsSupport(rcEnc);
        queryAndSetVulkanIgnoredHandles(rcEnc);
        queryAndSetYUVCache(rcEnc);
        queryAndSetAsyncUnmapBuffer(rcEnc);
        queryAndSetVirtioGpuNext(rcEnc);
        queryHasSharedSlotsHostMemoryAllocator(rcEnc);
        queryAndSetVulkanFreeMemorySync(rcEnc);
        queryAndSetVirtioGpuNativeSync(rcEnc);
        queryAndSetVulkanShaderFloat16Int8Support(rcEnc);
        queryAndSetVulkanAsyncQueueSubmitSupport(rcEnc);
        queryAndSetHostSideTracingSupport(rcEnc);
        queryAndSetAsyncFrameCommands(rcEnc);
        queryAndSetVulkanQueueSubmitWithCommandsSupport(rcEnc);
        queryAndSetVulkanBatchedDescriptorSetUpdateSupport(rcEnc);
        queryAndSetSyncBufferData(rcEnc);
        queryAndSetVulkanAsyncQsri(rcEnc);
        queryAndSetReadColorBufferDma(rcEnc);
        queryAndSetHWCMultiConfigs(rcEnc);
        queryAndSetVulkanAuxCommandBufferMemory(rcEnc);
        queryVersion(rcEnc);
        if (m_processPipe) {
            auto fd = (m_connectionType == HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE) ? m_rendernodeFd : -1;
            m_processPipe->processPipeInit(fd, m_connectionType, rcEnc);
        }
    }
    return m_rcEnc.get();
}

gl_client_context_t *HostConnection::s_getGLContext()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_glEnc.get();
    }
    return NULL;
}

gl2_client_context_t *HostConnection::s_getGL2Context()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_gl2Enc.get();
    }
    return NULL;
}

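// Returns the cached host extension string, querying it once per connection.
// rcGetHostExtensionsString reports the required size (including the
// terminating NUL) as a negative value when the supplied buffer is too small.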
const std::string& HostConnection::queryHostExtensions(ExtendedRCEncoderContext *rcEnc) {
    if (!m_hostExtensions.empty()) {
        return m_hostExtensions;
    }

    // Extension strings are usually quite long; preallocate enough here.
    std::string extensionsBuffer(1023, '\0');

    // Returns the required size including the 0-terminator, so
    // account for it when passing/using the sizes.
    int extensionSize = rcEnc->rcGetHostExtensionsString(rcEnc,
                                                         extensionsBuffer.size() + 1,
                                                         &extensionsBuffer[0]);
    if (extensionSize < 0) {
        extensionsBuffer.resize(-extensionSize);
        extensionSize = rcEnc->rcGetHostExtensionsString(rcEnc,
                                                         -extensionSize + 1,
                                                         &extensionsBuffer[0]);
    }

    if (extensionSize > 0) {
        extensionsBuffer.resize(extensionSize - 1);
        m_hostExtensions.swap(extensionsBuffer);
    }

    return m_hostExtensions;
}

void HostConnection::queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    DPRINT("HostComposition ext %s", hostExtensions.c_str());
    // make sure V2 is checked first before V1, as host may declare supporting both
    if (hostExtensions.find(kHostCompositionV2) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V2);
    }
    else if (hostExtensions.find(kHostCompositionV1) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V1);
    }
    else {
        rcEnc->setHostComposition(HOST_COMPOSITION_NONE);
    }
}

void HostConnection::setChecksumHelper(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    // check the host supported version
    uint32_t checksumVersion = 0;
    const char* checksumPrefix = ChecksumCalculator::getMaxVersionStrPrefix();
    const char* glProtocolStr = strstr(hostExtensions.c_str(), checksumPrefix);
    if (glProtocolStr) {
        uint32_t maxVersion = ChecksumCalculator::getMaxVersion();
        sscanf(glProtocolStr+strlen(checksumPrefix), "%d", &checksumVersion);
        if (maxVersion < checksumVersion) {
            checksumVersion = maxVersion;
        }
        // The ordering of the following two commands matters!
        // Must tell the host first before setting it in the guest
        rcEnc->rcSelectChecksumHelper(rcEnc, checksumVersion, 0);
        m_checksumHelper.setVersion(checksumVersion);
    }
}

void HostConnection::queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kRCNativeSyncV4) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V4);
    } else if (hostExtensions.find(kRCNativeSyncV3) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V3);
    } else if (hostExtensions.find(kRCNativeSyncV2) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V2);
    } else {
        rcEnc->setSyncImpl(SYNC_IMPL_NONE);
    }
}

void HostConnection::queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kDmaExtStr_v1) != std::string::npos) {
        rcEnc->setDmaImpl(DMA_IMPL_v1);
    } else {
        rcEnc->setDmaImpl(DMA_IMPL_NONE);
    }
}

void HostConnection::queryAndSetGLESMaxVersion(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kGLESMaxVersion_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    } else if (hostExtensions.find(kGLESMaxVersion_3_0) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_0);
    } else if (hostExtensions.find(kGLESMaxVersion_3_1) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_1);
    } else if (hostExtensions.find(kGLESMaxVersion_3_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_2);
    } else {
        ALOGW("Unrecognized GLES max version string in extensions: %s",
              hostExtensions.c_str());
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    }
}

void HostConnection::queryAndSetNoErrorState(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kGLESUseHostError) != std::string::npos) {
        m_noHostError = false;
    }
}

void HostConnection::queryAndSetDirectMemSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kGLDirectMem) != std::string::npos) {
        rcEnc->featureInfo()->hasDirectMem = true;
    }
}

void HostConnection::queryAndSetVulkanSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkan) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkan = true;
    }
}

void HostConnection::queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kDeferredVulkanCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasDeferredVulkanCommands = true;
    }
}

void HostConnection::queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanNullOptionalStrings) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanNullOptionalStrings = true;
    }
}

void HostConnection::queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanCreateResourcesWithRequirements) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanCreateResourcesWithRequirements = true;
    }
}

void HostConnection::queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanIgnoredHandles) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanIgnoredHandles = true;
    }
}

void HostConnection::queryAndSetYUVCache(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kYUVCache) != std::string::npos) {
        rcEnc->featureInfo()->hasYUVCache = true;
    }
}

void HostConnection::queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kAsyncUnmapBuffer) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncUnmapBuffer = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNext(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVirtioGpuNext) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNext = true;
    }
}

void HostConnection::queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kHasSharedSlotsHostMemoryAllocator) != std::string::npos) {
        rcEnc->featureInfo()->hasSharedSlotsHostMemoryAllocator = true;
    }
}

void HostConnection::queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanFreeMemorySync) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanFreeMemorySync = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVirtioGpuNativeSync) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNativeSync = true;
    }
}

void HostConnection::queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanShaderFloat16Int8) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanShaderFloat16Int8 = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanAsyncQueueSubmit) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQueueSubmit = true;
    }
}

void HostConnection::queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kHostSideTracing) != std::string::npos) {
        rcEnc->featureInfo()->hasHostSideTracing = true;
    }
}

void HostConnection::queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kAsyncFrameCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncFrameCommands = true;
    }
}

void HostConnection::queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanQueueSubmitWithCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanQueueSubmitWithCommands = true;
    }
}

void HostConnection::queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanBatchedDescriptorSetUpdate) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanBatchedDescriptorSetUpdate = true;
    }
}

void HostConnection::queryAndSetSyncBufferData(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kSyncBufferData) != std::string::npos) {
        rcEnc->featureInfo()->hasSyncBufferData = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQsri(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanAsyncQsri) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQsri = true;
    }
}

void HostConnection::queryAndSetReadColorBufferDma(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kReadColorBufferDma) != std::string::npos) {
        rcEnc->featureInfo()->hasReadColorBufferDma = true;
    }
}

void HostConnection::queryAndSetHWCMultiConfigs(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kHWCMultiConfigs) != std::string::npos) {
        rcEnc->featureInfo()->hasHWCMultiConfigs = true;
    }
}

void HostConnection::queryAndSetVulkanAuxCommandBufferMemory(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    rcEnc->featureInfo()->hasVulkanAuxCommandMemory = hostExtensions.find(kVulkanAuxCommandMemory) != std::string::npos;
}


GLint HostConnection::queryVersion(ExtendedRCEncoderContext* rcEnc) {
    GLint version = m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    return version;
}
1030