/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "HostConnection.h"

#include "GoldfishAddressSpaceStream.h"
#include "VirtioGpuAddressSpaceStream.h"
#include "aemu/base/AndroidHealthMonitor.h"
#include "aemu/base/AndroidHealthMonitorConsumerBasic.h"
#include "aemu/base/threads/AndroidThread.h"
#if defined(__ANDROID__)
#include "android-base/properties.h"
#endif
#include "renderControl_types.h"

#include "aemu/base/Process.h"

#define DEBUG_HOSTCONNECTION 0

#if DEBUG_HOSTCONNECTION
#define DPRINT(fmt,...) ALOGD("%s: " fmt, __FUNCTION__, ##__VA_ARGS__);
#else
#define DPRINT(...)
#endif

using gfxstream::guest::ChecksumCalculator;
using gfxstream::guest::CreateHealthMonitor;
using gfxstream::guest::HealthMonitor;
using gfxstream::guest::HealthMonitorConsumerBasic;
using gfxstream::guest::IOStream;

#ifdef GOLDFISH_NO_GL
struct gl_client_context_t {
    int placeholder;
};
class GLEncoder : public gl_client_context_t {
public:
    GLEncoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl_client_context_t *()) { }
};
struct gl2_client_context_t {
    int placeholder;
};
class GL2Encoder : public gl2_client_context_t {
public:
    GL2Encoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl2_client_context_t *()) { }
    void setNoHostError(bool) { }
    void setDrawCallFlushInterval(uint32_t) { }
    void setHasAsyncUnmapBuffer(int) { }
    void setHasSyncBufferData(int) { }
};
#else
#include "GLEncoder.h"
#include "GL2Encoder.h"
#endif

#include "VkEncoder.h"
#include "AddressSpaceStream.h"

using gfxstream::vk::VkEncoder;

#include <unistd.h>

#include "ProcessPipe.h"
#include "QemuPipeStream.h"
#include "ThreadInfo.h"

using gfxstream::guest::getCurrentThreadId;

#include "VirtGpu.h"
#include "VirtioGpuPipeStream.h"

#if defined(__linux__) || defined(__ANDROID__)
#include "virtgpu_drm.h"
#include <fstream>
#include <string>
#include <unistd.h>

static const size_t kPageSize = getpagesize();
#else
constexpr size_t kPageSize = PAGE_SIZE;
#endif

#undef LOG_TAG
#define LOG_TAG "HostConnection"
#include <cutils/log.h>

#define STREAM_BUFFER_SIZE  (4*1024*1024)
#define STREAM_PORT_NUM     22468

constexpr const auto kEglProp = "ro.hardware.egl";

HealthMonitor<>* getGlobalHealthMonitor() {
    // Initialize HealthMonitor
    // Rather than injecting it as a constructor arg, we keep it as a static variable in the .cpp
    // to avoid setting up dependencies in other repos (external/qemu).
    static HealthMonitorConsumerBasic sHealthMonitorConsumerBasic;
    static std::unique_ptr<HealthMonitor<>> sHealthMonitor = CreateHealthMonitor(sHealthMonitorConsumerBasic);
    return sHealthMonitor.get();
}

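// Chooses how to reach the host renderer. Fuchsia and Linux guest builds always use the
// virtio-gpu address space stream; otherwise the transport is taken from the
// ro.boot.hardware.gltransport property (Android) or the GFXSTREAM_TRANSPORT environment
// variable, defaulting to the QEMU pipe on Android and virtio-gpu ASG elsewhere.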
static HostConnectionType getConnectionTypeFromProperty(enum VirtGpuCapset capset) {
#if defined(__Fuchsia__) || defined(LINUX_GUEST_BUILD)
    return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
#else
    std::string transport;

#if defined(__ANDROID__)
    transport = android::base::GetProperty("ro.boot.hardware.gltransport", "");
#else
    const char* transport_envvar = getenv("GFXSTREAM_TRANSPORT");
    if (transport_envvar != nullptr) {
        transport = std::string(transport_envvar);
    }
#endif

    if (transport.empty()) {
#if defined(__ANDROID__)
        return HOST_CONNECTION_QEMU_PIPE;
#else
        return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
#endif
    }

    if (transport == "asg") {
        return HOST_CONNECTION_ADDRESS_SPACE;
    }
    if (transport == "pipe") {
        return HOST_CONNECTION_QEMU_PIPE;
    }

    if (transport == "virtio-gpu-asg" || transport == "virtio-gpu-pipe") {
        std::string egl;
#if defined(__ANDROID__)
        egl = android::base::GetProperty(kEglProp, "");
#endif
        // ANGLE doesn't work well without ASG, particularly if HostComposer uses a pipe
        // transport and VK uses ASG.
        if (capset == kCapsetGfxStreamVulkan || egl == "angle") {
            return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
        } else {
            return HOST_CONNECTION_VIRTIO_GPU_PIPE;
        }
    }

    return HOST_CONNECTION_QEMU_PIPE;
#endif
}

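// Interval, in draw calls, at which the GLES2 encoder flushes its stream. The default of
// 800 can be overridden on Android via ro.boot.qemu.gltransport.drawFlushInterval.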
static uint32_t getDrawCallFlushIntervalFromProperty() {
    constexpr uint32_t kDefaultValue = 800;
    uint32_t value = kDefaultValue;

#if defined(__ANDROID__)
    value = android::base::GetUintProperty("ro.boot.qemu.gltransport.drawFlushInterval",
                                           kDefaultValue);
#endif
    return value;
}

HostConnection::HostConnection()
    : exitUncleanly(false),
      m_checksumHelper(),
      m_hostExtensions(),
      m_noHostError(true),
      m_rendernodeFd(-1) { }

HostConnection::~HostConnection()
{
    // round-trip to ensure that queued commands have been processed
    // before process pipe closure is detected.
    if (m_rcEnc && !exitUncleanly) {
        (void)m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    }

    if (m_vkEnc) {
        m_vkEnc->decRef();
    }

    if (m_stream) {
        m_stream->decRef();
    }
}


// static
std::unique_ptr<HostConnection> HostConnection::connect(enum VirtGpuCapset capset) {
    const enum HostConnectionType connType = getConnectionTypeFromProperty(capset);
    uint32_t noRenderControlEnc = 0;

    // Use "new" to access a non-public constructor.
    auto con = std::unique_ptr<HostConnection>(new HostConnection);
    con->m_connectionType = connType;

    switch (connType) {
        case HOST_CONNECTION_ADDRESS_SPACE: {
#if defined(__ANDROID__)
            auto stream = createGoldfishAddressSpaceStream(STREAM_BUFFER_SIZE, getGlobalHealthMonitor());
            if (!stream) {
                ALOGE("Failed to create AddressSpaceStream for host connection\n");
                return nullptr;
            }
            con->m_stream = stream;
#else
            ALOGE("Fatal: HOST_CONNECTION_ADDRESS_SPACE not supported on this host.");
            abort();
#endif

            break;
        }
#if !defined(__Fuchsia__)
        case HOST_CONNECTION_QEMU_PIPE: {
            auto stream = new QemuPipeStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create QemuPipeStream for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (QemuPipeStream)\n");
                return nullptr;
            }
            con->m_stream = stream;
            break;
        }
#endif
        case HOST_CONNECTION_VIRTIO_GPU_PIPE: {
            auto stream = new VirtioGpuPipeStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create VirtioGpu for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (VirtioGpu)\n");
                return nullptr;
            }
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_stream = stream;
            con->m_rendernodeFd = rendernodeFd;
            break;
        }
        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
            // Use kCapsetGfxStreamVulkan for now; Ranchu HWC needs to be modified to pass in
            // the right capset.
            auto device = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
            auto deviceHandle = device->getDeviceHandle();
            auto stream =
                createVirtioGpuAddressSpaceStream(kCapsetGfxStreamVulkan, getGlobalHealthMonitor());
            if (!stream) {
                ALOGE("Failed to create virtgpu AddressSpaceStream\n");
                return nullptr;
            }
            con->m_stream = stream;
            con->m_rendernodeFd = deviceHandle;
            break;
        }
        default:
            break;
    }

#if defined(ANDROID)
    con->m_grallocHelper.reset(gfxstream::createPlatformGralloc(con->m_rendernodeFd));
    if (!con->m_grallocHelper) {
        ALOGE("Failed to create platform Gralloc!");
        abort();
    }

    con->m_anwHelper.reset(gfxstream::createPlatformANativeWindowHelper());
    if (!con->m_anwHelper) {
        ALOGE("Failed to create platform ANativeWindowHelper!");
        abort();
    }
#endif

    con->m_syncHelper.reset(gfxstream::createPlatformSyncHelper());

    // send zero 'clientFlags' to the host.
    unsigned int *pClientFlags =
            (unsigned int *)con->m_stream->allocBuffer(sizeof(unsigned int));
    *pClientFlags = 0;
    con->m_stream->commitBuffer(sizeof(unsigned int));

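    // The Magma capset never uses a renderControl encoder; for the Vulkan capset, the
    // host's virtio-gpu capset reports whether renderControl can be skipped as well.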
    if (capset == kCapsetGfxStreamMagma) {
        noRenderControlEnc = 1;
    } else if (capset == kCapsetGfxStreamVulkan) {
        VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
        auto caps = instance->getCaps();
        noRenderControlEnc = caps.vulkanCapset.noRenderControlEnc;
    }

    auto fd = (connType == HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE) ? con->m_rendernodeFd : -1;
    processPipeInit(fd, connType, noRenderControlEnc);
    if (!noRenderControlEnc && capset == kCapsetGfxStreamVulkan) {
        con->rcEncoder();
    }

    return con;
}

HostConnection* HostConnection::get() { return getWithThreadInfo(getEGLThreadInfo(), kCapsetNone); }

HostConnection* HostConnection::getOrCreate(enum VirtGpuCapset capset) {
    return getWithThreadInfo(getEGLThreadInfo(), capset);
}

HostConnection* HostConnection::getWithThreadInfo(EGLThreadInfo* tinfo, enum VirtGpuCapset capset) {
    // Get thread info
    if (!tinfo) {
        return NULL;
    }

    if (tinfo->hostConn == NULL) {
        tinfo->hostConn = HostConnection::createUnique(capset);
    }

    return tinfo->hostConn.get();
}

void HostConnection::exit() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn.reset();
}

void HostConnection::exitUnclean() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn->exitUncleanly = true;
    tinfo->hostConn.reset();
}

// static
std::unique_ptr<HostConnection> HostConnection::createUnique(enum VirtGpuCapset capset) {
    return connect(capset);
}

GLEncoder *HostConnection::glEncoder()
{
    if (!m_glEnc) {
        m_glEnc = std::make_unique<GLEncoder>(m_stream, checksumHelper());
        DBG("HostConnection::glEncoder new encoder %p, tid %lu", m_glEnc, getCurrentThreadId());
        m_glEnc->setContextAccessor(s_getGLContext);
    }
    return m_glEnc.get();
}

GL2Encoder *HostConnection::gl2Encoder()
{
    if (!m_gl2Enc) {
        m_gl2Enc =
            std::make_unique<GL2Encoder>(m_stream, checksumHelper());
        DBG("HostConnection::gl2Encoder new encoder %p, tid %lu", m_gl2Enc, getCurrentThreadId());
        m_gl2Enc->setContextAccessor(s_getGL2Context);
        m_gl2Enc->setNoHostError(m_noHostError);
        m_gl2Enc->setDrawCallFlushInterval(
            getDrawCallFlushIntervalFromProperty());
        m_gl2Enc->setHasAsyncUnmapBuffer(m_rcEnc->hasAsyncUnmapBuffer());
        m_gl2Enc->setHasSyncBufferData(m_rcEnc->hasSyncBufferData());
    }
    return m_gl2Enc.get();
}

VkEncoder* HostConnection::vkEncoder() {
    if (!m_vkEnc) {
        m_vkEnc = new VkEncoder(m_stream, getGlobalHealthMonitor());
    }
    return m_vkEnc;
}

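// Lazily creates the renderControl encoder on first use and negotiates host feature
// support (sync, DMA, GLES max version, Vulkan-related capabilities, etc.) from the
// host extension string.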
ExtendedRCEncoderContext *HostConnection::rcEncoder()
{
    if (!m_rcEnc) {
        m_rcEnc = std::make_unique<ExtendedRCEncoderContext>(m_stream,
                                                             checksumHelper());

        ExtendedRCEncoderContext* rcEnc = m_rcEnc.get();
        setChecksumHelper(rcEnc);
        queryAndSetSyncImpl(rcEnc);
        queryAndSetDmaImpl(rcEnc);
        queryAndSetGLESMaxVersion(rcEnc);
        queryAndSetNoErrorState(rcEnc);
        queryAndSetHostCompositionImpl(rcEnc);
        queryAndSetDirectMemSupport(rcEnc);
        queryAndSetVulkanSupport(rcEnc);
        queryAndSetDeferredVulkanCommandsSupport(rcEnc);
        queryAndSetVulkanNullOptionalStringsSupport(rcEnc);
        queryAndSetVulkanCreateResourcesWithRequirementsSupport(rcEnc);
        queryAndSetVulkanIgnoredHandles(rcEnc);
        queryAndSetYUVCache(rcEnc);
        queryAndSetAsyncUnmapBuffer(rcEnc);
        queryAndSetVirtioGpuNext(rcEnc);
        queryHasSharedSlotsHostMemoryAllocator(rcEnc);
        queryAndSetVulkanFreeMemorySync(rcEnc);
        queryAndSetVirtioGpuNativeSync(rcEnc);
        queryAndSetVulkanShaderFloat16Int8Support(rcEnc);
        queryAndSetVulkanAsyncQueueSubmitSupport(rcEnc);
        queryAndSetHostSideTracingSupport(rcEnc);
        queryAndSetAsyncFrameCommands(rcEnc);
        queryAndSetVulkanQueueSubmitWithCommandsSupport(rcEnc);
        queryAndSetVulkanBatchedDescriptorSetUpdateSupport(rcEnc);
        queryAndSetSyncBufferData(rcEnc);
        queryAndSetVulkanAsyncQsri(rcEnc);
        queryAndSetReadColorBufferDma(rcEnc);
        queryAndSetHWCMultiConfigs(rcEnc);
        queryAndSetVulkanAuxCommandBufferMemory(rcEnc);
        queryVersion(rcEnc);

        rcEnc->rcSetPuid(rcEnc, getPuid());
    }
    return m_rcEnc.get();
}

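// Static accessors handed to the GL/GLES2 encoders via setContextAccessor() so they can
// retrieve the encoder instance belonging to the calling thread's HostConnection.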
gl_client_context_t *HostConnection::s_getGLContext()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_glEnc.get();
    }
    return NULL;
}

gl2_client_context_t *HostConnection::s_getGL2Context()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_gl2Enc.get();
    }
    return NULL;
}

const std::string& HostConnection::queryHostExtensions(ExtendedRCEncoderContext *rcEnc) {
    if (!m_hostExtensions.empty()) {
        return m_hostExtensions;
    }

    // Extension strings are usually quite long; preallocate enough here.
    std::string extensionsBuffer(1023, '\0');

    // Returns the required size including the 0-terminator, so
    // account for it when passing/using the sizes.
    int extensionSize = rcEnc->rcGetHostExtensionsString(rcEnc,
                                                         extensionsBuffer.size() + 1,
                                                         &extensionsBuffer[0]);
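    // A negative result means the buffer was too small; its magnitude is the required
    // size, so resize and query again.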
    if (extensionSize < 0) {
        extensionsBuffer.resize(-extensionSize);
        extensionSize = rcEnc->rcGetHostExtensionsString(rcEnc,
                                                         -extensionSize + 1,
                                                         &extensionsBuffer[0]);
    }

    if (extensionSize > 0) {
        extensionsBuffer.resize(extensionSize - 1);
        m_hostExtensions.swap(extensionsBuffer);
    }

    return m_hostExtensions;
}

void HostConnection::queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    DPRINT("HostComposition ext %s", hostExtensions.c_str());
    // Make sure V2 is checked before V1, as the host may declare support for both.
    if (hostExtensions.find(kHostCompositionV2) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V2);
    }
    else if (hostExtensions.find(kHostCompositionV1) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V1);
    }
    else {
        rcEnc->setHostComposition(HOST_COMPOSITION_NONE);
    }
}

void HostConnection::setChecksumHelper(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    // check the host supported version
    uint32_t checksumVersion = 0;
    const char* checksumPrefix = ChecksumCalculator::getMaxVersionStrPrefix();
    const char* glProtocolStr = strstr(hostExtensions.c_str(), checksumPrefix);
    if (glProtocolStr) {
        uint32_t maxVersion = ChecksumCalculator::getMaxVersion();
        sscanf(glProtocolStr+strlen(checksumPrefix), "%d", &checksumVersion);
        if (maxVersion < checksumVersion) {
            checksumVersion = maxVersion;
        }
        // The ordering of the following two commands matters!
        // Must tell the host first before setting it in the guest
        rcEnc->rcSelectChecksumHelper(rcEnc, checksumVersion, 0);
        m_checksumHelper.setVersion(checksumVersion);
    }
}

void HostConnection::queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kRCNativeSyncV4) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V4);
    } else if (hostExtensions.find(kRCNativeSyncV3) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V3);
    } else if (hostExtensions.find(kRCNativeSyncV2) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V2);
    } else {
        rcEnc->setSyncImpl(SYNC_IMPL_NONE);
    }
}

void HostConnection::queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kDmaExtStr_v1) != std::string::npos) {
        rcEnc->setDmaImpl(DMA_IMPL_v1);
    } else {
        rcEnc->setDmaImpl(DMA_IMPL_NONE);
    }
}

void HostConnection::queryAndSetGLESMaxVersion(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kGLESMaxVersion_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    } else if (hostExtensions.find(kGLESMaxVersion_3_0) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_0);
    } else if (hostExtensions.find(kGLESMaxVersion_3_1) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_1);
    } else if (hostExtensions.find(kGLESMaxVersion_3_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_2);
    } else {
        ALOGW("Unrecognized GLES max version string in extensions: %s",
              hostExtensions.c_str());
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    }
}

void HostConnection::queryAndSetNoErrorState(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kGLESUseHostError) != std::string::npos) {
        m_noHostError = false;
    }
}

void HostConnection::queryAndSetDirectMemSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kGLDirectMem) != std::string::npos) {
        rcEnc->featureInfo()->hasDirectMem = true;
    }
}

void HostConnection::queryAndSetVulkanSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkan) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkan = true;
    }
}

void HostConnection::queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kDeferredVulkanCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasDeferredVulkanCommands = true;
    }
}

void HostConnection::queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanNullOptionalStrings) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanNullOptionalStrings = true;
    }
}

void HostConnection::queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanCreateResourcesWithRequirements) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanCreateResourcesWithRequirements = true;
    }
}

void HostConnection::queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanIgnoredHandles) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanIgnoredHandles = true;
    }
}

void HostConnection::queryAndSetYUVCache(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kYUVCache) != std::string::npos) {
        rcEnc->featureInfo()->hasYUVCache = true;
    }
}

void HostConnection::queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kAsyncUnmapBuffer) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncUnmapBuffer = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNext(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVirtioGpuNext) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNext = true;
    }
}

void HostConnection::queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kHasSharedSlotsHostMemoryAllocator) != std::string::npos) {
        rcEnc->featureInfo()->hasSharedSlotsHostMemoryAllocator = true;
    }
}

void HostConnection::queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc) {
    const std::string& hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanFreeMemorySync) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanFreeMemorySync = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVirtioGpuNativeSync) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNativeSync = true;
    }
}

void HostConnection::queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanShaderFloat16Int8) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanShaderFloat16Int8 = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanAsyncQueueSubmit) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQueueSubmit = true;
    }
}

void HostConnection::queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kHostSideTracing) != std::string::npos) {
        rcEnc->featureInfo()->hasHostSideTracing = true;
    }
}

void HostConnection::queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kAsyncFrameCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncFrameCommands = true;
    }
}

void HostConnection::queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanQueueSubmitWithCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanQueueSubmitWithCommands = true;
    }
}

void HostConnection::queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanBatchedDescriptorSetUpdate) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanBatchedDescriptorSetUpdate = true;
    }
}

void HostConnection::queryAndSetSyncBufferData(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kSyncBufferData) != std::string::npos) {
        rcEnc->featureInfo()->hasSyncBufferData = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQsri(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kVulkanAsyncQsri) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQsri = true;
    }
}

void HostConnection::queryAndSetReadColorBufferDma(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kReadColorBufferDma) != std::string::npos) {
        rcEnc->featureInfo()->hasReadColorBufferDma = true;
    }
}

void HostConnection::queryAndSetHWCMultiConfigs(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    if (hostExtensions.find(kHWCMultiConfigs) != std::string::npos) {
        rcEnc->featureInfo()->hasHWCMultiConfigs = true;
    }
}

void HostConnection::queryAndSetVulkanAuxCommandBufferMemory(ExtendedRCEncoderContext* rcEnc) {
    std::string hostExtensions = queryHostExtensions(rcEnc);
    rcEnc->featureInfo()->hasVulkanAuxCommandMemory = hostExtensions.find(kVulkanAuxCommandMemory) != std::string::npos;
}


GLint HostConnection::queryVersion(ExtendedRCEncoderContext* rcEnc) {
    GLint version = m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    return version;
}