/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "HostConnection.h"

#include "cutils/properties.h"

#ifdef HOST_BUILD
#include "android/base/Tracing.h"
#endif

#ifdef GOLDFISH_NO_GL
struct gl_client_context_t {
    int placeholder;
};
class GLEncoder : public gl_client_context_t {
public:
    GLEncoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl_client_context_t *()) { }
};
struct gl2_client_context_t {
    int placeholder;
};
class GL2Encoder : public gl2_client_context_t {
public:
    GL2Encoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl2_client_context_t *()) { }
    void setNoHostError(bool) { }
    void setDrawCallFlushInterval(uint32_t) { }
    void setHasAsyncUnmapBuffer(int) { }
    void setHasSyncBufferData(int) { }
};
#else
#include "GLEncoder.h"
#include "GL2Encoder.h"
#endif

#ifdef GFXSTREAM
#include "VkEncoder.h"
#include "AddressSpaceStream.h"
#else
namespace goldfish_vk {
struct VkEncoder {
    VkEncoder(IOStream*) { }
    void decRef() { }
    int placeholder;
};
} // namespace goldfish_vk
class QemuPipeStream;
typedef QemuPipeStream AddressSpaceStream;
AddressSpaceStream* createAddressSpaceStream(size_t bufSize) {
    ALOGE("%s: FATAL: Trying to create ASG stream in unsupported build\n", __func__);
    abort();
}
AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t bufSize) {
    ALOGE("%s: FATAL: Trying to create virtgpu ASG stream in unsupported build\n", __func__);
    abort();
}
#endif

using goldfish_vk::VkEncoder;

#include "ProcessPipe.h"
#include "QemuPipeStream.h"
#include "TcpStream.h"
#include "ThreadInfo.h"
#include <gralloc_cb_bp.h>
#include <unistd.h>

#ifdef VIRTIO_GPU

#include "VirtioGpuStream.h"
#include "VirtioGpuPipeStream.h"
#include "virtgpu_drm.h"

#include <cros_gralloc_handle.h>
#include <xf86drm.h>

#endif

#undef LOG_TAG
#define LOG_TAG "HostConnection"
#if PLATFORM_SDK_VERSION < 26
#include <cutils/log.h>
#else
#include <log/log.h>
#endif

#define STREAM_BUFFER_SIZE  (4*1024*1024)
#define STREAM_PORT_NUM     22468

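// Picks the guest<->host transport. On Android and host builds the boot
// properties are consulted in order (ro.boot.qemu.gltransport.name, then
// ro.boot.qemu.gltransport, then ro.boot.hardware.gltransport); an empty or
// unrecognized value falls back to the QEMU pipe. Fuchsia always uses the
// address-space device; other guests default to the virtio-gpu ASG transport.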
static HostConnectionType getConnectionTypeFromProperty() {
#ifdef __Fuchsia__
    return HOST_CONNECTION_ADDRESS_SPACE;
#elif defined(__ANDROID__) || defined(HOST_BUILD)
    char transportValue[PROPERTY_VALUE_MAX] = "";

    do {
        property_get("ro.boot.qemu.gltransport.name", transportValue, "");
        if (transportValue[0]) { break; }

        property_get("ro.boot.qemu.gltransport", transportValue, "");
        if (transportValue[0]) { break; }

        property_get("ro.boot.hardware.gltransport", transportValue, "");
    } while (false);

    if (!transportValue[0]) return HOST_CONNECTION_QEMU_PIPE;

    if (!strcmp("tcp", transportValue)) return HOST_CONNECTION_TCP;
    if (!strcmp("pipe", transportValue)) return HOST_CONNECTION_QEMU_PIPE;
    if (!strcmp("virtio-gpu", transportValue)) return HOST_CONNECTION_VIRTIO_GPU;
    if (!strcmp("asg", transportValue)) return HOST_CONNECTION_ADDRESS_SPACE;
    if (!strcmp("virtio-gpu-pipe", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_PIPE;
    if (!strcmp("virtio-gpu-asg", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;

    return HOST_CONNECTION_QEMU_PIPE;
#else
    return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
#endif
}

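// Roughly, how often (in draw calls) the GLES2 encoder forces a flush to the
// host; overridable via ro.boot.qemu.gltransport.drawFlushInterval, with a
// default of 800 when the property is unset or invalid.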
static uint32_t getDrawCallFlushIntervalFromProperty() {
    constexpr uint32_t kDefaultValue = 800;

    char flushValue[PROPERTY_VALUE_MAX] = "";
    property_get("ro.boot.qemu.gltransport.drawFlushInterval", flushValue, "");
    if (!flushValue[0]) return kDefaultValue;

    const long interval = strtol(flushValue, 0, 10);
    return (interval > 0) ? uint32_t(interval) : kDefaultValue;
}

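// Selects the gralloc flavor from ro.hardware.gralloc: "minigbm" selects the
// DRM/virtio-gpu backed helper below, anything else (including unset) means
// the goldfish/ranchu gralloc.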
static GrallocType getGrallocTypeFromProperty() {
    char value[PROPERTY_VALUE_MAX] = "";
    property_get("ro.hardware.gralloc", value, "");

    if (!value[0]) return GRALLOC_TYPE_RANCHU;

    if (!strcmp("ranchu", value)) return GRALLOC_TYPE_RANCHU;
    if (!strcmp("minigbm", value)) return GRALLOC_TYPE_MINIGBM;
    return GRALLOC_TYPE_RANCHU;
}

class GoldfishGralloc : public Gralloc
{
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc,
        int width, int height, uint32_t glformat) {
        return rcEnc->rcCreateColorBuffer(
            rcEnc, width, height, glformat);
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle)
    {
        return cb_handle_t::from(handle)->hostHandle;
    }

    virtual int getFormat(native_handle_t const* handle)
    {
        return cb_handle_t::from(handle)->format;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle)
    {
        return static_cast<size_t>(cb_handle_t::from(handle)->allocatedSize());
    }
};

static inline uint32_t align_up(uint32_t n, uint32_t a) {
    return ((n + a - 1) / a) * a;
}

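// With minigbm, color buffers are real virtio-gpu resources, so creation and
// lookups go through DRM ioctls on the rendernode fd rather than through the
// render control encoder used by GoldfishGralloc above.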
#if defined(VIRTIO_GPU)

class MinigbmGralloc : public Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext*,
        int width, int height, uint32_t glformat) {

        // Only supported format for pbuffers in gfxstream
        // should be RGBA8
        const uint32_t kGlRGB = 0x1907;
        const uint32_t kGlRGBA = 0x1908;
        const uint32_t kVirglFormatRGBA = 67; // VIRGL_FORMAT_R8G8B8A8_UNORM;
        uint32_t virtgpu_format = 0;
        uint32_t bpp = 0;
        switch (glformat) {
            case kGlRGB:
                ALOGD("Note: egl wanted GL_RGB, still using RGBA");
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
            case kGlRGBA:
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
            default:
                ALOGD("Note: egl wanted 0x%x, still using RGBA", glformat);
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
        }
        const uint32_t kPipeTexture2D = 2; // PIPE_TEXTURE_2D
        const uint32_t kBindRenderTarget = 1 << 1; // VIRGL_BIND_RENDER_TARGET
        struct drm_virtgpu_resource_create res_create;
        memset(&res_create, 0, sizeof(res_create));
        res_create.target = kPipeTexture2D;
        res_create.format = virtgpu_format;
        res_create.bind = kBindRenderTarget;
        res_create.width = width;
        res_create.height = height;
        res_create.depth = 1;
        res_create.array_size = 1;
        res_create.last_level = 0;
        res_create.nr_samples = 0;
        res_create.stride = bpp * width;
        res_create.size = align_up(bpp * width * height, PAGE_SIZE);

        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s (%d)\n", __func__,
                  strerror(errno), errno);
            abort();
        }

        return res_create.res_handle;
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle) {
        struct drm_virtgpu_resource_info info;
        if (!getResInfo(handle, &info)) {
            ALOGE("%s: failed to get resource info\n", __func__);
            return 0;
        }

        return info.res_handle;
    }

    virtual int getFormat(native_handle_t const* handle) {
        return ((cros_gralloc_handle *)handle)->droid_format;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle) {
        struct drm_virtgpu_resource_info info;
        if (!getResInfo(handle, &info)) {
            ALOGE("%s: failed to get resource info\n", __func__);
            return 0;
        }

        return info.size;
    }

    void setFd(int fd) { m_fd = fd; }

private:

    bool getResInfo(native_handle_t const* handle,
                    struct drm_virtgpu_resource_info* info) {
        memset(info, 0x0, sizeof(*info));
        if (m_fd < 0) {
            ALOGE("%s: Error, rendernode fd missing\n", __func__);
            return false;
        }

        struct drm_gem_close gem_close;
        memset(&gem_close, 0x0, sizeof(gem_close));

        cros_gralloc_handle const* cros_handle =
            reinterpret_cast<cros_gralloc_handle const*>(handle);

        uint32_t prime_handle;
        int ret = drmPrimeFDToHandle(m_fd, cros_handle->fds[0], &prime_handle);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_PRIME_FD_TO_HANDLE failed: %s (errno %d)\n",
                  __func__, strerror(errno), errno);
            return false;
        }
        struct ManagedDrmGem {
            ManagedDrmGem(const ManagedDrmGem&) = delete;
            ~ManagedDrmGem() {
                struct drm_gem_close gem_close {
                    .handle = m_prime_handle,
                    .pad = 0,
                };
                int ret = drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
                if (ret) {
                    ALOGE("%s: DRM_IOCTL_GEM_CLOSE failed on handle %" PRIu32 ": %s(%d).",
                          __func__, m_prime_handle, strerror(errno), errno);
                }
            }

            int m_fd;
            uint32_t m_prime_handle;
        } managed_prime_handle{
            .m_fd = m_fd,
            .m_prime_handle = prime_handle,
        };

        info->bo_handle = managed_prime_handle.m_prime_handle;

        struct drm_virtgpu_3d_wait virtgpuWait{
            .handle = managed_prime_handle.m_prime_handle,
            .flags = 0,
        };
        // This only works for host resources created by the VIRTGPU_RESOURCE_CREATE
        // ioctl. We need to use a different mechanism to synchronize with the host
        // if the minigbm gralloc switches to virtio-gpu blobs or the cross-domain
        // backend.
        ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_WAIT, &virtgpuWait);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_WAIT failed: %s(%d)", __func__, strerror(errno), errno);
            return false;
        }

        ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, info);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed: %s (errno %d)\n",
                  __func__, strerror(errno), errno);
            return false;
        }

        return true;
    }

    int m_fd = -1;
};

#else

class MinigbmGralloc : public Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext*,
        int width, int height, uint32_t glformat) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual int getFormat(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    void setFd(int fd) { m_fd = fd; }

private:

    int m_fd = -1;
};

#endif

class GoldfishProcessPipe : public ProcessPipe
{
public:
    bool processPipeInit(int stream_handle, HostConnectionType connType, renderControl_encoder_context_t *rcEnc)
    {
        return ::processPipeInit(stream_handle, connType, rcEnc);
    }

};

static GoldfishGralloc m_goldfishGralloc;
static GoldfishProcessPipe m_goldfishProcessPipe;

HostConnection::HostConnection() :
    exitUncleanly(false),
    m_checksumHelper(),
    m_glExtensions(),
    m_grallocOnly(true),
    m_noHostError(true),
    m_rendernodeFd(-1) {
#ifdef HOST_BUILD
    android::base::initializeTracing();
#endif
}

HostConnection::~HostConnection()
{
    // round-trip to ensure that queued commands have been processed
    // before process pipe closure is detected.
    if (m_rcEnc && !exitUncleanly) {
        (void)m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    }

    if (m_grallocType == GRALLOC_TYPE_MINIGBM) {
        delete m_grallocHelper;
    }

    if (m_vkEnc) {
        m_vkEnc->decRef();
    }

    if (m_stream) {
        m_stream->decRef();
    }
}

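// Opens a virtio-gpu DRM rendernode and, when a capset id is supplied, tries
// to initialize a per-context ring for it; a failed context init is logged
// and tolerated, and the fd is returned either way.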
#if defined(VIRTIO_GPU) && !defined(HOST_BUILD)
int virtgpuOpen(uint32_t capset_id) {
    int fd = drmOpenRender(128);
    if (fd < 0) {
        ALOGE("Failed to open rendernode: %s", strerror(errno));
        return fd;
    }

    if (capset_id) {
        int ret;
        struct drm_virtgpu_context_init init = {0};
        struct drm_virtgpu_context_set_param ctx_set_params[2] = {{0}};

        ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
        ctx_set_params[0].value = 1;
        init.num_params = 1;

        // TODO(b/218538495): A known issue in the 5.4 kernel will sometimes
        // result in capsets not being properly queried.
#if defined(__linux__) && !defined(__ANDROID__)
        ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
        ctx_set_params[1].value = capset_id;
        init.num_params++;
#endif

        init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
        ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
        if (ret) {
            ALOGE("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s, continuing without context...", strerror(errno));
        }
    }

    return fd;
}
#endif

// static
std::unique_ptr<HostConnection> HostConnection::connect(uint32_t capset_id) {
    const enum HostConnectionType connType = getConnectionTypeFromProperty();

    // Use "new" to access a non-public constructor.
    auto con = std::unique_ptr<HostConnection>(new HostConnection);
    switch (connType) {
        case HOST_CONNECTION_ADDRESS_SPACE: {
            auto stream = createAddressSpaceStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create AddressSpaceStream for host connection\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_ADDRESS_SPACE;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = stream;
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_QEMU_PIPE: {
            auto stream = new QemuPipeStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create QemuPipeStream for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (QemuPipeStream)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_QEMU_PIPE;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = stream;
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_TCP: {
#ifndef __ANDROID__
            ALOGE("Failed to create TCP connection on non-Android guest\n");
            return nullptr;
            break;
#else
            auto stream = new TcpStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create TcpStream for host connection\n");
                return nullptr;
            }

            if (stream->connect("10.0.2.2", STREAM_PORT_NUM) < 0) {
                ALOGE("Failed to connect to host (TcpStream)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_TCP;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = stream;
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
#endif
        }
#if defined(VIRTIO_GPU) && !defined(HOST_BUILD)
        case HOST_CONNECTION_VIRTIO_GPU: {
            auto stream = new VirtioGpuStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create VirtioGpu for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (VirtioGpu)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU;
            con->m_grallocType = GRALLOC_TYPE_MINIGBM;
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_processPipe = stream->getProcessPipe();
            con->m_stream = stream;
            con->m_rendernodeFd = rendernodeFd;
            MinigbmGralloc* m = new MinigbmGralloc;
            m->setFd(rendernodeFd);
            con->m_grallocHelper = m;
            break;
        }
        case HOST_CONNECTION_VIRTIO_GPU_PIPE: {
            auto stream = new VirtioGpuPipeStream(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create VirtioGpu for host connection\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (VirtioGpu)\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_PIPE;
            con->m_grallocType = getGrallocTypeFromProperty();
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_stream = stream;
            con->m_rendernodeFd = rendernodeFd;
            switch (con->m_grallocType) {
                case GRALLOC_TYPE_RANCHU:
                    con->m_grallocHelper = &m_goldfishGralloc;
                    break;
                case GRALLOC_TYPE_MINIGBM: {
                    MinigbmGralloc* m = new MinigbmGralloc;
                    m->setFd(rendernodeFd);
                    con->m_grallocHelper = m;
                    break;
                }
                default:
                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
                    abort();
            }
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
            struct StreamCreate streamCreate = {0};
            streamCreate.streamHandle = virtgpuOpen(capset_id);
            if (streamCreate.streamHandle < 0) {
                ALOGE("Failed to open virtgpu for ASG host connection\n");
                return nullptr;
            }

            auto stream = createVirtioGpuAddressSpaceStream(streamCreate);
            if (!stream) {
                ALOGE("Failed to create virtgpu AddressSpaceStream\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
            con->m_grallocType = getGrallocTypeFromProperty();
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_stream = stream;
            con->m_rendernodeFd = rendernodeFd;
            switch (con->m_grallocType) {
                case GRALLOC_TYPE_RANCHU:
                    con->m_grallocHelper = &m_goldfishGralloc;
                    break;
                case GRALLOC_TYPE_MINIGBM: {
                    MinigbmGralloc* m = new MinigbmGralloc;
                    m->setFd(rendernodeFd);
                    con->m_grallocHelper = m;
                    break;
                }
                default:
                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
                    abort();
            }
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
#endif // VIRTIO_GPU && !HOST_BUILD
        default:
            break;
    }

    // send zero 'clientFlags' to the host.
    unsigned int *pClientFlags =
            (unsigned int *)con->m_stream->allocBuffer(sizeof(unsigned int));
    *pClientFlags = 0;
    con->m_stream->commitBuffer(sizeof(unsigned int));
    return con;
}

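// Illustrative use from guest-side client code (a sketch, not part of this
// file): the per-thread connection is created lazily and the encoders hang
// off of it.
//
//   HostConnection* hc = HostConnection::get();          // per-thread instance
//   if (!hc) return;                                      // connection failed
//   ExtendedRCEncoderContext* rc = hc->rcEncoder();       // feature negotiation
//   GL2Encoder* gl2 = hc->gl2Encoder();                   // GLES2+ command stream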
HostConnection *HostConnection::get() {
    return getWithThreadInfo(getEGLThreadInfo(), VIRTIO_GPU_CAPSET_NONE);
}

HostConnection *HostConnection::getOrCreate(uint32_t capset_id) {
    return getWithThreadInfo(getEGLThreadInfo(), capset_id);
}

HostConnection *HostConnection::getWithThreadInfo(EGLThreadInfo* tinfo, uint32_t capset_id) {
    // Get thread info
    if (!tinfo) {
        return NULL;
    }

    if (tinfo->hostConn == NULL) {
        tinfo->hostConn = HostConnection::createUnique(capset_id);
    }

    return tinfo->hostConn.get();
}

void HostConnection::exit() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn.reset();
}

void HostConnection::exitUnclean() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn->exitUncleanly = true;
    tinfo->hostConn.reset();
}

// static
std::unique_ptr<HostConnection> HostConnection::createUnique(uint32_t capset_id) {
    return connect(capset_id);
}

GLEncoder *HostConnection::glEncoder()
{
    if (!m_glEnc) {
        m_glEnc = std::make_unique<GLEncoder>(m_stream, checksumHelper());
        DBG("HostConnection::glEncoder new encoder %p, tid %d",
            m_glEnc, getCurrentThreadId());
        m_glEnc->setContextAccessor(s_getGLContext);
    }
    return m_glEnc.get();
}

GL2Encoder *HostConnection::gl2Encoder()
{
    if (!m_gl2Enc) {
        m_gl2Enc =
            std::make_unique<GL2Encoder>(m_stream, checksumHelper());
        DBG("HostConnection::gl2Encoder new encoder %p, tid %d",
            m_gl2Enc, getCurrentThreadId());
        m_gl2Enc->setContextAccessor(s_getGL2Context);
        m_gl2Enc->setNoHostError(m_noHostError);
        m_gl2Enc->setDrawCallFlushInterval(
            getDrawCallFlushIntervalFromProperty());
        m_gl2Enc->setHasAsyncUnmapBuffer(m_rcEnc->hasAsyncUnmapBuffer());
        m_gl2Enc->setHasSyncBufferData(m_rcEnc->hasSyncBufferData());
    }
    return m_gl2Enc.get();
}

VkEncoder *HostConnection::vkEncoder()
{
    rcEncoder();
    if (!m_vkEnc) {
        m_vkEnc = new VkEncoder(m_stream);
    }
    return m_vkEnc;
}

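// Lazily constructs the render control encoder and, on first use, performs
// the one-time host feature negotiation (checksum version, sync/DMA impl,
// GLES max version, Vulkan-related capabilities) before initializing the
// process pipe.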
ExtendedRCEncoderContext *HostConnection::rcEncoder()
{
    if (!m_rcEnc) {
        m_rcEnc = std::make_unique<ExtendedRCEncoderContext>(m_stream,
                                                             checksumHelper());

        ExtendedRCEncoderContext* rcEnc = m_rcEnc.get();
        setChecksumHelper(rcEnc);
        queryAndSetSyncImpl(rcEnc);
        queryAndSetDmaImpl(rcEnc);
        queryAndSetGLESMaxVersion(rcEnc);
        queryAndSetNoErrorState(rcEnc);
        queryAndSetHostCompositionImpl(rcEnc);
        queryAndSetDirectMemSupport(rcEnc);
        queryAndSetVulkanSupport(rcEnc);
        queryAndSetDeferredVulkanCommandsSupport(rcEnc);
        queryAndSetVulkanNullOptionalStringsSupport(rcEnc);
        queryAndSetVulkanCreateResourcesWithRequirementsSupport(rcEnc);
        queryAndSetVulkanIgnoredHandles(rcEnc);
        queryAndSetYUVCache(rcEnc);
        queryAndSetAsyncUnmapBuffer(rcEnc);
        queryAndSetVirtioGpuNext(rcEnc);
        queryHasSharedSlotsHostMemoryAllocator(rcEnc);
        queryAndSetVulkanFreeMemorySync(rcEnc);
        queryAndSetVirtioGpuNativeSync(rcEnc);
        queryAndSetVulkanShaderFloat16Int8Support(rcEnc);
        queryAndSetVulkanAsyncQueueSubmitSupport(rcEnc);
        queryAndSetHostSideTracingSupport(rcEnc);
        queryAndSetAsyncFrameCommands(rcEnc);
        queryAndSetVulkanQueueSubmitWithCommandsSupport(rcEnc);
        queryAndSetVulkanBatchedDescriptorSetUpdateSupport(rcEnc);
        queryAndSetSyncBufferData(rcEnc);
        queryAndSetVulkanAsyncQsri(rcEnc);
        queryAndSetReadColorBufferDma(rcEnc);
        queryAndSetHWCMultiConfigs(rcEnc);
        queryVersion(rcEnc);
        if (m_processPipe) {
            auto fd = (m_connectionType == HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE) ? m_rendernodeFd : -1;
            m_processPipe->processPipeInit(fd, m_connectionType, rcEnc);
        }
    }
    return m_rcEnc.get();
}

gl_client_context_t *HostConnection::s_getGLContext()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_glEnc.get();
    }
    return NULL;
}

gl2_client_context_t *HostConnection::s_getGL2Context()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_gl2Enc.get();
    }
    return NULL;
}

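// The GL_EXTENSIONS string returned by the host doubles as the feature
// negotiation channel: the queryAndSet* helpers below simply look for
// host-advertised marker tokens in it.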
const std::string& HostConnection::queryGLExtensions(ExtendedRCEncoderContext *rcEnc) {
    if (!m_glExtensions.empty()) {
        return m_glExtensions;
    }

    // Extensions strings are usually quite long, preallocate enough here.
    std::string extensions_buffer(1023, '\0');

    // rcGetGLString() returns the required size including the 0-terminator,
    // so account for it when passing/using the sizes.
    int extensionSize = rcEnc->rcGetGLString(rcEnc, GL_EXTENSIONS,
                                             &extensions_buffer[0],
                                             extensions_buffer.size() + 1);
    if (extensionSize < 0) {
        extensions_buffer.resize(-extensionSize);
        extensionSize = rcEnc->rcGetGLString(rcEnc, GL_EXTENSIONS,
                                             &extensions_buffer[0],
                                            -extensionSize + 1);
    }

    if (extensionSize > 0) {
        extensions_buffer.resize(extensionSize - 1);
        m_glExtensions.swap(extensions_buffer);
    }

    return m_glExtensions;
}

void HostConnection::queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    ALOGD("HostComposition ext %s", glExtensions.c_str());
    // make sure V2 is checked before V1, as the host may declare support for both
    if (glExtensions.find(kHostCompositionV2) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V2);
    }
    else if (glExtensions.find(kHostCompositionV1) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V1);
    }
    else {
        rcEnc->setHostComposition(HOST_COMPOSITION_NONE);
    }
}

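// Checksum protocol negotiation: use the highest version supported by both
// sides, tell the host first, then enable the same version locally.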
void HostConnection::setChecksumHelper(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    // check the host supported version
    uint32_t checksumVersion = 0;
    const char* checksumPrefix = ChecksumCalculator::getMaxVersionStrPrefix();
    const char* glProtocolStr = strstr(glExtensions.c_str(), checksumPrefix);
    if (glProtocolStr) {
        uint32_t maxVersion = ChecksumCalculator::getMaxVersion();
        sscanf(glProtocolStr+strlen(checksumPrefix), "%d", &checksumVersion);
        if (maxVersion < checksumVersion) {
            checksumVersion = maxVersion;
        }
        // The ordering of the following two commands matters!
        // Must tell the host first before setting it in the guest
        rcEnc->rcSelectChecksumHelper(rcEnc, checksumVersion, 0);
        m_checksumHelper.setVersion(checksumVersion);
    }
}

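// Each helper below follows the same pattern: scan the cached extensions
// string for a feature token and, if present, flip the corresponding flag on
// the encoder's feature info (or select the matching implementation level).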
void HostConnection::queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kRCNativeSyncV4) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V4);
    } else if (glExtensions.find(kRCNativeSyncV3) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V3);
    } else if (glExtensions.find(kRCNativeSyncV2) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V2);
    } else {
        rcEnc->setSyncImpl(SYNC_IMPL_NONE);
    }
}

void HostConnection::queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kDmaExtStr_v1) != std::string::npos) {
        rcEnc->setDmaImpl(DMA_IMPL_v1);
    } else {
        rcEnc->setDmaImpl(DMA_IMPL_NONE);
    }
}

void HostConnection::queryAndSetGLESMaxVersion(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kGLESMaxVersion_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    } else if (glExtensions.find(kGLESMaxVersion_3_0) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_0);
    } else if (glExtensions.find(kGLESMaxVersion_3_1) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_1);
    } else if (glExtensions.find(kGLESMaxVersion_3_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_2);
    } else {
        ALOGW("Unrecognized GLES max version string in extensions: %s",
              glExtensions.c_str());
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    }
}

void HostConnection::queryAndSetNoErrorState(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kGLESUseHostError) != std::string::npos) {
        m_noHostError = false;
    }
}

void HostConnection::queryAndSetDirectMemSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kGLDirectMem) != std::string::npos) {
        rcEnc->featureInfo()->hasDirectMem = true;
    }
}

void HostConnection::queryAndSetVulkanSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkan) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkan = true;
    }
}

void HostConnection::queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kDeferredVulkanCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasDeferredVulkanCommands = true;
    }
}

void HostConnection::queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanNullOptionalStrings) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanNullOptionalStrings = true;
    }
}

void HostConnection::queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanCreateResourcesWithRequirements) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanCreateResourcesWithRequirements = true;
    }
}

void HostConnection::queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanIgnoredHandles) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanIgnoredHandles = true;
    }
}

void HostConnection::queryAndSetYUVCache(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kYUVCache) != std::string::npos) {
        rcEnc->featureInfo()->hasYUVCache = true;
    }
}

void HostConnection::queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kAsyncUnmapBuffer) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncUnmapBuffer = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNext(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVirtioGpuNext) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNext = true;
    }
}

void HostConnection::queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kHasSharedSlotsHostMemoryAllocator) != std::string::npos) {
        rcEnc->featureInfo()->hasSharedSlotsHostMemoryAllocator = true;
    }
}

void HostConnection::queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanFreeMemorySync) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanFreeMemorySync = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVirtioGpuNativeSync) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNativeSync = true;
    }
}

void HostConnection::queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanShaderFloat16Int8) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanShaderFloat16Int8 = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanAsyncQueueSubmit) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQueueSubmit = true;
    }
}

void HostConnection::queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kHostSideTracing) != std::string::npos) {
        rcEnc->featureInfo()->hasHostSideTracing = true;
    }
}

void HostConnection::queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kAsyncFrameCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncFrameCommands = true;
    }
}

void HostConnection::queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanQueueSubmitWithCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanQueueSubmitWithCommands = true;
    }
}

void HostConnection::queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanBatchedDescriptorSetUpdate) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanBatchedDescriptorSetUpdate = true;
    }
}

void HostConnection::queryAndSetSyncBufferData(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kSyncBufferData) != std::string::npos) {
        rcEnc->featureInfo()->hasSyncBufferData = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQsri(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanAsyncQsri) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQsri = true;
    }
}

void HostConnection::queryAndSetReadColorBufferDma(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kReadColorBufferDma) != std::string::npos) {
        rcEnc->featureInfo()->hasReadColorBufferDma = true;
    }
}

void HostConnection::queryAndSetHWCMultiConfigs(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kHWCMultiConfigs) != std::string::npos) {
        rcEnc->featureInfo()->hasHWCMultiConfigs = true;
    }
}

GLint HostConnection::queryVersion(ExtendedRCEncoderContext* rcEnc) {
    GLint version = m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    return version;
}
1033