/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __COMMON_HOST_CONNECTION_H
#define __COMMON_HOST_CONNECTION_H

#include "EmulatorFeatureInfo.h"
#include "IOStream.h"
#include "renderControl_enc.h"
#include "ChecksumCalculator.h"
#ifdef __Fuchsia__
struct goldfish_dma_context;
#else
#include "goldfish_dma.h"
#endif

#include <cutils/native_handle.h>

#ifdef GFXSTREAM
#include <mutex>
#else
#include <utils/threads.h>
#endif

#include <memory>
#include <optional>
#include <cstring>
#include <string>

class GLEncoder;
struct gl_client_context_t;
class GL2Encoder;
struct gl2_client_context_t;

namespace gfxstream {
namespace vk {
class VkEncoder;
}  // namespace vk
}  // namespace gfxstream

// ExtendedRCEncoderContext is an extended version of renderControl_encoder_context_t
// that is used to track the features available on the host emulator.
class ExtendedRCEncoderContext : public renderControl_encoder_context_t {
public:
    ExtendedRCEncoderContext(IOStream *stream, ChecksumCalculator *checksumCalculator)
        : renderControl_encoder_context_t(stream, checksumCalculator),
          m_dmaCxt(NULL), m_dmaPtr(NULL), m_dmaPhysAddr(0) { }
    void setSyncImpl(SyncImpl syncImpl) { m_featureInfo.syncImpl = syncImpl; }
    void setDmaImpl(DmaImpl dmaImpl) { m_featureInfo.dmaImpl = dmaImpl; }
    void setHostComposition(HostComposition hostComposition) {
        m_featureInfo.hostComposition = hostComposition; }
    bool hasNativeSync() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V2; }
    bool hasNativeSyncV3() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V3; }
    bool hasNativeSyncV4() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V4; }
    bool hasVirtioGpuNativeSync() const { return m_featureInfo.hasVirtioGpuNativeSync; }
    bool hasHostCompositionV1() const {
        return m_featureInfo.hostComposition == HOST_COMPOSITION_V1; }
    bool hasHostCompositionV2() const {
        return m_featureInfo.hostComposition == HOST_COMPOSITION_V2; }
    bool hasYUVCache() const {
        return m_featureInfo.hasYUVCache; }
    bool hasAsyncUnmapBuffer() const {
        return m_featureInfo.hasAsyncUnmapBuffer; }
    bool hasHostSideTracing() const {
        return m_featureInfo.hasHostSideTracing;
    }
    bool hasAsyncFrameCommands() const {
        return m_featureInfo.hasAsyncFrameCommands;
    }
    bool hasSyncBufferData() const {
        return m_featureInfo.hasSyncBufferData; }
    bool hasHWCMultiConfigs() const {
        return m_featureInfo.hasHWCMultiConfigs;
    }
    DmaImpl getDmaVersion() const { return m_featureInfo.dmaImpl; }
    void bindDmaContext(struct goldfish_dma_context* cxt) { m_dmaCxt = cxt; }
    void bindDmaDirectly(void* dmaPtr, uint64_t dmaPhysAddr) {
        m_dmaPtr = dmaPtr;
        m_dmaPhysAddr = dmaPhysAddr;
    }
    virtual uint64_t lockAndWriteDma(void* data, uint32_t size) {
        if (m_dmaPtr && m_dmaPhysAddr) {
            if (data != m_dmaPtr) {
                memcpy(m_dmaPtr, data, size);
            }
            return m_dmaPhysAddr;
        } else if (m_dmaCxt) {
            return writeGoldfishDma(data, size, m_dmaCxt);
        } else {
            ALOGE("%s: ERROR: No DMA context bound!", __func__);
            return 0;
        }
    }
    void setGLESMaxVersion(GLESMaxVersion ver) { m_featureInfo.glesMaxVersion = ver; }
    GLESMaxVersion getGLESMaxVersion() const { return m_featureInfo.glesMaxVersion; }
    bool hasDirectMem() const {
#ifdef HOST_BUILD
        // Unit tests do not support restoring "guest" RAM because there is no VM.
        return false;
#else
        return m_featureInfo.hasDirectMem;
#endif
    }

    const EmulatorFeatureInfo* featureInfo_const() const { return &m_featureInfo; }
    EmulatorFeatureInfo* featureInfo() { return &m_featureInfo; }

private:
    static uint64_t writeGoldfishDma(void* data, uint32_t size,
                                     struct goldfish_dma_context* dmaCxt) {
#ifdef __Fuchsia__
        ALOGE("%s Not implemented!", __FUNCTION__);
        return 0u;
#else
        ALOGV("%s(data=%p, size=%u): call", __func__, data, size);

        goldfish_dma_write(dmaCxt, data, size);
        uint64_t paddr = goldfish_dma_guest_paddr(dmaCxt);

        ALOGV("%s: paddr=0x%llx", __func__, (unsigned long long)paddr);
        return paddr;
#endif
    }

    EmulatorFeatureInfo m_featureInfo;
    struct goldfish_dma_context* m_dmaCxt;
    void* m_dmaPtr;
    uint64_t m_dmaPhysAddr;
};
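
// Illustrative sketch (not part of the original header): how a caller is
// expected to use the DMA plumbing above. If a region was registered with
// bindDmaDirectly(), lockAndWriteDma() takes the zero-copy path and simply
// returns the bound guest physical address; otherwise it falls back to the
// goldfish DMA context bound via bindDmaContext(). The helper name
// uploadViaDma is hypothetical.
//
//   uint64_t uploadViaDma(ExtendedRCEncoderContext* rcEnc,
//                         void* pixels, uint32_t size) {
//       // Returns the guest physical address the host should read from,
//       // or 0 if no DMA path is bound.
//       return rcEnc->lockAndWriteDma(pixels, size);
//   }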
// Abstraction for gralloc handle conversion
class Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc, int width, int height, uint32_t glformat) = 0;
    virtual uint32_t getHostHandle(native_handle_t const* handle) = 0;
    virtual int getFormat(native_handle_t const* handle) = 0;
    virtual uint32_t getFormatDrmFourcc(native_handle_t const* /*handle*/) {
        // Equal to DRM_FORMAT_INVALID -- see <drm_fourcc.h>
        return 0;
    }
    virtual size_t getAllocatedSize(native_handle_t const* handle) = 0;
    virtual bool treatBlobAsImage() { return false; }
    virtual ~Gralloc() {}
};

// Abstraction for process pipe helper
class ProcessPipe {
public:
    virtual bool processPipeInit(int stream_handle, HostConnectionType connType,
                                 renderControl_encoder_context_t *rcEnc) = 0;
    virtual ~ProcessPipe() {}
};

struct EGLThreadInfo;
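
// Illustrative sketch (an assumption, not a real gfxstream class): implementing
// the Gralloc abstraction only requires the pure virtuals; the DRM fourcc and
// blob hooks have usable defaults. MyGralloc is hypothetical, and the
// rcCreateColorBuffer renderControl call is shown on the assumption that it is
// available on the encoder, as in the goldfish gralloc implementations.
//
//   class MyGralloc : public Gralloc {
//     public:
//       uint32_t createColorBuffer(ExtendedRCEncoderContext* rcEnc,
//                                  int width, int height,
//                                  uint32_t glformat) override {
//           // Ask the host to allocate a color buffer; the returned host
//           // handle is what getHostHandle() should later hand back.
//           return rcEnc->rcCreateColorBuffer(rcEnc, width, height, glformat);
//       }
//       // getHostHandle(), getFormat(), and getAllocatedSize() must also be
//       // implemented; they typically read fields the gralloc HAL cached in
//       // the native_handle_t.
//   };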

// Rutabaga capsets.
#define VIRTIO_GPU_CAPSET_NONE 0
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#define VIRTIO_GPU_CAPSET_GFXSTREAM 3
#define VIRTIO_GPU_CAPSET_VENUS 4
#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
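
// Illustrative sketch (not part of the original header): callers that know
// which virtio-gpu context type they need pass one of the capset IDs above
// to HostConnection::getOrCreate(), declared below; VIRTIO_GPU_CAPSET_NONE
// keeps the default behavior.
//
//   HostConnection* hc = HostConnection::getOrCreate(VIRTIO_GPU_CAPSET_GFXSTREAM);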

class HostConnection
{
public:
    static HostConnection *get();
    static HostConnection *getOrCreate(uint32_t capset_id);

    static HostConnection *getWithThreadInfo(EGLThreadInfo* tInfo,
                                             uint32_t capset_id = VIRTIO_GPU_CAPSET_NONE);
    static void exit();
    static void exitUnclean(); // for testing purposes

    static std::unique_ptr<HostConnection> createUnique(uint32_t capset_id = VIRTIO_GPU_CAPSET_NONE);
    HostConnection(const HostConnection&) = delete;

    ~HostConnection();

    GLEncoder *glEncoder();
    GL2Encoder *gl2Encoder();
    gfxstream::vk::VkEncoder *vkEncoder();
    ExtendedRCEncoderContext *rcEncoder();

    int getRendernodeFd() { return m_rendernodeFd; }

    ChecksumCalculator *checksumHelper() { return &m_checksumHelper; }
    Gralloc *grallocHelper() { return m_grallocHelper; }

    void flush() {
        if (m_stream) {
            m_stream->flush();
        }
    }

    void setGrallocOnly(bool gralloc_only) {
        m_grallocOnly = gralloc_only;
    }

    bool isGrallocOnly() const { return m_grallocOnly; }

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wthread-safety-analysis"
#endif
    void lock() const { m_lock.lock(); }
    void unlock() const { m_lock.unlock(); }
#ifdef __clang__
#pragma clang diagnostic pop
#endif

    bool exitUncleanly; // for testing purposes

private:
    // Returns nullptr if the connection failed.
    static std::unique_ptr<HostConnection> connect(uint32_t capset_id);

    HostConnection();
    static gl_client_context_t *s_getGLContext();
    static gl2_client_context_t *s_getGL2Context();

    const std::string& queryHostExtensions(ExtendedRCEncoderContext *rcEnc);
    // setChecksumHelper initializes the GL communication protocol for checksums;
    // it should be called when m_rcEnc is created.
    void setChecksumHelper(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetGLESMaxVersion(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetNoErrorState(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDirectMemSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetYUVCache(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVirtioGpuNext(ExtendedRCEncoderContext *rcEnc);
    void queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetSyncBufferData(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanAsyncQsri(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetReadColorBufferDma(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHWCMultiConfigs(ExtendedRCEncoderContext* rcEnc);
    void queryAndSetVulkanAuxCommandBufferMemory(ExtendedRCEncoderContext* rcEnc);
    GLint queryVersion(ExtendedRCEncoderContext* rcEnc);

private:
    HostConnectionType m_connectionType;
    GrallocType m_grallocType;

    // intrusively refcounted
    IOStream* m_stream = nullptr;

    std::unique_ptr<GLEncoder> m_glEnc;
    std::unique_ptr<GL2Encoder> m_gl2Enc;

    // intrusively refcounted
    gfxstream::vk::VkEncoder* m_vkEnc = nullptr;
    std::unique_ptr<ExtendedRCEncoderContext> m_rcEnc;

    ChecksumCalculator m_checksumHelper;
    Gralloc* m_grallocHelper = nullptr;
    ProcessPipe* m_processPipe = nullptr;
    std::string m_hostExtensions;
    bool m_grallocOnly;
    bool m_noHostError;
#ifdef GFXSTREAM
    mutable std::mutex m_lock;
#else
    mutable android::Mutex m_lock;
#endif
    int m_rendernodeFd;
};

#endif
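
// Illustrative usage sketch (not part of the original header): a typical
// rendering thread obtains the per-process connection, serializes access to
// the encoders with lock()/unlock(), and flushes the stream when done.
//
//   if (HostConnection* hc = HostConnection::get()) {
//       hc->lock();
//       ExtendedRCEncoderContext* rcEnc = hc->rcEncoder();
//       // ... encode renderControl commands through rcEnc ...
//       hc->unlock();
//       hc->flush();
//   }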