• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #ifndef __COMMON_HOST_CONNECTION_H
17 #define __COMMON_HOST_CONNECTION_H
18 
19 #include "EmulatorFeatureInfo.h"
20 #include "IOStream.h"
21 #include "renderControl_enc.h"
22 #include "ChecksumCalculator.h"
23 #ifdef __Fuchsia__
24 struct goldfish_dma_context;
25 #else
26 #include "goldfish_dma.h"
27 #endif
28 
29 #include <cutils/native_handle.h>
30 
31 #ifdef GFXSTREAM
32 #include <mutex>
33 #else
34 #include <utils/threads.h>
35 #endif
36 
37 #include <memory>
38 #include <cstring>
39 
40 class GLEncoder;
41 struct gl_client_context_t;
42 class GL2Encoder;
43 struct gl2_client_context_t;
44 
45 namespace goldfish_vk {
46 class VkEncoder;
47 }
48 
49 // ExtendedRCEncoderContext is an extended version of renderControl_encoder_context_t
50 // that will be used to track available emulator features.
51 class ExtendedRCEncoderContext : public renderControl_encoder_context_t {
52 public:
ExtendedRCEncoderContext(IOStream * stream,ChecksumCalculator * checksumCalculator)53     ExtendedRCEncoderContext(IOStream *stream, ChecksumCalculator *checksumCalculator)
54         : renderControl_encoder_context_t(stream, checksumCalculator),
55           m_dmaCxt(NULL), m_dmaPtr(NULL), m_dmaPhysAddr(0) { }
setSyncImpl(SyncImpl syncImpl)56     void setSyncImpl(SyncImpl syncImpl) { m_featureInfo.syncImpl = syncImpl; }
setDmaImpl(DmaImpl dmaImpl)57     void setDmaImpl(DmaImpl dmaImpl) { m_featureInfo.dmaImpl = dmaImpl; }
setHostComposition(HostComposition hostComposition)58     void setHostComposition(HostComposition hostComposition) {
59         m_featureInfo.hostComposition = hostComposition; }
hasNativeSync()60     bool hasNativeSync() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V2; }
hasNativeSyncV3()61     bool hasNativeSyncV3() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V3; }
hasNativeSyncV4()62     bool hasNativeSyncV4() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V4; }
hasVirtioGpuNativeSync()63     bool hasVirtioGpuNativeSync() const { return m_featureInfo.hasVirtioGpuNativeSync; }
hasHostCompositionV1()64     bool hasHostCompositionV1() const {
65         return m_featureInfo.hostComposition == HOST_COMPOSITION_V1; }
hasHostCompositionV2()66     bool hasHostCompositionV2() const {
67         return m_featureInfo.hostComposition == HOST_COMPOSITION_V2; }
hasYUVCache()68     bool hasYUVCache() const {
69         return m_featureInfo.hasYUVCache; }
hasAsyncUnmapBuffer()70     bool hasAsyncUnmapBuffer() const {
71         return m_featureInfo.hasAsyncUnmapBuffer; }
hasHostSideTracing()72     bool hasHostSideTracing() const {
73         return m_featureInfo.hasHostSideTracing;
74     }
hasAsyncFrameCommands()75     bool hasAsyncFrameCommands() const {
76         return m_featureInfo.hasAsyncFrameCommands;
77     }
hasSyncBufferData()78     bool hasSyncBufferData() const {
79         return m_featureInfo.hasSyncBufferData; }
hasHWCMultiConfigs()80     bool hasHWCMultiConfigs() const {
81         return m_featureInfo.hasHWCMultiConfigs;
82     }
getDmaVersion()83     DmaImpl getDmaVersion() const { return m_featureInfo.dmaImpl; }
bindDmaContext(struct goldfish_dma_context * cxt)84     void bindDmaContext(struct goldfish_dma_context* cxt) { m_dmaCxt = cxt; }
bindDmaDirectly(void * dmaPtr,uint64_t dmaPhysAddr)85     void bindDmaDirectly(void* dmaPtr, uint64_t dmaPhysAddr) {
86         m_dmaPtr = dmaPtr;
87         m_dmaPhysAddr = dmaPhysAddr;
88     }
lockAndWriteDma(void * data,uint32_t size)89     virtual uint64_t lockAndWriteDma(void* data, uint32_t size) {
90         if (m_dmaPtr && m_dmaPhysAddr) {
91             if (data != m_dmaPtr) {
92                 memcpy(m_dmaPtr, data, size);
93             }
94             return m_dmaPhysAddr;
95         } else if (m_dmaCxt) {
96             return writeGoldfishDma(data, size, m_dmaCxt);
97         } else {
98             ALOGE("%s: ERROR: No DMA context bound!", __func__);
99             return 0;
100         }
101     }
setGLESMaxVersion(GLESMaxVersion ver)102     void setGLESMaxVersion(GLESMaxVersion ver) { m_featureInfo.glesMaxVersion = ver; }
getGLESMaxVersion()103     GLESMaxVersion getGLESMaxVersion() const { return m_featureInfo.glesMaxVersion; }
hasDirectMem()104     bool hasDirectMem() const {
105 #ifdef HOST_BUILD
106         // unit tests do not support restoring "guest" ram because there is no VM
107         return false;
108 #else
109         return m_featureInfo.hasDirectMem;
110 #endif
111     }
112 
featureInfo_const()113     const EmulatorFeatureInfo* featureInfo_const() const { return &m_featureInfo; }
featureInfo()114     EmulatorFeatureInfo* featureInfo() { return &m_featureInfo; }
115 private:
writeGoldfishDma(void * data,uint32_t size,struct goldfish_dma_context * dmaCxt)116     static uint64_t writeGoldfishDma(void* data, uint32_t size,
117                                      struct goldfish_dma_context* dmaCxt) {
118 #ifdef __Fuchsia__
119         ALOGE("%s Not implemented!", __FUNCTION__);
120         return 0u;
121 #else
122         ALOGV("%s(data=%p, size=%u): call", __func__, data, size);
123 
124         goldfish_dma_write(dmaCxt, data, size);
125         uint64_t paddr = goldfish_dma_guest_paddr(dmaCxt);
126 
127         ALOGV("%s: paddr=0x%llx", __func__, (unsigned long long)paddr);
128         return paddr;
129 #endif
130     }
131 
132     EmulatorFeatureInfo m_featureInfo;
133     struct goldfish_dma_context* m_dmaCxt;
134     void* m_dmaPtr;
135     uint64_t m_dmaPhysAddr;
136 };
137 
// Abstraction for gralloc handle conversion.
// Implementations live elsewhere; the comments below describe the expected
// contract as far as it can be read from the declarations.
class Gralloc {
public:
    // Creates a color buffer of |width| x |height| with GL format |glformat|
    // through the render-control encoder; returns its handle.
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc, int width, int height, uint32_t glformat) = 0;
    // Returns the host-side handle associated with a gralloc native_handle_t.
    virtual uint32_t getHostHandle(native_handle_t const* handle) = 0;
    // Returns the pixel format recorded in |handle|.
    virtual int getFormat(native_handle_t const* handle) = 0;
    // Returns the size, in bytes, of the allocation behind |handle|.
    virtual size_t getAllocatedSize(native_handle_t const* handle) = 0;
    // Virtual destructor: instances are deleted through this base.
    virtual ~Gralloc() {}
};
148 
// Abstraction for process pipe helper.
class ProcessPipe {
public:
    // Performs process-pipe initialization for the given stream handle and
    // connection type; |rcEnc| gives implementations access to the host.
    // Returns false on failure (inferred from the bool return — confirm
    // against implementations).
    virtual bool processPipeInit(int stream_handle, HostConnectionType connType, renderControl_encoder_context_t *rcEnc) = 0;
    // Virtual destructor: instances are deleted through this base.
    virtual ~ProcessPipe() {}
};
155 
156 struct EGLThreadInfo;
157 
158 // Rutabaga capsets.
159 #define VIRTIO_GPU_CAPSET_NONE 0
160 #define VIRTIO_GPU_CAPSET_VIRGL 1
161 #define VIRTIO_GPU_CAPSET_VIRGL2 2
162 #define VIRTIO_GPU_CAPSET_GFXSTREAM 3
163 #define VIRTIO_GPU_CAPSET_VENUS 4
164 #define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
165 
166 class HostConnection
167 {
168 public:
169     static HostConnection *get();
170     static HostConnection *getOrCreate(uint32_t capset_id);
171 
172     static HostConnection *getWithThreadInfo(EGLThreadInfo* tInfo,
173                                              uint32_t capset_id = VIRTIO_GPU_CAPSET_NONE);
174     static void exit();
175     static void exitUnclean(); // for testing purposes
176 
177     static std::unique_ptr<HostConnection> createUnique(uint32_t capset_id = VIRTIO_GPU_CAPSET_NONE);
178     HostConnection(const HostConnection&) = delete;
179 
180     ~HostConnection();
181 
182     GLEncoder *glEncoder();
183     GL2Encoder *gl2Encoder();
184     goldfish_vk::VkEncoder *vkEncoder();
185     ExtendedRCEncoderContext *rcEncoder();
186 
getRendernodeFd()187     int getRendernodeFd() { return m_rendernodeFd; }
188 
checksumHelper()189     ChecksumCalculator *checksumHelper() { return &m_checksumHelper; }
grallocHelper()190     Gralloc *grallocHelper() { return m_grallocHelper; }
191 
flush()192     void flush() {
193         if (m_stream) {
194             m_stream->flush();
195         }
196     }
197 
setGrallocOnly(bool gralloc_only)198     void setGrallocOnly(bool gralloc_only) {
199         m_grallocOnly = gralloc_only;
200     }
201 
isGrallocOnly()202     bool isGrallocOnly() const { return m_grallocOnly; }
203 
204 #ifdef __clang__
205 #pragma clang diagnostic push
206 #pragma clang diagnostic ignored "-Wthread-safety-analysis"
207 #endif
lock()208     void lock() const { m_lock.lock(); }
unlock()209     void unlock() const { m_lock.unlock(); }
210 #ifdef __clang__
211 #pragma clang diagnostic pop
212 #endif
213 
214     bool exitUncleanly; // for testing purposes
215 
216 private:
217     // If the connection failed, |conn| is deleted.
218     // Returns NULL if connection failed.
219     static std::unique_ptr<HostConnection> connect(uint32_t capset_id);
220 
221     HostConnection();
222     static gl_client_context_t  *s_getGLContext();
223     static gl2_client_context_t *s_getGL2Context();
224 
225     const std::string& queryGLExtensions(ExtendedRCEncoderContext *rcEnc);
226     // setProtocol initilizes GL communication protocol for checksums
227     // should be called when m_rcEnc is created
228     void setChecksumHelper(ExtendedRCEncoderContext *rcEnc);
229     void queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc);
230     void queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc);
231     void queryAndSetGLESMaxVersion(ExtendedRCEncoderContext *rcEnc);
232     void queryAndSetNoErrorState(ExtendedRCEncoderContext *rcEnc);
233     void queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc);
234     void queryAndSetDirectMemSupport(ExtendedRCEncoderContext *rcEnc);
235     void queryAndSetVulkanSupport(ExtendedRCEncoderContext *rcEnc);
236     void queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext *rcEnc);
237     void queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext *rcEnc);
238     void queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext *rcEnc);
239     void queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext *rcEnc);
240     void queryAndSetYUVCache(ExtendedRCEncoderContext *mrcEnc);
241     void queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext *rcEnc);
242     void queryAndSetVirtioGpuNext(ExtendedRCEncoderContext *rcEnc);
243     void queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc);
244     void queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc);
245     void queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext *rcEnc);
246     void queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext *rcEnc);
247     void queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext *rcEnc);
248     void queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext *rcEnc);
249     void queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext *rcEnc);
250     void queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext *rcEnc);
251     void queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext *rcEnc);
252     void queryAndSetSyncBufferData(ExtendedRCEncoderContext *rcEnc);
253     void queryAndSetVulkanAsyncQsri(ExtendedRCEncoderContext *rcEnc);
254     void queryAndSetReadColorBufferDma(ExtendedRCEncoderContext *rcEnc);
255     void queryAndSetHWCMultiConfigs(ExtendedRCEncoderContext* rcEnc);
256     GLint queryVersion(ExtendedRCEncoderContext* rcEnc);
257 
258 private:
259     HostConnectionType m_connectionType;
260     GrallocType m_grallocType;
261 
262     // intrusively refcounted
263     IOStream* m_stream = nullptr;
264 
265     std::unique_ptr<GLEncoder> m_glEnc;
266     std::unique_ptr<GL2Encoder> m_gl2Enc;
267 
268     // intrusively refcounted
269     goldfish_vk::VkEncoder* m_vkEnc = nullptr;
270     std::unique_ptr<ExtendedRCEncoderContext> m_rcEnc;
271 
272     ChecksumCalculator m_checksumHelper;
273     Gralloc* m_grallocHelper = nullptr;
274     ProcessPipe* m_processPipe = nullptr;
275     std::string m_glExtensions;
276     bool m_grallocOnly;
277     bool m_noHostError;
278 #ifdef GFXSTREAM
279     mutable std::mutex m_lock;
280 #else
281     mutable android::Mutex m_lock;
282 #endif
283     int m_rendernodeFd;
284 };
285 
286 #endif
287