• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef __COMMON_HOST_CONNECTION_H
17 #define __COMMON_HOST_CONNECTION_H
18 
19 #include "EmulatorFeatureInfo.h"
20 #include "IOStream.h"
21 #include "renderControl_enc.h"
22 #include "ChecksumCalculator.h"
23 #ifdef __Fuchsia__
24 struct goldfish_dma_context;
25 #else
26 #include "goldfish_dma.h"
27 #endif
28 
29 #include <cutils/native_handle.h>
30 
31 #ifdef GFXSTREAM
32 #include <mutex>
33 #else
34 #include <utils/threads.h>
35 #endif
36 
37 #include <memory>
38 #include <string>
39 
40 class GLEncoder;
41 struct gl_client_context_t;
42 class GL2Encoder;
43 struct gl2_client_context_t;
44 
45 namespace goldfish_vk {
46 class VkEncoder;
47 }
48 
49 // ExtendedRCEncoderContext is an extended version of renderControl_encoder_context_t
50 // that will be used to track available emulator features.
51 class ExtendedRCEncoderContext : public renderControl_encoder_context_t {
52 public:
ExtendedRCEncoderContext(IOStream * stream,ChecksumCalculator * checksumCalculator)53     ExtendedRCEncoderContext(IOStream *stream, ChecksumCalculator *checksumCalculator)
54         : renderControl_encoder_context_t(stream, checksumCalculator),
55           m_dmaCxt(NULL), m_dmaPtr(NULL), m_dmaPhysAddr(0) { }
setSyncImpl(SyncImpl syncImpl)56     void setSyncImpl(SyncImpl syncImpl) { m_featureInfo.syncImpl = syncImpl; }
setDmaImpl(DmaImpl dmaImpl)57     void setDmaImpl(DmaImpl dmaImpl) { m_featureInfo.dmaImpl = dmaImpl; }
setHostComposition(HostComposition hostComposition)58     void setHostComposition(HostComposition hostComposition) {
59         m_featureInfo.hostComposition = hostComposition; }
hasNativeSync()60     bool hasNativeSync() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V2; }
hasNativeSyncV3()61     bool hasNativeSyncV3() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V3; }
hasNativeSyncV4()62     bool hasNativeSyncV4() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V4; }
hasVirtioGpuNativeSync()63     bool hasVirtioGpuNativeSync() const { return m_featureInfo.hasVirtioGpuNativeSync; }
hasHostCompositionV1()64     bool hasHostCompositionV1() const {
65         return m_featureInfo.hostComposition == HOST_COMPOSITION_V1; }
hasHostCompositionV2()66     bool hasHostCompositionV2() const {
67         return m_featureInfo.hostComposition == HOST_COMPOSITION_V2; }
hasYUVCache()68     bool hasYUVCache() const {
69         return m_featureInfo.hasYUVCache; }
hasAsyncUnmapBuffer()70     bool hasAsyncUnmapBuffer() const {
71         return m_featureInfo.hasAsyncUnmapBuffer; }
hasHostSideTracing()72     bool hasHostSideTracing() const {
73         return m_featureInfo.hasHostSideTracing;
74     }
hasAsyncFrameCommands()75     bool hasAsyncFrameCommands() const {
76         return m_featureInfo.hasAsyncFrameCommands;
77     }
hasSyncBufferData()78     bool hasSyncBufferData() const {
79         return m_featureInfo.hasSyncBufferData; }
getDmaVersion()80     DmaImpl getDmaVersion() const { return m_featureInfo.dmaImpl; }
bindDmaContext(struct goldfish_dma_context * cxt)81     void bindDmaContext(struct goldfish_dma_context* cxt) { m_dmaCxt = cxt; }
bindDmaDirectly(void * dmaPtr,uint64_t dmaPhysAddr)82     void bindDmaDirectly(void* dmaPtr, uint64_t dmaPhysAddr) {
83         m_dmaPtr = dmaPtr;
84         m_dmaPhysAddr = dmaPhysAddr;
85     }
lockAndWriteDma(void * data,uint32_t size)86     virtual uint64_t lockAndWriteDma(void* data, uint32_t size) {
87         if (m_dmaPtr && m_dmaPhysAddr) {
88             if (data != m_dmaPtr) {
89                 memcpy(m_dmaPtr, data, size);
90             }
91             return m_dmaPhysAddr;
92         } else if (m_dmaCxt) {
93             return writeGoldfishDma(data, size, m_dmaCxt);
94         } else {
95             ALOGE("%s: ERROR: No DMA context bound!", __func__);
96             return 0;
97         }
98     }
setGLESMaxVersion(GLESMaxVersion ver)99     void setGLESMaxVersion(GLESMaxVersion ver) { m_featureInfo.glesMaxVersion = ver; }
getGLESMaxVersion()100     GLESMaxVersion getGLESMaxVersion() const { return m_featureInfo.glesMaxVersion; }
hasDirectMem()101     bool hasDirectMem() const {
102 #ifdef HOST_BUILD
103         // unit tests do not support restoring "guest" ram because there is no VM
104         return false;
105 #else
106         return m_featureInfo.hasDirectMem;
107 #endif
108     }
109 
featureInfo_const()110     const EmulatorFeatureInfo* featureInfo_const() const { return &m_featureInfo; }
featureInfo()111     EmulatorFeatureInfo* featureInfo() { return &m_featureInfo; }
112 private:
writeGoldfishDma(void * data,uint32_t size,struct goldfish_dma_context * dmaCxt)113     static uint64_t writeGoldfishDma(void* data, uint32_t size,
114                                      struct goldfish_dma_context* dmaCxt) {
115 #ifdef __Fuchsia__
116         ALOGE("%s Not implemented!", __FUNCTION__);
117         return 0u;
118 #else
119         ALOGV("%s(data=%p, size=%u): call", __func__, data, size);
120 
121         goldfish_dma_write(dmaCxt, data, size);
122         uint64_t paddr = goldfish_dma_guest_paddr(dmaCxt);
123 
124         ALOGV("%s: paddr=0x%llx", __func__, (unsigned long long)paddr);
125         return paddr;
126 #endif
127     }
128 
129     EmulatorFeatureInfo m_featureInfo;
130     struct goldfish_dma_context* m_dmaCxt;
131     void* m_dmaPtr;
132     uint64_t m_dmaPhysAddr;
133 };
134 
135 // Abstraction for gralloc handle conversion
136 class Gralloc {
137 public:
138     virtual uint32_t createColorBuffer(
139         ExtendedRCEncoderContext* rcEnc, int width, int height, uint32_t glformat);
140     virtual uint32_t getHostHandle(native_handle_t const* handle) = 0;
141     virtual int getFormat(native_handle_t const* handle) = 0;
142     virtual size_t getAllocatedSize(native_handle_t const* handle) = 0;
~Gralloc()143     virtual ~Gralloc() {}
144 };
145 
146 // Abstraction for process pipe helper
147 class ProcessPipe {
148 public:
149     virtual bool processPipeInit(HostConnectionType connType, renderControl_encoder_context_t *rcEnc) = 0;
~ProcessPipe()150     virtual ~ProcessPipe() {}
151 };
152 
153 struct EGLThreadInfo;
154 
155 
156 class HostConnection
157 {
158 public:
159     static HostConnection *get();
160     static HostConnection *getWithThreadInfo(EGLThreadInfo* tInfo);
161     static void exit();
162     static void exitUnclean(); // for testing purposes
163 
164     static std::unique_ptr<HostConnection> createUnique();
165     HostConnection(const HostConnection&) = delete;
166 
167     ~HostConnection();
168 
connectionType()169     HostConnectionType connectionType() const {
170         return m_connectionType;
171     }
172 
173     GLEncoder *glEncoder();
174     GL2Encoder *gl2Encoder();
175     goldfish_vk::VkEncoder *vkEncoder();
176     ExtendedRCEncoderContext *rcEncoder();
177 
178     // Returns rendernode fd, in case the stream is virtio-gpu based.
179     // Otherwise, attempts to create a rendernode fd assuming
180     // virtio-gpu is available.
181     int getOrCreateRendernodeFd();
182 
checksumHelper()183     ChecksumCalculator *checksumHelper() { return &m_checksumHelper; }
grallocHelper()184     Gralloc *grallocHelper() { return m_grallocHelper; }
185 
flush()186     void flush() {
187         if (m_stream) {
188             m_stream->flush();
189         }
190     }
191 
setGrallocOnly(bool gralloc_only)192     void setGrallocOnly(bool gralloc_only) {
193         m_grallocOnly = gralloc_only;
194     }
195 
isGrallocOnly()196     bool isGrallocOnly() const { return m_grallocOnly; }
197 
198 #ifdef __clang__
199 #pragma clang diagnostic push
200 #pragma clang diagnostic ignored "-Wthread-safety-analysis"
201 #endif
lock()202     void lock() const { m_lock.lock(); }
unlock()203     void unlock() const { m_lock.unlock(); }
204 #ifdef __clang__
205 #pragma clang diagnostic pop
206 #endif
207 
208     bool exitUncleanly; // for testing purposes
209 
210 private:
211     // If the connection failed, |conn| is deleted.
212     // Returns NULL if connection failed.
213     static std::unique_ptr<HostConnection> connect();
214 
215     HostConnection();
216     static gl_client_context_t  *s_getGLContext();
217     static gl2_client_context_t *s_getGL2Context();
218 
219     const std::string& queryGLExtensions(ExtendedRCEncoderContext *rcEnc);
220     // setProtocol initilizes GL communication protocol for checksums
221     // should be called when m_rcEnc is created
222     void setChecksumHelper(ExtendedRCEncoderContext *rcEnc);
223     void queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc);
224     void queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc);
225     void queryAndSetGLESMaxVersion(ExtendedRCEncoderContext *rcEnc);
226     void queryAndSetNoErrorState(ExtendedRCEncoderContext *rcEnc);
227     void queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc);
228     void queryAndSetDirectMemSupport(ExtendedRCEncoderContext *rcEnc);
229     void queryAndSetVulkanSupport(ExtendedRCEncoderContext *rcEnc);
230     void queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext *rcEnc);
231     void queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext *rcEnc);
232     void queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext *rcEnc);
233     void queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext *rcEnc);
234     void queryAndSetYUVCache(ExtendedRCEncoderContext *mrcEnc);
235     void queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext *rcEnc);
236     void queryAndSetVirtioGpuNext(ExtendedRCEncoderContext *rcEnc);
237     void queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc);
238     void queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc);
239     void queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext *rcEnc);
240     void queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext *rcEnc);
241     void queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext *rcEnc);
242     void queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext *rcEnc);
243     void queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext *rcEnc);
244     void queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext *rcEnc);
245     void queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext *rcEnc);
246     void queryAndSetSyncBufferData(ExtendedRCEncoderContext *rcEnc);
247     void queryAndSetReadColorBufferDma(ExtendedRCEncoderContext *rcEnc);
248     GLint queryVersion(ExtendedRCEncoderContext* rcEnc);
249 
250 private:
251     HostConnectionType m_connectionType;
252     GrallocType m_grallocType;
253 
254     // intrusively refcounted
255     IOStream* m_stream = nullptr;
256 
257     std::unique_ptr<GLEncoder> m_glEnc;
258     std::unique_ptr<GL2Encoder> m_gl2Enc;
259 
260     // intrusively refcounted
261     goldfish_vk::VkEncoder* m_vkEnc = nullptr;
262     std::unique_ptr<ExtendedRCEncoderContext> m_rcEnc;
263 
264     ChecksumCalculator m_checksumHelper;
265     Gralloc* m_grallocHelper = nullptr;
266     ProcessPipe* m_processPipe = nullptr;
267     std::string m_glExtensions;
268     bool m_grallocOnly;
269     bool m_noHostError;
270 #ifdef GFXSTREAM
271     mutable std::mutex m_lock;
272 #else
273     mutable android::Mutex m_lock;
274 #endif
275     int m_rendernodeFd;
276     bool m_rendernodeFdOwned;
277 };
278 
279 #endif
280