/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GPU_CONTEXT_H
#define GPU_CONTEXT_H
#include <chrono>
#include <functional>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>

#include "impl_interface/gpu_context_impl.h"
#include "trace_memory_dump.h"
#include "utils/data.h"
#include "utils/drawing_macros.h"

typedef void* EGLContext;
namespace OHOS {
namespace Rosen {
namespace Drawing {
enum class PathRenderers : uint32_t {
    NONE = 0,
    DASHLINE = 1 << 0,
    STENCILANDCOVER = 1 << 1,
    COVERAGECOUNTING = 1 << 2,
    AAHAIRLINE = 1 << 3,
    AACONVEX = 1 << 4,
    AALINEARIZING = 1 << 5,
    SMALL = 1 << 6,
    TESSELLATING = 1 << 7,

    ALL = (TESSELLATING | (TESSELLATING - 1)),
    DEFAULT = ALL & ~COVERAGECOUNTING
};

struct GPUResourceTag {
    GPUResourceTag()
        : fPid(0), fTid(0), fWid(0), fFid(0) {}
    GPUResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fName(name) {}
    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
    std::string fName;
};

/**
 * @brief Options for creating a GPUContext. Currently only setting a persistent cache is supported;
 * other options may be added in the future.
 */
class DRAWING_API GPUContextOptions {
public:
    /**
     * @brief Caches compiled shaders for reuse between sessions.
     */
    class PersistentCache {
    public:
        PersistentCache() = default;
        virtual ~PersistentCache() = default;

        /**
         * @brief Returns the data for the key if it exists in the cache, or nullptr otherwise.
         */
        virtual std::shared_ptr<Data> Load(const Data& key) = 0;

        /**
         * @brief Stores the data under the given key.
         */
        virtual void Store(const Data& key, const Data& data) = 0;
    };

    /**
     * @brief Gets the persistent cache object.
     */
    PersistentCache* GetPersistentCache() const;

    /**
     * @brief Sets the persistent cache object.
     * @param persistentCache A pointer to the persistent cache object.
     */
    void SetPersistentCache(PersistentCache* persistentCache);

    void SetAllowPathMaskCaching(bool allowPathMaskCaching);
    bool GetAllowPathMaskCaching() const;

    void SetStoreCachePath(const std::string& filename);
    std::string GetStoreCachePath() const;

private:
    PersistentCache* persistentCache_ = nullptr;
    bool allowPathMaskCaching_ = true;
    std::string filePath_ = "";
};

class DRAWING_API GPUContext {
public:
    GPUContext();
    ~GPUContext() {}

    /**
     * @brief Creates a GL GPUContext for a backend context.
     * @param options Options used to create the GL GPUContext.
     */
    bool BuildFromGL(const GPUContextOptions& options);

#ifdef RS_ENABLE_VK
    /**
     * @brief Creates a VK GPUContext for a backend context.
     * @param context An existing VK backend context used to create the VK GPUContext.
     */
    bool BuildFromVK(const GrVkBackendContext& context);

    /**
     * @brief Creates a VK GPUContext for a backend context.
     * @param context An existing VK backend context used to create the VK GPUContext.
     * @param options Options used to create the VK GPUContext.
     */
    bool BuildFromVK(const GrVkBackendContext& context, const GPUContextOptions& options);
#endif
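    /*
     * Illustrative usage sketch (not part of this header): building a GL-backed GPUContext with a
     * hypothetical in-memory shader cache. MemoryShaderCache and its std::map storage exist only for
     * this example; the sketch assumes Data exposes GetData()/GetSize()/BuildWithCopy() as declared
     * in utils/data.h and that a valid GL context is current on the calling thread.
     *
     *   class MemoryShaderCache : public GPUContextOptions::PersistentCache {
     *   public:
     *       std::shared_ptr<Data> Load(const Data& key) override
     *       {
     *           std::string k(static_cast<const char*>(key.GetData()), key.GetSize());
     *           auto it = cache_.find(k);
     *           return it != cache_.end() ? it->second : nullptr;
     *       }
     *       void Store(const Data& key, const Data& data) override
     *       {
     *           std::string k(static_cast<const char*>(key.GetData()), key.GetSize());
     *           auto value = std::make_shared<Data>();
     *           value->BuildWithCopy(data.GetData(), data.GetSize());
     *           cache_[k] = value;
     *       }
     *   private:
     *       std::map<std::string, std::shared_ptr<Data>> cache_; // requires <map>
     *   };
     *
     *   MemoryShaderCache shaderCache;
     *   GPUContextOptions options;
     *   options.SetPersistentCache(&shaderCache);
     *   auto gpuContext = std::make_shared<GPUContext>();
     *   if (!gpuContext->BuildFromGL(options)) {
     *       // handle the failure, e.g. fall back to CPU rendering
     *   }
     */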
    /**
     * @brief Call to ensure all drawing to the context has been flushed to the underlying 3D API-specific objects.
     */
    void Flush();

    /**
     * @brief Call to ensure all drawing to the context has been submitted to the underlying 3D API.
     */
    void Submit();

    /**
     * @brief Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D API.
     * @param syncCpu Whether to synchronize with the CPU.
     */
    void FlushAndSubmit(bool syncCpu = false);

    /**
     * @brief Purges GPU resources that have not been used in the past msNotUsed milliseconds
     *        or are otherwise marked for deletion.
     * @param msNotUsed Only unlocked resources not used within this duration are cleaned up.
     */
    void PerformDeferredCleanup(std::chrono::milliseconds msNotUsed);

    /**
     * @brief Gets the current GPU resource cache limits.
     * @param maxResource If non-null, returns the maximum number of resources that can be held in the cache.
     * @param maxResourceBytes If non-null, returns the maximum number of bytes of video memory
     *        that can be held in the cache.
     */
    void GetResourceCacheLimits(int* maxResource, size_t* maxResourceBytes) const;

    /**
     * @brief Specifies the GPU resource cache limits.
     * @param maxResource The maximum number of resources that can be held in the cache.
     * @param maxResourceBytes The maximum number of bytes of video memory that can be held in the cache.
     */
    void SetResourceCacheLimits(int maxResource, size_t maxResourceBytes);

    /**
     * @brief Specifies the GPU purgeable resource cache limit.
     * @param purgeableMaxCount The maximum number of purgeable-queue resources that need to be cleaned.
     */
    void SetPurgeableResourceLimit(int purgeableMaxCount);

    /**
     * @brief Gets the current GPU resource cache usage.
     * @param resourceCount If non-null, returns the number of resources held in the cache.
     * @param resourceBytes If non-null, returns the total number of bytes of video memory held in the cache.
     */
    void GetResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    void DumpAllResource(std::stringstream& dump) const;

    /**
     * @brief Frees GPU resources created by the context.
     */
    void FreeGpuResources();

    /**
     * @brief Dumps GPU stats.
     * @param out Receives the GPU stats string.
     */
    void DumpGpuStats(std::string& out) const;

    /**
     * @brief Releases all resources and abandons the context. After this call returns,
     *        the underlying backend context is assumed to no longer be valid.
     */
    void ReleaseResourcesAndAbandonContext();

    /**
     * @brief Purges unlocked resources from the cache until the provided byte count has been reached
     *        or all unlocked resources have been purged.
     * @param scratchResourcesOnly Whether to purge only scratch resources.
     */
    void PurgeUnlockedResources(bool scratchResourcesOnly);

    /**
     * @brief Purges unlocked resources matching the given tag from the cache until the provided byte count
     *        has been reached or all unlocked resources have been purged.
     * @param scratchResourcesOnly Whether to purge only scratch resources.
     * @param tag GPU resource tag used to select the unlocked resources to purge.
     */
    void PurgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GPUResourceTag &tag);
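    /*
     * Illustrative cache-management sketch (not part of this header): capping the resource cache at an
     * assumed 96 MB budget, flushing a frame, and then releasing resources that have been idle for a
     * few seconds. The budget and idle timeout are example values, not defaults of this API.
     *
     *   int maxResource = 0;
     *   size_t maxResourceBytes = 0;
     *   gpuContext->GetResourceCacheLimits(&maxResource, &maxResourceBytes);
     *
     *   constexpr size_t cacheBudgetBytes = 96 * 1024 * 1024; // assumed product budget
     *   if (maxResourceBytes > cacheBudgetBytes) {
     *       gpuContext->SetResourceCacheLimits(maxResource, cacheBudgetBytes);
     *   }
     *
     *   // After recording a frame's drawing commands:
     *   gpuContext->FlushAndSubmit(false);
     *
     *   // Periodically drop unlocked resources that have not been used recently.
     *   gpuContext->PerformDeferredCleanup(std::chrono::milliseconds(3000));
     */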
    /**
     * @brief Purges unlocked resources owned by the given pids from the cache until the provided byte count
     *        has been reached or all unlocked resources have been purged.
     * @param scratchResourcesOnly Whether to purge only scratch resources.
     * @param exitedPidSet Set of exited pids whose unlocked resources should be purged.
     */
    void PurgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet);

    /**
     * @brief Registers a callback that is invoked when the GPU is reset with a device-lost error.
     * @param vulkanErrorCallback Callback function invoked by Skia when a Vulkan error occurs.
     */
    void RegisterVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback);

    /**
     * @brief Purges unlocked resources in every frame.
     * @param scratchResourcesOnly Whether to purge only scratch resources.
     * @param exitedPidSet Set of exited pids whose unlocked resources should be purged.
     * @param protectedPidSet Set of pids whose resources will not be purged.
     */
    void PurgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet,
        const std::set<pid_t>& protectedPidSet);

    /**
     * @brief Purges unlocked resources from the safe cache until the provided byte count has been reached
     *        or all unlocked resources have been purged.
     */
    void PurgeUnlockAndSafeCacheGpuResources();

    /**
     * @brief Releases GPUResource objects matching the tag and removes them from the cache.
     * @param tag GPU resource tag used to select the resources to release.
     */
    void ReleaseByTag(const GPUResourceTag &tag);

    /**
     * @brief Enumerates all cached GPU resources matching the tag and dumps their memory to traceMemoryDump.
     * @param traceMemoryDump The trace memory dump to write to.
     * @param tag GPU resource tag used to select the resources to dump.
     */
    void DumpMemoryStatisticsByTag(TraceMemoryDump* traceMemoryDump, GPUResourceTag &tag) const;

    /**
     * @brief Enumerates all cached GPU resources and dumps their memory to traceMemoryDump.
     * @param traceMemoryDump The trace memory dump to write to.
     */
    void DumpMemoryStatistics(TraceMemoryDump* traceMemoryDump) const;

    /**
     * @brief Resets the GPU context cache.
     */
    void ResetContext();

    /**
     * @brief Sets the current resource tag used for GPU cache recycling.
     * @param tag GPU resource tag to set as the current tag.
     */
    void SetCurrentGpuResourceTag(const GPUResourceTag &tag);

    /**
     * @brief Gets the updated memory map.
     * @param out Receives the updated memory map, keyed by pid.
     */
    void GetUpdatedMemoryMap(std::unordered_map<pid_t, size_t> &out);

    /**
     * @brief Initializes the GPU memory limit.
     * @param callback Callback invoked on memory overflow.
     * @param size Memory size limit.
     */
    void InitGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size);

#ifdef RS_ENABLE_VK
    /**
     * @brief Stores the Vulkan pipeline cache data.
     */
    void StoreVkPipelineCacheData();
#endif

    void BeginFrame();

    void EndFrame();

    void SetGpuCacheSuppressWindowSwitch(bool enabled);

    void SetGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority);

    void FlushGpuMemoryInWaitQueue();

    void SuppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived);
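    /*
     * Illustrative resource-tag sketch (not part of this header): tagging subsequent GPU allocations so
     * they can later be released or purged as a group. The pid/tid/wid/fid values and the node name are
     * placeholders; real callers would use the owning process, window, and frame identifiers.
     *
     *   GPUResourceTag tag(1234, 0, 0, 0, "ExampleRenderNode");
     *   gpuContext->SetCurrentGpuResourceTag(tag);
     *
     *   // ... record and submit drawing work owned by this tag ...
     *
     *   // When the owner goes away, drop everything that was allocated under the tag.
     *   gpuContext->ReleaseByTag(tag);
     *
     *   // Or purge unlocked resources belonging to processes that have exited.
     *   std::set<pid_t> exitedPids; // filled in by process-lifecycle tracking (placeholder)
     *   gpuContext->PurgeUnlockedResourcesByPid(true, exitedPids);
     */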
    /**
     * @brief Gets the adaptation layer instance; called in the adaptation layer.
     * @tparam T The type of the implementation class.
     * @return The adaptation layer instance.
     */
    template<typename T>
    T* GetImpl() const
    {
        return impl_->DowncastingTo<T>();
    }

    void RegisterPostFunc(const std::function<void(const std::function<void()>& task)>& func);

    /**
     * @brief Defragments or clears the VMA cache if needed.
     */
    void VmaDefragment();
private:
    std::shared_ptr<GPUContextImpl> impl_;
};
} // namespace Drawing
} // namespace Rosen
} // namespace OHOS

#endif // !GPU_CONTEXT_H