/*
 * Copyright (c) 2023-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GPU_CONTEXT_H
#define GPU_CONTEXT_H
#include <functional>
#include <set>

#include "impl_interface/gpu_context_impl.h"
#include "trace_memory_dump.h"
#include "utils/data.h"
#include "utils/drawing_macros.h"

typedef void* EGLContext;
namespace OHOS {
namespace Rosen {
namespace Drawing {
/**
 * @brief Bit flags selecting which GPU path-rendering strategies are enabled.
 *        Values combine as a bitmask over the uint32_t underlying type.
 */
enum class PathRenderers : uint32_t {
    NONE              = 0x00,   // no path renderer enabled
    DASHLINE          = 0x01,
    STENCILANDCOVER   = 0x02,
    COVERAGECOUNTING  = 0x04,
    AAHAIRLINE        = 0x08,
    AACONVEX          = 0x10,
    AALINEARIZING     = 0x20,
    SMALL             = 0x40,
    TESSELLATING      = 0x80,

    ALL               = 0xFF,                     // every renderer bit set
    DEFAULT           = ALL & ~COVERAGECOUNTING   // all renderers except coverage counting
};

/**
 * @brief Identifies the owner of a GPU resource (process / thread / window /
 *        type) so cached resources can be attributed, dumped, and purged
 *        per owner.
 */
struct GPUResourceTag {
    GPUResourceTag() = default;
    GPUResourceTag(uint32_t pid, uint32_t tid, uint64_t wid, uint32_t fid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fName(name) {}

    uint32_t fPid = 0;    // id of process
    uint32_t fTid = 0;    // id of thread
    uint64_t fWid = 0;    // id of window
    uint32_t fFid = 0;    // id of type
    uint32_t fSid = 0;    // id of source type in ondraw phase
    std::string fName;    // human-readable tag name

    static void SetCurrentNodeId(uint64_t nodeId);
};

/**
 * @brief Options used when creating a GPUContext. Currently only supports a
 *        persistent shader cache plus a few toggles; other options may be
 *        expanded in the future.
 */
class DRAWING_API GPUContextOptions {
public:
    /**
     * @brief Caches compiled shaders for reuse between sessions.
     *        Implementations supply the backing store.
     */
    class PersistentCache {
    public:
        PersistentCache() = default;
        virtual ~PersistentCache() = default;

        /**
         * @brief Returns the data for the key if it exists in the cache.
         * @param key  Cache lookup key.
         * @return Cached data; presumably null when the key is absent — confirm with implementations.
         */
        virtual std::shared_ptr<Data> Load(const Data& key) = 0;

        /**
         * @brief Stores the data under the given key.
         */
        virtual void Store(const Data& key, const Data& data) = 0;
    };

    /**
     * @brief Gets the persistent cache object.
     * @return Non-owning pointer; nullptr when no cache has been set.
     */
    PersistentCache* GetPersistentCache() const;

    /**
     * @brief Sets the persistent cache object.
     * @param persistentCache Pointer to a persistent cache object. Stored
     *        as a raw pointer — caller presumably retains ownership and must
     *        keep it alive for the options' lifetime (confirm with callers).
     */
    void SetPersistentCache(PersistentCache* persistentCache);

    // Whether path-mask caching is allowed (default: true).
    void SetAllowPathMaskCaching(bool allowPathMaskCaching);
    bool GetAllowPathMaskCaching() const;

    // Filesystem path used to store the cache (default: empty string).
    void SetStoreCachePath(const std::string& filename);
    std::string GetStoreCachePath() const;

    /**
     * @brief Marks whether unified rendering mode is in use; affects
     *        small-texture caching on non-unified devices (default: true).
     * @param isUniRender Whether the unified rendering mode is used.
     */
    void SetIsUniRender(bool isUniRender);
    bool GetIsUniRender() const;

private:
    PersistentCache* persistentCache_ = nullptr;  // non-owning; see SetPersistentCache
    bool allowPathMaskCaching_ = true;
    bool isUniRender_ = true;
    std::string filePath_ = "";                   // store-cache path
};

/**
 * @brief Drawing-layer handle to a backend GPU context (GL or Vulkan).
 *        Exposes flush/submit control, GPU resource-cache limits and purging,
 *        and memory-statistics dumping. All operations are delegated to the
 *        adaptation-layer implementation held in impl_ (see GetImpl()).
 */
class DRAWING_API GPUContext {
public:
    GPUContext();
    ~GPUContext() {}

    /**
     * @brief           Creates a GL GPUContext for a backend context.
     * @param options   Option to create a GL GPUContext.
     */
    bool BuildFromGL(const GPUContextOptions& options);

#ifdef RS_ENABLE_VK
    // The Vulkan backend-context type differs between the M133 Skia branch
    // and older branches, hence the USE_M133_SKIA switch below.
#ifdef USE_M133_SKIA
    bool BuildFromVK(const skgpu::VulkanBackendContext& context);
#else
    bool BuildFromVK(const GrVkBackendContext& context);
#endif

    /**
     * @brief           Creates a VK GPUContext for a backend context.
     * @param context   An existing VK context used to create a VK GPUContext.
     * @param options   Option to create a VK GPUContext.
     */
#ifdef USE_M133_SKIA
    bool BuildFromVK(const skgpu::VulkanBackendContext& context, const GPUContextOptions& options);
#else
    bool BuildFromVK(const GrVkBackendContext& context, const GPUContextOptions& options);
#endif
#endif

    /**
     * @brief   Call to ensure all drawing to the context has been flushed to underlying 3D API specific objects.
     */
    void Flush();

    /**
     * @brief   Subtree-parallel feature interface used to generate draw ops.
     */
    void FlushCommands();

    /**
     * @brief       Subtree-parallel feature interface used to generate submit information.
     * @param seq   Submit sequence number — confirm exact semantics with the impl.
     */
    void GenerateSubmitInfo(int seq);

    /**
     * @brief   Call to ensure all drawing to the context has been submitted to underlying 3D API.
     */
    void Submit();

    /**
     * @brief           Call to ensure all drawing to the context has been flushed and submitted to underlying 3D API.
     * @param syncCpu   Whether to sync CPU or not (default: false).
     */
    void FlushAndSubmit(bool syncCpu = false);

    /**
     * @brief             Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds
     *                    or are otherwise marked for deletion.
     * @param msNotUsed   Only unlocked resources not used in these last milliseconds will be cleaned up.
     */
    void PerformDeferredCleanup(std::chrono::milliseconds msNotUsed);

    /**
     * @brief                   Gets the current GPU resource cache limits.
     * @param maxResource       If non-null, returns maximum number of resources that can be held in the cache.
     * @param maxResourceBytes  If non-null, returns maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void GetResourceCacheLimits(int* maxResource, size_t* maxResourceBytes) const;

    /**
     * @brief                   Specify the GPU resource cache limits.
     * @param maxResource       The maximum number of resources that can be held in the cache.
     * @param maxResourceBytes  The maximum number of bytes of video memory that can be held in the cache.
     */
    void SetResourceCacheLimits(int maxResource, size_t maxResourceBytes);

    /**
     * @brief                   Specify the GPU purgeable resource cache limit.
     * @param purgeableMaxCount The maximum number of purgeable queue resources that need to be cleaned.
     */
    void SetPurgeableResourceLimit(int purgeableMaxCount);

    /**
     * @brief                   Gets the current GPU resource cache usage.
     * @param resourceCount     If non-null, returns the number of resources that are held in the cache.
     * @param resourceBytes     If non-null, returns the total number of bytes of video memory held in the cache.
     */
    void GetResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    // Dumps a description of every cached resource into the stream.
    void DumpAllResource(std::stringstream& dump) const;

    /**
     * @brief                   Free GPU resources created by the context.
     */
    void FreeGpuResources();

    /**
     * @brief                   Deeply clean resources during Reclaim.
     */
    void ReclaimResources();

    /**
     * @brief                   Dump GPU stats.
     * @param out               Dump GPU stat string.
     */
    void DumpGpuStats(std::string& out) const;

    /**
     * @brief                   After returning, it will be assumed that the underlying context
     *                          may no longer be valid.
     */
    void ReleaseResourcesAndAbandonContext();

    /**
     * @brief                         Purge unlocked resources from the cache until
     *                                the provided byte count has been reached or we have purged all unlocked resources.
     * @param scratchResourcesOnly    Whether to purge scratch resources only or not.
     */
    void PurgeUnlockedResources(bool scratchResourcesOnly);

    /**
     * @brief                         Purge unlocked resources by tag from the cache until
     *                                the provided byte count has been reached or we have purged all unlocked resources.
     * @param scratchResourcesOnly    Whether to purge scratch resources only or not.
     * @param tag                     GPU resource tag used to purge unlocked resources.
     */
    void PurgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GPUResourceTag &tag);

    /**
     * @brief                         Purge unlocked resources by pid from the cache until
     *                                the provided byte count has been reached or we have purged all unlocked resources.
     * @param scratchResourcesOnly    Whether to purge scratch resources only or not.
     * @param exitedPidSet            Pids of exited processes whose unlocked resources should be purged.
     */
    void PurgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet);

    /**
     * @brief                         Register a callback invoked when the GPU resets with a
     *                                device-lost (Vulkan) error.
     * @param vulkanErrorCallback     Callback function for Skia to invoke on error.
     */
    void RegisterVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback);

    /**
     * @brief                       Purge unlocked resources in every frame.
     * @param scratchResourcesOnly  Whether to purge scratch resources only or not.
     * @param exitedPidSet          Pids of exited processes whose unlocked resources should be purged.
     * @param protectedPidSet       Pids whose GPU resources will not be purged.
     */
    void PurgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet,
        const std::set<pid_t>& protectedPidSet);

    /**
     * @brief                   Purge unlocked resources from the safe cache until
     *                          the provided byte count has been reached or we have purged all unlocked resources.
     */
    void PurgeUnlockAndSafeCacheGpuResources();

    /**
     * @brief                   Releases GPUResource objects and removes them from the cache by tag.
     * @param tag               GPU resource tag used to release.
     */
    void ReleaseByTag(const GPUResourceTag &tag);

    /**
     * @brief                   Enumerates all cached GPU resources and dumps their memory to traceMemoryDump.
     * @param traceMemoryDump   A trace to memory dump.
     * @param tag               GPU resource tag used to dump memory statistics.
     */
    void DumpMemoryStatisticsByTag(TraceMemoryDump* traceMemoryDump, GPUResourceTag &tag) const;

    /**
     * @brief                   Enumerates all cached GPU resources and returns their memory.
     * @param traceMemoryDump   A trace to memory dump.
     * @param tag               GPU resource tag used to dump memory statistics.
     * @return                  Total memory found — confirm units (bytes) with the impl.
     */
    uint64_t NewDumpMemoryStatisticsByTag(TraceMemoryDump* traceMemoryDump, GPUResourceTag &tag) const;

    /**
     * @brief                   Enumerates all cached GPU resources and dumps their memory to traceMemoryDump.
     * @param traceMemoryDump   A trace to memory dump.
     */
    void DumpMemoryStatistics(TraceMemoryDump* traceMemoryDump) const;

    /**
     * @brief                   Reset GPU context cache.
     */
    void ResetContext();

    /**
     * @brief                   Set current resource tag for GPU cache recycle.
     * @param tag               GPU resource tag used to set current GPU resource tag.
     */
    void SetCurrentGpuResourceTag(const GPUResourceTag &tag);

    /**
     * @brief                   Get current resource tag for GPU cache recycle.
     * @return                  The current GPU resource tag.
     */
    GPUResourceTag GetCurrentGpuResourceTag() const;

    /**
     * @brief                   Get updated memory map (per-pid byte counts).
     * @param out               Updated memory map.
     */
    void GetUpdatedMemoryMap(std::unordered_map<pid_t, size_t> &out);

    /**
     * @brief                   Init GPU memory limit.
     * @param callback          Memory-overflow callback (type name spelled MemoryOverflowCalllback upstream).
     * @param size              Memory size limit.
     */
    void InitGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size);

#ifdef RS_ENABLE_VK
    /**
     * @brief                   Store Vulkan pipeline cache.
     */
    void StoreVkPipelineCacheData();
#endif

    // Frame boundary hooks; presumably forwarded to the impl — confirm there.
    void BeginFrame();

    void EndFrame();

    // Enables/disables GPU cache suppression on window switch.
    void SetGpuCacheSuppressWindowSwitch(bool enabled);

    // Enables the async GPU-memory reclaimer; setThreadPriority is invoked to
    // configure the reclaimer thread — confirm threading details with the impl.
    void SetGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority);

    void FlushGpuMemoryInWaitQueue();

    void SetEarlyZEnabled(bool flag);

    // Appends instance extensions required for HPS effect support.
    void GetHpsEffectSupport(std::vector<const char*>& instanceExtensions);

    // Suppresses GPU cache purging below a certain ratio; nextFrameHasArrived
    // is polled to decide when to stop — confirm with the impl.
    void SuppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived);

    /**
     * @brief       Get the adaptation-layer instance; called in the adaptation layer.
     * @param T     The name of the Impl class.
     * @return      Adaptation-layer instance.
     */
    template<typename T>
    T* GetImpl() const
    {
        return impl_->DowncastingTo<T>();
    }

    // Registers a function used to post tasks (each task is a void() closure).
    void RegisterPostFunc(const std::function<void(const std::function<void()>& task)>& func);

    /**
     * @brief                   Defragment or clear the VMA cache if needed.
     */
    void VmaDefragment();
private:
    std::shared_ptr<GPUContextImpl> impl_;  // backend implementation; all methods delegate to it
};
} // namespace Drawing
} // namespace Rosen
} // namespace OHOS

#endif // !GPU_CONTEXT_H