/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "skia_gpu_context.h"
#include "include/gpu/gl/GrGLInterface.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "include/core/SkTypes.h"

#include "skia_data.h"
#include "utils/data.h"
#include "utils/log.h"
#include "skia_trace_memory_dump.h"
#include "utils/system_properties.h"
#include "skia_task_executor.h"

namespace OHOS {
namespace Rosen {
namespace Drawing {
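// Serializes access to contextPostMap_ from RegisterPostFunc() and GetPostFunc().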
static std::mutex g_registrarMutex;
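
// Adapts the engine's GPUContextOptions::PersistentCache to Skia's
// GrContextOptions::PersistentCache interface, converting keys and blobs
// between SkData and Drawing::Data so it can be installed as fPersistentCache.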
SkiaPersistentCache::SkiaPersistentCache(GPUContextOptions::PersistentCache* cache) : cache_(cache) {}

sk_sp<SkData> SkiaPersistentCache::load(const SkData& key)
{
    if (!cache_) {
        LOGD("SkiaPersistentCache::load, failed! cache invalid");
        return nullptr;
    }
    Data keyData;
    auto skiaKeyDataImpl = keyData.GetImpl<SkiaData>();
    skiaKeyDataImpl->SetSkData(sk_ref_sp(&key));

    auto retData = cache_->Load(keyData);
    if (retData == nullptr) {
        LOGD("SkiaPersistentCache::load, failed! load data invalid");
        return nullptr;
    }

    return retData->GetImpl<SkiaData>()->GetSkData();
}

void SkiaPersistentCache::store(const SkData& key, const SkData& data)
{
    if (!cache_) {
        LOGD("SkiaPersistentCache::store, failed! cache invalid");
        return;
    }

    Data keyData;
    Data storeData;
    keyData.GetImpl<SkiaData>()->SetSkData(sk_ref_sp(&key));
    storeData.GetImpl<SkiaData>()->SetSkData(sk_ref_sp(&data));
    cache_->Store(keyData, storeData);
}

SkiaGPUContext::SkiaGPUContext() : grContext_(nullptr), skiaPersistentCache_(nullptr) {}

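// Bridges Skia's SkExecutor interface to the engine's shared TaskPoolExecutor,
// so work Skia offloads through GrContextOptions::fExecutor runs on the common task pool.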
class CommonPoolExecutor : public SkExecutor {
public:
    void add(std::function<void(void)> func) override
    {
        TaskPoolExecutor::PostTask(std::move(func));
    }
};

static CommonPoolExecutor g_defaultExecutor;

bool SkiaGPUContext::BuildFromGL(const GPUContextOptions& options)
{
    sk_sp<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
    if (options.GetPersistentCache() != nullptr) {
        skiaPersistentCache_ = std::make_shared<SkiaPersistentCache>(options.GetPersistentCache());
    }

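    // Configure Ganesh: disable the coverage-counting and atlas path renderers,
    // prefer external images over ES3, skip distance-field paths, and install
    // the persistent shader cache and the shared executor.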
    GrContextOptions grOptions;
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kCoverageCounting;
    // fix svg antialiasing bug
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kAtlas;
    grOptions.fPreferExternalImagesOverES3 = true;
    grOptions.fDisableDistanceFieldPaths = true;
    grOptions.fAllowPathMaskCaching = options.GetAllowPathMaskCaching();
    grOptions.fPersistentCache = skiaPersistentCache_.get();
    grOptions.fExecutor = &g_defaultExecutor;
    grContext_ = GrDirectContext::MakeGL(std::move(glInterface), grOptions);
    return grContext_ != nullptr;
}

#ifdef RS_ENABLE_VK
bool SkiaGPUContext::BuildFromVK(const GrVkBackendContext& context)
{
    if (!SystemProperties::IsUseVulkan()) {
        return false;
    }
    GrContextOptions grOptions;
    grOptions.fExecutor = &g_defaultExecutor;
    grContext_ = GrDirectContext::MakeVulkan(context, grOptions);
    return grContext_ != nullptr;
}

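// Same Ganesh option tuning as BuildFromGL, applied to the Vulkan backend.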
bool SkiaGPUContext::BuildFromVK(const GrVkBackendContext& context, const GPUContextOptions& options)
{
    if (!SystemProperties::IsUseVulkan()) {
        return false;
    }
    if (options.GetPersistentCache() != nullptr) {
        skiaPersistentCache_ = std::make_shared<SkiaPersistentCache>(options.GetPersistentCache());
    }
    GrContextOptions grOptions;
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kCoverageCounting;
    // fix svg antialiasing bug
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kAtlas;
    grOptions.fPreferExternalImagesOverES3 = true;
    grOptions.fDisableDistanceFieldPaths = true;
    grOptions.fAllowPathMaskCaching = options.GetAllowPathMaskCaching();
    grOptions.fPersistentCache = skiaPersistentCache_.get();
    grOptions.fExecutor = &g_defaultExecutor;
    grContext_ = GrDirectContext::MakeVulkan(context, grOptions);
    return grContext_ != nullptr;
}
#endif

void SkiaGPUContext::Flush()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::Flush, grContext_ is nullptr");
        return;
    }
    grContext_->flush();
}

void SkiaGPUContext::FlushAndSubmit(bool syncCpu)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::FlushAndSubmit, grContext_ is nullptr");
        return;
    }
    grContext_->flushAndSubmit(syncCpu);
}

void SkiaGPUContext::Submit()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::Submit, grContext_ is nullptr");
        return;
    }
    grContext_->submit();
}

void SkiaGPUContext::PerformDeferredCleanup(std::chrono::milliseconds msNotUsed)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PerformDeferredCleanup, grContext_ is nullptr");
        return;
    }
    grContext_->performDeferredCleanup(msNotUsed);
}

void SkiaGPUContext::GetResourceCacheLimits(int* maxResource, size_t* maxResourceBytes) const
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::GetResourceCacheLimits, grContext_ is nullptr");
        return;
    }
    grContext_->getResourceCacheLimits(maxResource, maxResourceBytes);
}

void SkiaGPUContext::SetResourceCacheLimits(int maxResource, size_t maxResourceBytes)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetResourceCacheLimits, grContext_ is nullptr");
        return;
    }
    grContext_->setResourceCacheLimits(maxResource, maxResourceBytes);
}

void SkiaGPUContext::GetResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::GetResourceCacheUsage, grContext_ is nullptr");
        return;
    }
    grContext_->getResourceCacheUsage(resourceCount, resourceBytes);
}

void SkiaGPUContext::FreeGpuResources()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::FreeGpuResources, grContext_ is nullptr");
        return;
    }
    grContext_->freeGpuResources();
}

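// Collects Ganesh GPU statistics and VMA (Vulkan Memory Allocator) statistics into one string.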
void SkiaGPUContext::DumpGpuStats(std::string& out)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::DumpGpuStats, grContext_ is nullptr");
        return;
    }
    SkString stat;
    grContext_->priv().dumpGpuStats(&stat);
    grContext_->dumpVmaStats(&stat);
    out = stat.c_str();
}

void SkiaGPUContext::ReleaseResourcesAndAbandonContext()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::ReleaseResourcesAndAbandonContext, grContext_ is nullptr");
        return;
    }
    grContext_->releaseResourcesAndAbandonContext();
}

void SkiaGPUContext::PurgeUnlockedResources(bool scratchResourcesOnly)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockedResources, grContext_ is nullptr");
        return;
    }
    grContext_->purgeUnlockedResources(scratchResourcesOnly);
}

void SkiaGPUContext::PurgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockedResourcesByTag, grContext_ is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->purgeUnlockedResourcesByTag(scratchResourcesOnly, grTag);
}

void SkiaGPUContext::PurgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockedResourcesByPid, grContext_ is nullptr");
        return;
    }
    grContext_->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
}

void SkiaGPUContext::RegisterVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::RegisterVulkanErrorCallback, grContext_ is nullptr");
        return;
    }
    grContext_->registerVulkanErrorCallback(vulkanErrorCallback);
}

void SkiaGPUContext::PurgeUnlockAndSafeCacheGpuResources()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockAndSafeCacheGpuResources, grContext_ is nullptr");
        return;
    }
    grContext_->purgeUnlockAndSafeCacheGpuResources();
}

void SkiaGPUContext::PurgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet,
    const std::set<pid_t>& protectedPidSet)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeCacheBetweenFrames, grContext_ is nullptr");
        return;
    }
    grContext_->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
}

void SkiaGPUContext::ReleaseByTag(const GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::ReleaseByTag, grContext_ is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->releaseByTag(grTag);
}

void SkiaGPUContext::ResetContext()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::ResetContext, grContext_ is nullptr");
        return;
    }
    grContext_->resetContext();
}

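// The memory-dump entry points below unwrap the SkTraceMemoryDump held by the
// Drawing-side TraceMemoryDump wrapper before delegating to Skia.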
void SkiaGPUContext::DumpMemoryStatisticsByTag(TraceMemoryDump* traceMemoryDump, GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::DumpMemoryStatisticsByTag, grContext_ is nullptr");
        return;
    }

    if (!traceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatisticsByTag, traceMemoryDump is nullptr");
        return;
    }
    SkTraceMemoryDump* skTraceMemoryDump = traceMemoryDump->GetImpl<SkiaTraceMemoryDump>()->GetTraceMemoryDump().get();
    if (!skTraceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatisticsByTag, skTraceMemoryDump is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->dumpMemoryStatisticsByTag(skTraceMemoryDump, grTag);
}

void SkiaGPUContext::DumpMemoryStatistics(TraceMemoryDump* traceMemoryDump)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::DumpMemoryStatistics, grContext_ is nullptr");
        return;
    }

    if (!traceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatistics, traceMemoryDump is nullptr");
        return;
    }
    SkTraceMemoryDump* skTraceMemoryDump = traceMemoryDump->GetImpl<SkiaTraceMemoryDump>()->GetTraceMemoryDump().get();
    if (!skTraceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatistics, skTraceMemoryDump is nullptr");
        return;
    }
    grContext_->dumpMemoryStatistics(skTraceMemoryDump);
}

void SkiaGPUContext::SetCurrentGpuResourceTag(const GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetCurrentGpuResourceTag, grContext_ is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->setCurrentGrResourceTag(grTag);
}

sk_sp<GrDirectContext> SkiaGPUContext::GetGrContext() const
{
    return grContext_;
}

void SkiaGPUContext::SetGrContext(const sk_sp<GrDirectContext>& grContext)
{
    grContext_ = grContext;
}

void SkiaGPUContext::GetUpdatedMemoryMap(std::unordered_map<pid_t, size_t>& out)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::GetUpdatedMemoryMap, grContext_ is nullptr");
        return;
    }
    grContext_->getUpdatedMemoryMap(out);
}

void SkiaGPUContext::InitGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::InitGpuMemoryLimit, grContext_ is nullptr");
        return;
    }
    grContext_->initGpuMemoryLimit(callback, size);
}

#ifdef RS_ENABLE_VK
void SkiaGPUContext::StoreVkPipelineCacheData()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::StoreVkPipelineCacheData, grContext_ is nullptr");
        return;
    }
    grContext_->storeVkPipelineCacheData();
}
#endif

void SkiaGPUContext::BeginFrame()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::BeginFrame, grContext_ is nullptr");
        return;
    }
    grContext_->beginFrame();
}

void SkiaGPUContext::EndFrame()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::EndFrame, grContext_ is nullptr");
        return;
    }
    grContext_->endFrame();
}

void SkiaGPUContext::SetGpuCacheSuppressWindowSwitch(bool enabled)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetGpuCacheSuppressWindowSwitch, grContext_ is nullptr");
        return;
    }
    grContext_->setGpuCacheSuppressWindowSwitch(enabled);
}

void SkiaGPUContext::SetGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetGpuMemoryAsyncReclaimerSwitch, grContext_ is nullptr");
        return;
    }
    grContext_->setGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
}

void SkiaGPUContext::FlushGpuMemoryInWaitQueue()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::FlushGpuMemoryInWaitQueue, grContext_ is nullptr");
        return;
    }
    grContext_->flushGpuMemoryInWaitQueue();
}

void SkiaGPUContext::SuppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SuppressGpuCacheBelowCertainRatio, grContext_ is nullptr");
        return;
    }
    grContext_->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
}

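// Maps a raw GrDirectContext pointer to the task-posting callback registered for it,
// so callers can later look up how to post work associated with a given context.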
std::unordered_map<uintptr_t, std::function<void(const std::function<void()>& task)>>
    SkiaGPUContext::contextPostMap_ = {};

void SkiaGPUContext::RegisterPostFunc(const std::function<void(const std::function<void()>& task)>& func)
{
    std::unique_lock lock(g_registrarMutex);
    if (grContext_ != nullptr) {
        contextPostMap_[uintptr_t(grContext_.get())] = func;
    }
}

std::function<void(const std::function<void()>& task)> SkiaGPUContext::GetPostFunc(sk_sp<GrDirectContext> grContext)
{
    std::unique_lock lock(g_registrarMutex);
    if (grContext != nullptr && contextPostMap_.count(uintptr_t(grContext.get())) > 0) {
        return contextPostMap_[uintptr_t(grContext.get())];
    }
    return nullptr;
}

void SkiaGPUContext::VmaDefragment()
{
    if (grContext_ != nullptr) {
        grContext_->vmaDefragment();
    }
}
} // namespace Drawing
} // namespace Rosen
} // namespace OHOS