1 /*
2  * Copyright 2018 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 
9 #include "include/gpu/GrDirectContext.h"
10 
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
33 #if SK_GPU_V1
34 #include "src/gpu/ops/SmallPathAtlasMgr.h"
35 #else
36 // A vestigial definition for v2 that will never be instantiated
37 namespace skgpu::v1 {
38 class SmallPathAtlasMgr {
39 public:
40     SmallPathAtlasMgr() { SkASSERT(0); }
41     void reset() { SkASSERT(0); }
42 };
43 }
44 #endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62 
63 #if GR_TEST_UTILS
64 #   include "include/utils/SkRandom.h"
65 #   if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 #       include <sanitizer/lsan_interface.h>
67 #   endif
68 #endif
69 
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71 
72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73     static std::atomic<uint32_t> nextID{1};
74     uint32_t id;
75     do {
76         id = nextID.fetch_add(1, std::memory_order_relaxed);
77     } while (id == SK_InvalidUniqueID);
78     return DirectContextID(id);
79 }
80 
81 GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
82         : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
83         , fDirectContextID(DirectContextID::Next()) {
84 }
85 
86 GrDirectContext::~GrDirectContext() {
87     ASSERT_SINGLE_OWNER
88     // this if-test protects against the case where the context is being destroyed
89     // before having been fully created
90     if (fGpu) {
91         this->flushAndSubmit();
92     }
93 
94     // We need to make sure all work is finished on the gpu before we start releasing resources.
95     this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);
96 
97     this->destroyDrawingManager();
98 
99     // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
100     if (fResourceCache) {
101         fResourceCache->releaseAll();
102     }
103     // This has to be after GrResourceCache::releaseAll so that other threads that are holding
104     // async pixel results don't try to destroy buffers off thread.
105     fMappedBufferManager.reset();
106 }
107 
108 sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
109     return INHERITED::threadSafeProxy();
110 }
111 
112 void GrDirectContext::resetGLTextureBindings() {
113     if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114         return;
115     }
116     fGpu->resetTextureBindings();
117 }
118 
119 void GrDirectContext::resetContext(uint32_t state) {
120     ASSERT_SINGLE_OWNER
121     fGpu->markContextDirty(state);
122 }
123 
124 void GrDirectContext::abandonContext() {
125     if (INHERITED::abandoned()) {
126         return;
127     }
128 
129     INHERITED::abandonContext();
130 
131     // We need to make sure all work is finished on the gpu before we start releasing resources.
132     this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());
133 
134     fStrikeCache->freeAll();
135 
136     fMappedBufferManager->abandon();
137 
138     fResourceProvider->abandon();
139 
140     // abandon first so destructors don't try to free the resources in the API.
141     fResourceCache->abandonAll();
142 
143     fGpu->disconnect(GrGpu::DisconnectType::kAbandon);
144 
145     if (fSmallPathAtlasMgr) {
146         fSmallPathAtlasMgr->reset();
147     }
148     fAtlasManager->freeAll();
149 }
150 
151 bool GrDirectContext::abandoned() {
152     if (INHERITED::abandoned()) {
153         return true;
154     }
155 
156     if (fGpu && fGpu->isDeviceLost()) {
157         this->abandonContext();
158         return true;
159     }
160     return false;
161 }
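// Because abandoned() also performs the device-lost check above, callers can use it as a
// lightweight health probe. A minimal sketch (assumes `dContext` is a valid
// sk_sp<GrDirectContext>; recreateGpuContext() is a hypothetical app-level helper):
//
//     if (dContext->abandoned()) {
//         // Every GPU-backed SkSurface/SkImage tied to this context is now unusable.
//         dContext = recreateGpuContext();
//     }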
162 
163 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
164 
165 void GrDirectContext::releaseResourcesAndAbandonContext() {
166     if (INHERITED::abandoned()) {
167         return;
168     }
169 
170     INHERITED::abandonContext();
171 
172     // We need to make sure all work is finished on the gpu before we start releasing resources.
173     this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);
174 
175     fResourceProvider->abandon();
176 
177     // Release all resources in the backend 3D API.
178     fResourceCache->releaseAll();
179 
180     // Must be after GrResourceCache::releaseAll().
181     fMappedBufferManager.reset();
182 
183     fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
184     if (fSmallPathAtlasMgr) {
185         fSmallPathAtlasMgr->reset();
186     }
187     fAtlasManager->freeAll();
188 }
189 
190 void GrDirectContext::freeGpuResources() {
191     ASSERT_SINGLE_OWNER
192 
193     if (this->abandoned()) {
194         return;
195     }
196 
197     this->flushAndSubmit();
198     if (fSmallPathAtlasMgr) {
199         fSmallPathAtlasMgr->reset();
200     }
201     fAtlasManager->freeAll();
202     // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
203     // place to purge blobs.
204     this->getTextBlobCache()->freeAll();
205     // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
206     // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
207     fStrikeCache->freeAll();
208 
209     this->drawingManager()->freeGpuResources();
210 
211     fResourceCache->purgeUnlockedResources();
212 }
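// A sketch of how a client might react to memory pressure with the calls above (assumes a
// valid `dContext`; the trade-off is aggressiveness now vs. re-upload cost later):
//
//     dContext->freeGpuResources();                                     // drop everything unlocked
//     dContext->purgeUnlockedResources(/*scratchResourcesOnly=*/true);  // or: scratch resources only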
213 
214 bool GrDirectContext::init() {
215     ASSERT_SINGLE_OWNER
216     if (!fGpu) {
217         return false;
218     }
219 
220     fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
221     if (!INHERITED::init()) {
222         return false;
223     }
224 
225     SkASSERT(this->getTextBlobCache());
226     SkASSERT(this->threadSafeCache());
227 
228     fStrikeCache = std::make_unique<GrStrikeCache>();
229     fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
230                                                        this->directContextID(),
231                                                        this->contextID());
232     fResourceCache->setProxyProvider(this->proxyProvider());
233     fResourceCache->setThreadSafeCache(this->threadSafeCache());
234 #if GR_TEST_UTILS
235     if (this->options().fResourceCacheLimitOverride != -1) {
236         this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
237     }
238 #endif
239     fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
240                                                              this->singleOwner());
241     fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());
242 
243     fDidTestPMConversions = false;
244 
245     // DDL TODO: we need to think through how the task group & persistent cache
246     // get passed on to/shared between all the DDLRecorders created with this context.
247     if (this->options().fExecutor) {
248         fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
249     }
250 
251     fPersistentCache = this->options().fPersistentCache;
252 
253     GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
254     if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
255         // multitexturing supported only if range can represent the index + texcoords fully
256         !(this->caps()->shaderCaps()->floatIs32Bits() ||
257         this->caps()->shaderCaps()->integerSupport())) {
258         allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
259     } else {
260         allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
261     }
262 
263     GrProxyProvider* proxyProvider = this->priv().proxyProvider();
264 
265     fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
266                                                      this->options().fGlyphCacheTextureMaximumBytes,
267                                                      allowMultitexturing);
268     this->priv().addOnFlushCallbackObject(fAtlasManager.get());
269 
270     return true;
271 }
272 
273 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
274     ASSERT_SINGLE_OWNER
275 
276     if (resourceCount) {
277         *resourceCount = fResourceCache->getBudgetedResourceCount();
278     }
279     if (resourceBytes) {
280         *resourceBytes = fResourceCache->getBudgetedResourceBytes();
281     }
282 }
283 
284 size_t GrDirectContext::getResourceCachePurgeableBytes() const {
285     ASSERT_SINGLE_OWNER
286     return fResourceCache->getPurgeableBytes();
287 }
288 
289 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
290     ASSERT_SINGLE_OWNER
291     if (maxResources) {
292         *maxResources = -1;
293     }
294     if (maxResourceBytes) {
295         *maxResourceBytes = this->getResourceCacheLimit();
296     }
297 }
298 
299 size_t GrDirectContext::getResourceCacheLimit() const {
300     ASSERT_SINGLE_OWNER
301     return fResourceCache->getMaxResourceBytes();
302 }
303 
304 void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
305     ASSERT_SINGLE_OWNER
306     this->setResourceCacheLimit(maxResourceBytes);
307 }
308 
309 void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
310     ASSERT_SINGLE_OWNER
311     fResourceCache->setLimit(maxResourceBytes);
312 }
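// Usage sketch for the cache-budget accessors above (assumes a valid `dContext`; the numbers
// are illustrative only):
//
//     size_t budget = dContext->getResourceCacheLimit();    // current byte budget
//     dContext->setResourceCacheLimit(budget / 2);          // e.g. shrink when backgrounded
//     int count; size_t bytes;
//     dContext->getResourceCacheUsage(&count, &bytes);      // what is actually held right now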
313 
314 void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
315     ASSERT_SINGLE_OWNER
316 
317     if (this->abandoned()) {
318         return;
319     }
320 
321     fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
322     fResourceCache->purgeAsNeeded();
323 
324     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
325     // place to purge stale blobs
326     this->getTextBlobCache()->purgeStaleBlobs();
327 
328     fGpu->releaseUnlockedBackendObjects();
329 }
330 
331 void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
332     ASSERT_SINGLE_OWNER
333 
334     if (this->abandoned()) {
335         return;
336     }
337 
338     fResourceCache->purgeUnlockAndSafeCacheGpuResources();
339     fResourceCache->purgeAsNeeded();
340 
341     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
342     // place to purge stale blobs
343     this->getTextBlobCache()->purgeStaleBlobs();
344 
345     fGpu->releaseUnlockedBackendObjects();
346 }
347 
348 std::array<int, 2> GrDirectContext::CalcHpsBluredImageDimension(const SkBlurArg& blurArg) {
349     return fGpu->GetHpsDimension(blurArg);
350 }
351 
352 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
353     ASSERT_SINGLE_OWNER
354     fResourceCache->purgeUnlockedResourcesByTag(scratchResourcesOnly, tag);
355     fResourceCache->purgeAsNeeded();
356 
357     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
358     // place to purge stale blobs
359     this->getTextBlobCache()->purgeStaleBlobs();
360 }
361 
362 void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet) {
363     ASSERT_SINGLE_OWNER
364     fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
365     fResourceCache->purgeAsNeeded();
366 
367     // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
368     // place to purge blobs.
369     this->getTextBlobCache()->freeAll();
370     // The StrikeCache indirectly references typeface, and in order to dereference the typeface,
371     // it is necessary to clear the StrikeCache when the application exits.
372     fStrikeCache->freeAll();
373 }
374 
375 void GrDirectContext::registerVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback)
376 {
377     vulkanErrorCallback_ = vulkanErrorCallback;
378 }
379 
380 void GrDirectContext::processVulkanError()
381 {
382     if (vulkanErrorCallback_) {
383         vulkanErrorCallback_();
384     } else {
385         SK_LOGE("checkVkResult vulkanErrorCallback_ nullptr");
386     }
387 }
388 
389 void GrDirectContext::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
390         const std::set<int>& protectedPidSet)
391 {
392     ASSERT_SINGLE_OWNER
393 
394     if (this->abandoned()) {
395         return;
396     }
397 
398     fResourceCache->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
399 }
400 
401 void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
402                                              bool scratchResourcesOnly) {
403     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
404 
405     ASSERT_SINGLE_OWNER
406 
407     if (this->abandoned()) {
408         return;
409     }
410 
411     this->checkAsyncWorkCompletion();
412     fMappedBufferManager->process();
413     auto purgeTime = GrStdSteadyClock::now() - msNotUsed;
414 
415     fResourceCache->purgeAsNeeded();
416     fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);
417 
418     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
419     // place to purge stale blobs
420     this->getTextBlobCache()->purgeStaleBlobs();
421 }
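// Typical caller-side sketch: run the cleanup on a timer or once per frame so resources that
// have sat idle are returned to the backend (assumes a valid `dContext`; the 5-second window
// is illustrative):
//
//     dContext->performDeferredCleanup(std::chrono::milliseconds(5000),
//                                      /*scratchResourcesOnly=*/false);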
422 
423 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
424     ASSERT_SINGLE_OWNER
425 
426     if (this->abandoned()) {
427         return;
428     }
429 
430     fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
431 }
432 
433 ////////////////////////////////////////////////////////////////////////////////
434 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
435                            bool deleteSemaphoresAfterWait) {
436     if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
437         return false;
438     }
439     GrWrapOwnership ownership =
440             deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
441     for (int i = 0; i < numSemaphores; ++i) {
442         std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
443                 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
444         // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
445         // to begin with. Therefore, it is fine to not wait on it.
446         if (sema) {
447             fGpu->waitSemaphore(sema.get());
448         }
449     }
450     return true;
451 }
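// Sketch of handing an externally signaled semaphore to the context (Vulkan-flavored; other
// backends initialize GrBackendSemaphore differently, and `vkSemaphore` is assumed to come
// from the producer of the shared resource):
//
//     GrBackendSemaphore waitSem;
//     waitSem.initVulkan(vkSemaphore);
//     dContext->wait(1, &waitSem, /*deleteSemaphoresAfterWait=*/false);
//     // GPU work flushed after this point will not execute until vkSemaphore is signaled.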
452 
453 skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
454 #if SK_GPU_V1
455     if (!fSmallPathAtlasMgr) {
456         fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();
457 
458         this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
459     }
460 
461     if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
462         return nullptr;
463     }
464 #endif
465 
466     return fSmallPathAtlasMgr.get();
467 }
468 
469 ////////////////////////////////////////////////////////////////////////////////
470 
471 GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
472     ASSERT_SINGLE_OWNER
473     if (this->abandoned()) {
474         if (info.fFinishedProc) {
475             info.fFinishedProc(info.fFinishedContext);
476         }
477         if (info.fSubmittedProc) {
478             info.fSubmittedProc(info.fSubmittedContext, false);
479         }
480         return GrSemaphoresSubmitted::kNo;
481     }
482 
483     return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
484                                                  info, nullptr);
485 }
486 
487 bool GrDirectContext::submit(bool syncCpu) {
488     ASSERT_SINGLE_OWNER
489     if (this->abandoned()) {
490         return false;
491     }
492 
493     if (!fGpu) {
494         return false;
495     }
496 
497     return fGpu->submitToGpu(syncCpu);
498 }
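// flush() only hands recorded work to the backend; submit() is what actually sends command
// buffers to the GPU. A minimal sketch of the usual pairing (assumes a valid `dContext`):
//
//     GrFlushInfo flushInfo;
//     flushInfo.fFinishedProc = [](GrGpuFinishedContext) { /* GPU finished this flush */ };
//     dContext->flush(flushInfo);
//     dContext->submit(/*syncCpu=*/false);   // pass true to block until the GPU is done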
499 
500 ////////////////////////////////////////////////////////////////////////////////
501 
502 void GrDirectContext::checkAsyncWorkCompletion() {
503     if (fGpu) {
504         fGpu->checkFinishProcs();
505     }
506 }
507 
508 void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
509     if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
510         fGpu->finishOutstandingGpuWork();
511         this->checkAsyncWorkCompletion();
512     }
513 }
514 
515 ////////////////////////////////////////////////////////////////////////////////
516 
517 void GrDirectContext::storeVkPipelineCacheData() {
518     if (fGpu) {
519         fGpu->storeVkPipelineCacheData();
520     }
521 }
522 
523 ////////////////////////////////////////////////////////////////////////////////
524 
525 bool GrDirectContext::supportsDistanceFieldText() const {
526     return this->caps()->shaderCaps()->supportsDistanceFieldText();
527 }
528 
529 //////////////////////////////////////////////////////////////////////////////
530 void GrDirectContext::dumpAllResource(std::stringstream &dump) const {
531 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
532     if (fResourceCache) {
533         fResourceCache->dumpAllResource(dump);
534     }
535 #endif
536 }
537 
538 void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
539     ASSERT_SINGLE_OWNER
540     fResourceCache->dumpMemoryStatistics(traceMemoryDump);
541     traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
542                                       this->getTextBlobCache()->usedBytes());
543 }
544 
545 void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
546     ASSERT_SINGLE_OWNER
547     fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
548     traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
549         this->getTextBlobCache()->usedBytes());
550 }
551 
552 void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
553     if (fResourceCache) {
554         return fResourceCache->setCurrentGrResourceTag(tag);
555     }
556 }
557 
558 void GrDirectContext::popGrResourceTag()
559 {
560     if (fResourceCache) {
561         return fResourceCache->popGrResourceTag();
562     }
563 }
564 
565 GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
566     if (fResourceCache) {
567         return fResourceCache->getCurrentGrResourceTag();
568     }
569     return {};
570 }
571 void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
572     if (fResourceCache) {
573         fResourceCache->releaseByTag(tag);
574     }
575 }
576 std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
577     if (fResourceCache) {
578         return fResourceCache->getAllGrGpuResourceTags();
579     }
580     return {};
581 }
582 
583 #ifdef SKIA_OHOS
584 // OH ISSUE: set purgeable resource max count limit.
585 void GrDirectContext::setPurgeableResourceLimit(int purgeableMaxCount)
586 {
587     ASSERT_SINGLE_OWNER
588     if (fResourceCache) {
589         fResourceCache->setPurgeableResourceLimit(purgeableMaxCount);
590     }
591 }
592 #endif
593 
594 // OH ISSUE: get the memory information of the updated pid.
595 void GrDirectContext::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
596 {
597     if (fResourceCache) {
598         fResourceCache->getUpdatedMemoryMap(out);
599     }
600 }
601 
602 // OH ISSUE: init gpu memory limit.
603 void GrDirectContext::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
604 {
605     if (fResourceCache) {
606         fResourceCache->initGpuMemoryLimit(callback, size);
607     }
608 }
609 
610 // OH ISSUE: check whether the PID is abnormal.
611 bool GrDirectContext::isPidAbnormal() const
612 {
613     if (fResourceCache) {
614         return fResourceCache->isPidAbnormal();
615     }
616     return false;
617 }
618 
619 void GrDirectContext::vmaDefragment()
620 {
621     if (fGpu) {
622         fGpu->vmaDefragment();
623     }
624 }
625 
626 void GrDirectContext::dumpVmaStats(SkString *out)
627 {
628     if (out == nullptr) {
629         return;
630     }
631     if (fGpu) {
632         fGpu->dumpVmaStats(out);
633     }
634 }
635 
636 // OH ISSUE: intra frame and inter frame identification
637 void GrDirectContext::beginFrame()
638 {
639 #ifdef SK_VULKAN
640     if (fResourceCache) {
641         fResourceCache->beginFrame();
642     }
643 #endif
644 }
645 
646 // OH ISSUE: intra frame and inter frame identification
647 void GrDirectContext::endFrame()
648 {
649 #ifdef SK_VULKAN
650     if (fResourceCache) {
651         fResourceCache->endFrame();
652     }
653 #endif
654 }
655 
656 // OH ISSUE: async memory reclaimer
657 void GrDirectContext::setGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority)
658 {
659 #ifdef SK_VULKAN
660     if (fGpu) {
661         fGpu->setGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
662     }
663 #endif
664 }
665 
666 // OH ISSUE: async memory reclaimer
667 void GrDirectContext::flushGpuMemoryInWaitQueue()
668 {
669 #ifdef SK_VULKAN
670     if (fGpu) {
671         fGpu->flushGpuMemoryInWaitQueue();
672     }
673 #endif
674 }
675 
676 // OH ISSUE: suppress release window
677 void GrDirectContext::setGpuCacheSuppressWindowSwitch(bool enabled)
678 {
679 #ifdef SK_VULKAN
680     ASSERT_SINGLE_OWNER
681 
682     if (this->abandoned()) {
683         return;
684     }
685 
686     fResourceCache->setGpuCacheSuppressWindowSwitch(enabled);
687 #endif
688 }
689 
690 // OH ISSUE: suppress release window
691 void GrDirectContext::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
692 {
693 #ifdef SK_VULKAN
694     ASSERT_SINGLE_OWNER
695 
696     if (this->abandoned()) {
697         return;
698     }
699 
700     fResourceCache->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
701 #endif
702 }
703 
704 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
705                                                        const GrBackendFormat& backendFormat,
706                                                        GrMipmapped mipMapped,
707                                                        GrRenderable renderable,
708                                                        GrProtected isProtected) {
709     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
710     if (this->abandoned()) {
711         return GrBackendTexture();
712     }
713 
714     return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
715                                       mipMapped, isProtected);
716 }
717 
718 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
719                                                        SkColorType skColorType,
720                                                        GrMipmapped mipMapped,
721                                                        GrRenderable renderable,
722                                                        GrProtected isProtected) {
723     if (this->abandoned()) {
724         return GrBackendTexture();
725     }
726 
727     const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
728 
729     return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
730 }
731 
732 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
733                                                          SkISize dimensions,
734                                                          const GrBackendFormat& backendFormat,
735                                                          GrMipmapped mipMapped,
736                                                          GrRenderable renderable,
737                                                          GrProtected isProtected,
738                                                          sk_sp<GrRefCntedCallback> finishedCallback,
739                                                          std::array<float, 4> color) {
740     GrGpu* gpu = dContext->priv().getGpu();
741     GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
742                                                        mipMapped, isProtected);
743     if (!beTex.isValid()) {
744         return {};
745     }
746 
747     if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
748                                                         std::move(finishedCallback),
749                                                         color)) {
750         dContext->deleteBackendTexture(beTex);
751         return {};
752     }
753     return beTex;
754 }
755 
756 static bool update_texture_with_pixmaps(GrDirectContext* context,
757                                         const SkPixmap src[],
758                                         int numLevels,
759                                         const GrBackendTexture& backendTexture,
760                                         GrSurfaceOrigin textureOrigin,
761                                         sk_sp<GrRefCntedCallback> finishedCallback) {
762     GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
763     const GrBackendFormat& format = backendTexture.getBackendFormat();
764 
765     if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
766         return false;
767     }
768 
769     auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
770                                                                      kBorrow_GrWrapOwnership,
771                                                                      GrWrapCacheable::kNo,
772                                                                      kRW_GrIOType,
773                                                                      std::move(finishedCallback));
774     if (!proxy) {
775         return false;
776     }
777 
778     GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
779     GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
780     skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
781     SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
782     for (int i = 0; i < numLevels; ++i) {
783         tmpSrc[i] = src[i];
784     }
785     if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
786         return false;
787     }
788 
789     GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
790     GrFlushInfo info;
791     context->priv().drawingManager()->flushSurfaces({&p, 1},
792                                                     SkSurface::BackendSurfaceAccess::kNoAccess,
793                                                     info,
794                                                     nullptr);
795     return true;
796 }
797 
798 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
799                                                        const GrBackendFormat& backendFormat,
800                                                        const SkColor4f& color,
801                                                        GrMipmapped mipMapped,
802                                                        GrRenderable renderable,
803                                                        GrProtected isProtected,
804                                                        GrGpuFinishedProc finishedProc,
805                                                        GrGpuFinishedContext finishedContext) {
806     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
807 
808     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
809     if (this->abandoned()) {
810         return {};
811     }
812 
813     return create_and_clear_backend_texture(this,
814                                             {width, height},
815                                             backendFormat,
816                                             mipMapped,
817                                             renderable,
818                                             isProtected,
819                                             std::move(finishedCallback),
820                                             color.array());
821 }
822 
823 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
824                                                        SkColorType skColorType,
825                                                        const SkColor4f& color,
826                                                        GrMipmapped mipMapped,
827                                                        GrRenderable renderable,
828                                                        GrProtected isProtected,
829                                                        GrGpuFinishedProc finishedProc,
830                                                        GrGpuFinishedContext finishedContext) {
831     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
832 
833     if (this->abandoned()) {
834         return {};
835     }
836 
837     GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
838     if (!format.isValid()) {
839         return {};
840     }
841 
842     GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
843     SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);
844 
845     return create_and_clear_backend_texture(this,
846                                             {width, height},
847                                             format,
848                                             mipMapped,
849                                             renderable,
850                                             isProtected,
851                                             std::move(finishedCallback),
852                                             swizzledColor.array());
853 }
854 
855 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
856                                                        int numProvidedLevels,
857                                                        GrSurfaceOrigin textureOrigin,
858                                                        GrRenderable renderable,
859                                                        GrProtected isProtected,
860                                                        GrGpuFinishedProc finishedProc,
861                                                        GrGpuFinishedContext finishedContext) {
862     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
863 
864     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
865 
866     if (this->abandoned()) {
867         return {};
868     }
869 
870     if (!srcData || numProvidedLevels <= 0) {
871         return {};
872     }
873 
874     SkColorType colorType = srcData[0].colorType();
875 
876     GrMipmapped mipMapped = GrMipmapped::kNo;
877     if (numProvidedLevels > 1) {
878         mipMapped = GrMipmapped::kYes;
879     }
880 
881     GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
882     GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
883                                                         srcData[0].height(),
884                                                         backendFormat,
885                                                         mipMapped,
886                                                         renderable,
887                                                         isProtected);
888     if (!beTex.isValid()) {
889         return {};
890     }
891     if (!update_texture_with_pixmaps(this,
892                                      srcData,
893                                      numProvidedLevels,
894                                      beTex,
895                                      textureOrigin,
896                                      std::move(finishedCallback))) {
897         this->deleteBackendTexture(beTex);
898         return {};
899     }
900     return beTex;
901 }
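// End-to-end sketch for the pixmap-based overload above (assumes a valid `dContext`; the
// 64x64 green bitmap is illustrative, and the caller owns the returned backend texture):
//
//     SkBitmap bmp;
//     bmp.allocPixels(SkImageInfo::MakeN32Premul(64, 64));
//     bmp.eraseColor(SK_ColorGREEN);
//     SkPixmap level0;
//     bmp.peekPixels(&level0);
//     GrBackendTexture tex = dContext->createBackendTexture(&level0, 1,
//                                                            kTopLeft_GrSurfaceOrigin,
//                                                            GrRenderable::kNo,
//                                                            GrProtected::kNo,
//                                                            /*finishedProc=*/nullptr,
//                                                            /*finishedContext=*/nullptr);
//     // ... wrap it in an SkImage or SkSurface, draw with it ...
//     dContext->deleteBackendTexture(tex);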
902 
903 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
904                                            const SkColor4f& color,
905                                            GrGpuFinishedProc finishedProc,
906                                            GrGpuFinishedContext finishedContext) {
907     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
908 
909     if (this->abandoned()) {
910         return false;
911     }
912 
913     return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
914 }
915 
916 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
917                                            SkColorType skColorType,
918                                            const SkColor4f& color,
919                                            GrGpuFinishedProc finishedProc,
920                                            GrGpuFinishedContext finishedContext) {
921     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
922 
923     if (this->abandoned()) {
924         return false;
925     }
926 
927     GrBackendFormat format = backendTexture.getBackendFormat();
928     GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
929 
930     if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
931         return false;
932     }
933 
934     GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
935     SkColor4f swizzledColor = swizzle.applyTo(color);
936 
937     return fGpu->clearBackendTexture(backendTexture,
938                                      std::move(finishedCallback),
939                                      swizzledColor.array());
940 }
941 
942 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
943                                            const SkPixmap srcData[],
944                                            int numLevels,
945                                            GrSurfaceOrigin textureOrigin,
946                                            GrGpuFinishedProc finishedProc,
947                                            GrGpuFinishedContext finishedContext) {
948     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
949 
950     if (this->abandoned()) {
951         return false;
952     }
953 
954     if (!srcData || numLevels <= 0) {
955         return false;
956     }
957 
958     // If the texture has MIP levels then we require that the full set is overwritten.
959     int numExpectedLevels = 1;
960     if (backendTexture.hasMipmaps()) {
961         numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
962                                                         backendTexture.height()) + 1;
963     }
964     if (numLevels != numExpectedLevels) {
965         return false;
966     }
967     return update_texture_with_pixmaps(this,
968                                        srcData,
969                                        numLevels,
970                                        backendTexture,
971                                        textureOrigin,
972                                        std::move(finishedCallback));
973 }
974 
975 //////////////////////////////////////////////////////////////////////////////
976 
977 static GrBackendTexture create_and_update_compressed_backend_texture(
978         GrDirectContext* dContext,
979         SkISize dimensions,
980         const GrBackendFormat& backendFormat,
981         GrMipmapped mipMapped,
982         GrProtected isProtected,
983         sk_sp<GrRefCntedCallback> finishedCallback,
984         const void* data,
985         size_t size) {
986     GrGpu* gpu = dContext->priv().getGpu();
987 
988     GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
989                                                                  mipMapped, isProtected);
990     if (!beTex.isValid()) {
991         return {};
992     }
993 
994     if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
995                 beTex, std::move(finishedCallback), data, size)) {
996         dContext->deleteBackendTexture(beTex);
997         return {};
998     }
999     return beTex;
1000 }
1001 
1002 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1003         int width, int height,
1004         const GrBackendFormat& backendFormat,
1005         const SkColor4f& color,
1006         GrMipmapped mipmapped,
1007         GrProtected isProtected,
1008         GrGpuFinishedProc finishedProc,
1009         GrGpuFinishedContext finishedContext) {
1010     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1011     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1012 
1013     if (this->abandoned()) {
1014         return {};
1015     }
1016 
1017     SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
1018     if (compression == SkImage::CompressionType::kNone) {
1019         return {};
1020     }
1021 
1022     size_t size = SkCompressedDataSize(compression,
1023                                        {width, height},
1024                                        nullptr,
1025                                        mipmapped == GrMipmapped::kYes);
1026     auto storage = std::make_unique<char[]>(size);
1027     GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
1028     return create_and_update_compressed_backend_texture(this,
1029                                                         {width, height},
1030                                                         backendFormat,
1031                                                         mipmapped,
1032                                                         isProtected,
1033                                                         std::move(finishedCallback),
1034                                                         storage.get(),
1035                                                         size);
1036 }
1037 
1038 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1039         int width, int height,
1040         SkImage::CompressionType compression,
1041         const SkColor4f& color,
1042         GrMipmapped mipMapped,
1043         GrProtected isProtected,
1044         GrGpuFinishedProc finishedProc,
1045         GrGpuFinishedContext finishedContext) {
1046     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1047     GrBackendFormat format = this->compressedBackendFormat(compression);
1048     return this->createCompressedBackendTexture(width, height, format, color,
1049                                                 mipMapped, isProtected, finishedProc,
1050                                                 finishedContext);
1051 }
1052 
1053 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1054         int width, int height,
1055         const GrBackendFormat& backendFormat,
1056         const void* compressedData,
1057         size_t dataSize,
1058         GrMipmapped mipMapped,
1059         GrProtected isProtected,
1060         GrGpuFinishedProc finishedProc,
1061         GrGpuFinishedContext finishedContext) {
1062     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1063     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1064 
1065     if (this->abandoned()) {
1066         return {};
1067     }
1068 
1069     return create_and_update_compressed_backend_texture(this,
1070                                                         {width, height},
1071                                                         backendFormat,
1072                                                         mipMapped,
1073                                                         isProtected,
1074                                                         std::move(finishedCallback),
1075                                                         compressedData,
1076                                                         dataSize);
1077 }
1078 
1079 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1080         int width, int height,
1081         SkImage::CompressionType compression,
1082         const void* data, size_t dataSize,
1083         GrMipmapped mipMapped,
1084         GrProtected isProtected,
1085         GrGpuFinishedProc finishedProc,
1086         GrGpuFinishedContext finishedContext) {
1087     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1088     GrBackendFormat format = this->compressedBackendFormat(compression);
1089     return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
1090                                                 isProtected, finishedProc, finishedContext);
1091 }
1092 
1093 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1094                                                      const SkColor4f& color,
1095                                                      GrGpuFinishedProc finishedProc,
1096                                                      GrGpuFinishedContext finishedContext) {
1097     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1098 
1099     if (this->abandoned()) {
1100         return false;
1101     }
1102 
1103     SkImage::CompressionType compression =
1104             GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1105     if (compression == SkImage::CompressionType::kNone) {
1106         return {};
1107     }
1108     size_t size = SkCompressedDataSize(compression,
1109                                        backendTexture.dimensions(),
1110                                        nullptr,
1111                                        backendTexture.hasMipmaps());
1112     SkAutoMalloc storage(size);
1113     GrFillInCompressedData(compression,
1114                            backendTexture.dimensions(),
1115                            backendTexture.mipmapped(),
1116                            static_cast<char*>(storage.get()),
1117                            color);
1118     return fGpu->updateCompressedBackendTexture(backendTexture,
1119                                                 std::move(finishedCallback),
1120                                                 storage.get(),
1121                                                 size);
1122 }
1123 
1124 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1125                                                      const void* compressedData,
1126                                                      size_t dataSize,
1127                                                      GrGpuFinishedProc finishedProc,
1128                                                      GrGpuFinishedContext finishedContext) {
1129     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1130 
1131     if (this->abandoned()) {
1132         return false;
1133     }
1134 
1135     if (!compressedData) {
1136         return false;
1137     }
1138 
1139     return fGpu->updateCompressedBackendTexture(backendTexture,
1140                                                 std::move(finishedCallback),
1141                                                 compressedData,
1142                                                 dataSize);
1143 }
1144 
1145 //////////////////////////////////////////////////////////////////////////////
1146 
1147 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1148                                              const GrBackendSurfaceMutableState& state,
1149                                              GrBackendSurfaceMutableState* previousState,
1150                                              GrGpuFinishedProc finishedProc,
1151                                              GrGpuFinishedContext finishedContext) {
1152     auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1153 
1154     if (this->abandoned()) {
1155         return false;
1156     }
1157 
1158     return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1159 }
1160 
1161 
1162 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1163                                                   const GrBackendSurfaceMutableState& state,
1164                                                   GrBackendSurfaceMutableState* previousState,
1165                                                   GrGpuFinishedProc finishedProc,
1166                                                   GrGpuFinishedContext finishedContext) {
1167     auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1168 
1169     if (this->abandoned()) {
1170         return false;
1171     }
1172 
1173     return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1174                                              std::move(callback));
1175 }
1176 
1177 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
1178     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1179     // For the Vulkan backend we still must destroy the backend texture when the context is
1180     // abandoned.
1181     if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1182         return;
1183     }
1184 
1185     fGpu->deleteBackendTexture(backendTex);
1186 }
1187 
1188 //////////////////////////////////////////////////////////////////////////////
1189 
1190 bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
1191     return fGpu->precompileShader(key, data);
1192 }
1193 
1194 #ifdef SK_ENABLE_DUMP_GPU
1195 #include "include/core/SkString.h"
1196 #include "src/utils/SkJSONWriter.h"
1197 SkString GrDirectContext::dump() const {
1198     SkDynamicMemoryWStream stream;
1199     SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
1200     writer.beginObject();
1201 
1202     writer.appendString("backend", GrBackendApiToStr(this->backend()));
1203 
1204     writer.appendName("caps");
1205     this->caps()->dumpJSON(&writer);
1206 
1207     writer.appendName("gpu");
1208     this->fGpu->dumpJSON(&writer);
1209 
1210     writer.appendName("context");
1211     this->dumpJSON(&writer);
1212 
1213     // Flush JSON to the memory stream
1214     writer.endObject();
1215     writer.flush();
1216 
1217     // Null terminate the JSON data in the memory stream
1218     stream.write8(0);
1219 
1220     // Allocate a string big enough to hold all the data, then copy out of the stream
1221     SkString result(stream.bytesWritten());
1222     stream.copyToAndReset(result.writable_str());
1223     return result;
1224 }
1225 #endif
1226 
1227 #ifdef SK_GL
1228 
1229 /*************************************************************************************************/
1230 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1231     GrContextOptions defaultOptions;
1232     return MakeGL(std::move(glInterface), defaultOptions);
1233 }
1234 
1235 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
1236     return MakeGL(nullptr, options);
1237 }
1238 
1239 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1240     GrContextOptions defaultOptions;
1241     return MakeGL(nullptr, defaultOptions);
1242 }
1243 
1244 #if GR_TEST_UTILS
1245 GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
1246     // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
1247     // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
1248     // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
1249     // on the thing it captures. So we leak the context.
1250     struct GetErrorContext {
1251         SkRandom fRandom;
1252         GrGLFunction<GrGLGetErrorFn> fGetError;
1253     };
1254 
1255     auto errorContext = new GetErrorContext;
1256 
1257 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
1258     __lsan_ignore_object(errorContext);
1259 #endif
1260 
1261     errorContext->fGetError = original;
1262 
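    // Wrap the real GetError: when it reports success, roughly 1 in 300 calls is rewritten to
    // GR_GL_OUT_OF_MEMORY so tests exercise the driver-OOM handling paths.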
    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
        GrGLenum error = errorContext->fGetError();
        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
            error = GR_GL_OUT_OF_MEMORY;
        }
        return error;
    });
}
#endif

sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                               const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
#if GR_TEST_UTILS
    if (options.fRandomGLOOM) {
        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
        copy->fFunctions.fGetError =
                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
#if GR_GL_CHECK_ERROR
        // Suppress logging GL errors since we'll be synthetically generating them.
        copy->suppressErrorLogging();
#endif
        glInterface = std::move(copy);
    }
#endif
    direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }
    return direct;
}
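
// Illustrative usage sketch (not part of this file): with a native GL context current on this
// thread, a client would typically do something like
//
//     sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
//     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL(std::move(iface));
//
// Passing a null interface (or using the argument-less overload above) asks Skia to build the
// native interface itself, and MakeGL returns nullptr if the GrGLGpu cannot be created.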
#endif

/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
    GrContextOptions defaultOptions;
    return MakeMock(mockOptions, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
                                                 const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));

    direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}
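
// Illustrative note: the mock backend does no real GPU work, so something like
//
//     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeMock(nullptr);
//
// lets unit tests exercise resource tracking and caching without a device; a null
// GrMockOptions pointer is expected to fall back to default mock options.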

#ifdef SK_VULKAN
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
    GrContextOptions defaultOptions;
    return MakeVulkan(backendContext, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
                                                   const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));

    direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}
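
// Illustrative note: the GrVkBackendContext is expected to be fully populated by the client
// (at minimum the VkInstance, VkPhysicalDevice, VkDevice, the queue and its family index, and
// a GetProc for resolving Vulkan entry points). If GrVkGpu::Make rejects it, MakeVulkan
// returns nullptr.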
#endif

#ifdef SK_METAL
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
    GrContextOptions defaultOptions;
    return MakeMetal(backendContext, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
                                                  const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));

    direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}

// deprecated
sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
    GrContextOptions defaultOptions;
    return MakeMetal(device, queue, defaultOptions);
}

// deprecated
// Remove the include of include/gpu/mtl/GrMtlBackendContext.h, above, when this deprecated
// entry point is removed.
sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
                                                  const GrContextOptions& options) {
    GrMtlBackendContext backendContext = {};
    backendContext.fDevice.reset(device);
    backendContext.fQueue.reset(queue);

    return GrDirectContext::MakeMetal(backendContext, options);
}
#endif

#ifdef SK_DIRECT3D
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
    GrContextOptions defaultOptions;
    return MakeDirect3D(backendContext, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
                                                     const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));

    direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}
#endif

#ifdef SK_DAWN
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
    GrContextOptions defaultOptions;
    return MakeDawn(device, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
                                                 const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));

    direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}

#endif