1 /*
2  * Copyright 2018 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 
9 #include "include/gpu/GrDirectContext.h"
10 
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
33 #if SK_GPU_V1
34 #include "src/gpu/ops/SmallPathAtlasMgr.h"
35 #else
36 // A vestigial definition for v2 that will never be instantiated
37 namespace skgpu::v1 {
38 class SmallPathAtlasMgr {
39 public:
40     SmallPathAtlasMgr() { SkASSERT(0); }
41     void reset() { SkASSERT(0); }
42 };
43 }
44 #endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62 
63 #if GR_TEST_UTILS
64 #   include "include/utils/SkRandom.h"
65 #   if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 #       include <sanitizer/lsan_interface.h>
67 #   endif
68 #endif
69 
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71 
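// Produces a unique, non-zero ID for each GrDirectContext; the loop skips SK_InvalidUniqueID if the counter ever wraps around.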
72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73     static std::atomic<uint32_t> nextID{1};
74     uint32_t id;
75     do {
76         id = nextID.fetch_add(1, std::memory_order_relaxed);
77     } while (id == SK_InvalidUniqueID);
78     return DirectContextID(id);
79 }
80 
81 GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
82         : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
83         , fDirectContextID(DirectContextID::Next()) {
84 }
85 
86 GrDirectContext::~GrDirectContext() {
87     ASSERT_SINGLE_OWNER
88     // this if-test protects against the case where the context is being destroyed
89     // before having been fully created
90     if (fGpu) {
91         this->flushAndSubmit();
92     }
93 
94     // We need to make sure all work is finished on the gpu before we start releasing resources.
95     this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);
96 
97     this->destroyDrawingManager();
98 
99     // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
100     if (fResourceCache) {
101         fResourceCache->releaseAll();
102     }
103     // This has to be after GrResourceCache::releaseAll so that other threads that are holding
104     // async pixel result don't try to destroy buffers off thread.
105     fMappedBufferManager.reset();
106 }
107 
108 sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
109     return INHERITED::threadSafeProxy();
110 }
111 
112 void GrDirectContext::resetGLTextureBindings() {
113     if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114         return;
115     }
116     fGpu->resetTextureBindings();
117 }
118 
119 void GrDirectContext::resetContext(uint32_t state) {
120     ASSERT_SINGLE_OWNER
121     fGpu->markContextDirty(state);
122 }
123 
124 void GrDirectContext::abandonContext() {
125     if (INHERITED::abandoned()) {
126         return;
127     }
128 
129     INHERITED::abandonContext();
130 
131     // We need to make sure all work is finished on the gpu before we start releasing resources.
132     this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());
133 
134     fStrikeCache->freeAll();
135 
136     fMappedBufferManager->abandon();
137 
138     fResourceProvider->abandon();
139 
140     // abandon first so destructors don't try to free the resources in the API.
141     fResourceCache->abandonAll();
142 
143     fGpu->disconnect(GrGpu::DisconnectType::kAbandon);
144 
145     if (fSmallPathAtlasMgr) {
146         fSmallPathAtlasMgr->reset();
147     }
148     fAtlasManager->freeAll();
149 }
150 
151 bool GrDirectContext::abandoned() {
152     if (INHERITED::abandoned()) {
153         return true;
154     }
155 
156     if (fGpu && fGpu->isDeviceLost()) {
157         this->abandonContext();
158         return true;
159     }
160     return false;
161 }
162 
163 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
164 
165 void GrDirectContext::releaseResourcesAndAbandonContext() {
166     if (INHERITED::abandoned()) {
167         return;
168     }
169 
170     INHERITED::abandonContext();
171 
172     // We need to make sure all work is finished on the gpu before we start releasing resources.
173     this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);
174 
175     fResourceProvider->abandon();
176 
177     // Release all resources in the backend 3D API.
178     fResourceCache->releaseAll();
179 
180     // Must be after GrResourceCache::releaseAll().
181     fMappedBufferManager.reset();
182 
183     fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
184     if (fSmallPathAtlasMgr) {
185         fSmallPathAtlasMgr->reset();
186     }
187     fAtlasManager->freeAll();
188 }
189 
190 void GrDirectContext::freeGpuResources() {
191     ASSERT_SINGLE_OWNER
192 
193     if (this->abandoned()) {
194         return;
195     }
196 
197     this->flushAndSubmit();
198     if (fSmallPathAtlasMgr) {
199         fSmallPathAtlasMgr->reset();
200     }
201     fAtlasManager->freeAll();
202     // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
203     // place to purge blobs.
204     this->getTextBlobCache()->freeAll();
205     // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
206     // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
207     fStrikeCache->freeAll();
208 
209     this->drawingManager()->freeGpuResources();
210 
211     fResourceCache->purgeUnlockedResources();
212 }
213 
214 bool GrDirectContext::init() {
215     ASSERT_SINGLE_OWNER
216     if (!fGpu) {
217         return false;
218     }
219 
220     fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
221     if (!INHERITED::init()) {
222         return false;
223     }
224 
225     SkASSERT(this->getTextBlobCache());
226     SkASSERT(this->threadSafeCache());
227 
228     fStrikeCache = std::make_unique<GrStrikeCache>();
229     fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
230                                                        this->directContextID(),
231                                                        this->contextID());
232     fResourceCache->setProxyProvider(this->proxyProvider());
233     fResourceCache->setThreadSafeCache(this->threadSafeCache());
234 #if GR_TEST_UTILS
235     if (this->options().fResourceCacheLimitOverride != -1) {
236         this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
237     }
238 #endif
239     fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
240                                                              this->singleOwner());
241     fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());
242 
243     fDidTestPMConversions = false;
244 
245     // DDL TODO: we need to think through how the task group & persistent cache
246     // get passed on to/shared between all the DDLRecorders created with this context.
247     if (this->options().fExecutor) {
248         fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
249     }
250 
251     fPersistentCache = this->options().fPersistentCache;
252 
253     GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
254     if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
255         // multitexturing supported only if range can represent the index + texcoords fully
256         !(this->caps()->shaderCaps()->floatIs32Bits() ||
257         this->caps()->shaderCaps()->integerSupport())) {
258         allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
259     } else {
260         allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
261     }
262 
263     GrProxyProvider* proxyProvider = this->priv().proxyProvider();
264 
265     fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
266                                                      this->options().fGlyphCacheTextureMaximumBytes,
267                                                      allowMultitexturing);
268     this->priv().addOnFlushCallbackObject(fAtlasManager.get());
269 
270     return true;
271 }
272 
273 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
274     ASSERT_SINGLE_OWNER
275 
276     if (resourceCount) {
277         *resourceCount = fResourceCache->getBudgetedResourceCount();
278     }
279     if (resourceBytes) {
280         *resourceBytes = fResourceCache->getBudgetedResourceBytes();
281     }
282 }
283 
284 size_t GrDirectContext::getResourceCachePurgeableBytes() const {
285     ASSERT_SINGLE_OWNER
286     return fResourceCache->getPurgeableBytes();
287 }
288 
289 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
290     ASSERT_SINGLE_OWNER
291     if (maxResources) {
292         *maxResources = -1;
293     }
294     if (maxResourceBytes) {
295         *maxResourceBytes = this->getResourceCacheLimit();
296     }
297 }
298 
299 size_t GrDirectContext::getResourceCacheLimit() const {
300     ASSERT_SINGLE_OWNER
301     return fResourceCache->getMaxResourceBytes();
302 }
303 
304 void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
305     ASSERT_SINGLE_OWNER
306     this->setResourceCacheLimit(maxResourceBytes);
307 }
308 
309 void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
310     ASSERT_SINGLE_OWNER
311     fResourceCache->setLimit(maxResourceBytes);
312 }
313 
314 void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
315     ASSERT_SINGLE_OWNER
316 
317     if (this->abandoned()) {
318         return;
319     }
320 
321     fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
322     fResourceCache->purgeAsNeeded();
323 
324     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
325     // place to purge stale blobs
326     this->getTextBlobCache()->purgeStaleBlobs();
327 
328     fGpu->releaseUnlockedBackendObjects();
329 }
330 
331 void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
332     ASSERT_SINGLE_OWNER
333 
334     if (this->abandoned()) {
335         return;
336     }
337 
338     fResourceCache->purgeUnlockAndSafeCacheGpuResources();
339     fResourceCache->purgeAsNeeded();
340 
341     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
342     // place to purge stale blobs
343     this->getTextBlobCache()->purgeStaleBlobs();
344 
345     fGpu->releaseUnlockedBackendObjects();
346 }
347 
348 std::array<int, 2> GrDirectContext::CalcHpsBluredImageDimension(const SkBlurArg& blurArg) {
349     return fGpu->GetHpsDimension(blurArg);
350 }
351 
352 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
353     ASSERT_SINGLE_OWNER
354     fResourceCache->purgeUnlockedResourcesByTag(scratchResourcesOnly, tag);
355     fResourceCache->purgeAsNeeded();
356 
357     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
358     // place to purge stale blobs
359     this->getTextBlobCache()->purgeStaleBlobs();
360 }
361 
362 void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet) {
363     ASSERT_SINGLE_OWNER
364     fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
365     fResourceCache->purgeAsNeeded();
366 
367     // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
368     // place to purge blobs.
369     this->getTextBlobCache()->freeAll();
370     // The StrikeCache indirectly references typeface, and in order to dereference the typeface,
371     // it is necessary to clear the StrikeCache when the application exits.
372     fStrikeCache->freeAll();
373 }
374 
375 void GrDirectContext::registerVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback)
376 {
377     vulkanErrorCallback_ = vulkanErrorCallback;
378 }
379 
380 void GrDirectContext::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
381         const std::set<int>& protectedPidSet)
382 {
383     ASSERT_SINGLE_OWNER
384 
385     if (this->abandoned()) {
386         return;
387     }
388 
389     fResourceCache->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
390 }
391 
392 void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
393                                              bool scratchResourcesOnly) {
394     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
395 
396     ASSERT_SINGLE_OWNER
397 
398     if (this->abandoned()) {
399         return;
400     }
401 
402     this->checkAsyncWorkCompletion();
403     fMappedBufferManager->process();
404     auto purgeTime = GrStdSteadyClock::now() - msNotUsed;
405 
406     fResourceCache->purgeAsNeeded();
407     fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);
408 
409     // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
410     // place to purge stale blobs
411     this->getTextBlobCache()->purgeStaleBlobs();
412 }
413 
414 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
415     ASSERT_SINGLE_OWNER
416 
417     if (this->abandoned()) {
418         return;
419     }
420 
421     fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
422 }
423 
424 ////////////////////////////////////////////////////////////////////////////////
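// Instructs the GPU to wait on the given backend semaphores before executing subsequently submitted work. Returns false
// if the backend has no semaphore support; semaphores that fail to wrap are simply skipped rather than treated as errors.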
425 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
426                            bool deleteSemaphoresAfterWait) {
427     if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
428         return false;
429     }
430     GrWrapOwnership ownership =
431             deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
432     for (int i = 0; i < numSemaphores; ++i) {
433         std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
434                 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
435         // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
436         // to begin with. Therefore, it is fine to not wait on it.
437         if (sema) {
438             fGpu->waitSemaphore(sema.get());
439         }
440     }
441     return true;
442 }
443 
444 skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
445 #if SK_GPU_V1
446     if (!fSmallPathAtlasMgr) {
447         fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();
448 
449         this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
450     }
451 
452     if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
453         return nullptr;
454     }
455 #endif
456 
457     return fSmallPathAtlasMgr.get();
458 }
459 
460 ////////////////////////////////////////////////////////////////////////////////
461 
462 GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
463     ASSERT_SINGLE_OWNER
464     if (this->abandoned()) {
465         if (info.fFinishedProc) {
466             info.fFinishedProc(info.fFinishedContext);
467         }
468         if (info.fSubmittedProc) {
469             info.fSubmittedProc(info.fSubmittedContext, false);
470         }
471         return GrSemaphoresSubmitted::kNo;
472     }
473 
474     return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
475                                                  info, nullptr);
476 }
477 
478 bool GrDirectContext::submit(bool syncCpu) {
479     ASSERT_SINGLE_OWNER
480     if (this->abandoned()) {
481         return false;
482     }
483 
484     if (!fGpu) {
485         return false;
486     }
487 
488     return fGpu->submitToGpu(syncCpu);
489 }
490 
491 ////////////////////////////////////////////////////////////////////////////////
492 
493 void GrDirectContext::checkAsyncWorkCompletion() {
494     if (fGpu) {
495         fGpu->checkFinishProcs();
496     }
497 }
498 
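// Blocks until all outstanding GPU work has finished, then runs any finish callbacks that became ready.
// Skipped when the context is abandoned unless the caller explicitly requests execution anyway.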
499 void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
500     if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
501         fGpu->finishOutstandingGpuWork();
502         this->checkAsyncWorkCompletion();
503     }
504 }
505 
506 ////////////////////////////////////////////////////////////////////////////////
507 
508 void GrDirectContext::storeVkPipelineCacheData() {
509     if (fGpu) {
510         fGpu->storeVkPipelineCacheData();
511     }
512 }
513 
514 ////////////////////////////////////////////////////////////////////////////////
515 
516 bool GrDirectContext::supportsDistanceFieldText() const {
517     return this->caps()->shaderCaps()->supportsDistanceFieldText();
518 }
519 
520 //////////////////////////////////////////////////////////////////////////////
521 void GrDirectContext::dumpAllResource(std::stringstream &dump) const {
522 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
523     fResourceCache->dumpAllResource(dump);
524 #endif
525 }
526 
527 void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
528     ASSERT_SINGLE_OWNER
529     fResourceCache->dumpMemoryStatistics(traceMemoryDump);
530     traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
531                                       this->getTextBlobCache()->usedBytes());
532 }
533 
534 void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
535     ASSERT_SINGLE_OWNER
536     fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
537     traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
538         this->getTextBlobCache()->usedBytes());
539 }
540 
541 void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
542     if (fResourceCache) {
543         return fResourceCache->setCurrentGrResourceTag(tag);
544     }
545 }
546 
547 void GrDirectContext::popGrResourceTag()
548 {
549     if (fResourceCache) {
550         return fResourceCache->popGrResourceTag();
551     }
552 }
553 
554 GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
555     if (fResourceCache) {
556         return fResourceCache->getCurrentGrResourceTag();
557     }
558     return {};
559 }
560 void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
561     if (fResourceCache) {
562         fResourceCache->releaseByTag(tag);
563     }
564 }
565 std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
566     if (fResourceCache) {
567         return fResourceCache->getAllGrGpuResourceTags();
568     }
569     return {};
570 }
571 
572 #ifdef SKIA_OHOS
573 // OH ISSUE: set purgeable resource max count limit.
574 void GrDirectContext::setPurgeableResourceLimit(int purgeableMaxCount)
575 {
576     ASSERT_SINGLE_OWNER
577     if (fResourceCache) {
578         fResourceCache->setPurgeableResourceLimit(purgeableMaxCount);
579     }
580 }
581 #endif
582 
583 // OH ISSUE: get the memory information of the updated pid.
584 void GrDirectContext::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
585 {
586     if (fResourceCache) {
587         fResourceCache->getUpdatedMemoryMap(out);
588     }
589 }
590 
591 // OH ISSUE: init gpu memory limit.
592 void GrDirectContext::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
593 {
594     if (fResourceCache) {
595         fResourceCache->initGpuMemoryLimit(callback, size);
596     }
597 }
598 
599 // OH ISSUE: check whether the PID is abnormal.
600 bool GrDirectContext::isPidAbnormal() const
601 {
602     if (fResourceCache) {
603         return fResourceCache->isPidAbnormal();
604     }
605     return false;
606 }
607 
608 void GrDirectContext::vmaDefragment()
609 {
610     if (fGpu) {
611         fGpu->vmaDefragment();
612     }
613 }
614 
615 void GrDirectContext::dumpVmaStats(SkString *out)
616 {
617     if (out == nullptr) {
618         return;
619     }
620     if (fGpu) {
621         fGpu->dumpVmaStats(out);
622     }
623 }
624 
625 // OH ISSUE: intra frame and inter frame identification
626 void GrDirectContext::beginFrame()
627 {
628 #ifdef SK_VULKAN
629     if (fResourceCache) {
630         fResourceCache->beginFrame();
631     }
632 #endif
633 }
634 
635 // OH ISSUE: intra frame and inter frame identification
636 void GrDirectContext::endFrame()
637 {
638 #ifdef SK_VULKAN
639     if (fResourceCache) {
640         fResourceCache->endFrame();
641     }
642 #endif
643 }
644 
645 // OH ISSUE: async memory reclaimer
646 void GrDirectContext::setGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority)
647 {
648 #ifdef SK_VULKAN
649     if (fGpu) {
650         fGpu->setGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
651     }
652 #endif
653 }
654 
655 // OH ISSUE: async memory reclaimer
656 void GrDirectContext::flushGpuMemoryInWaitQueue()
657 {
658 #ifdef SK_VULKAN
659     if (fGpu) {
660         fGpu->flushGpuMemoryInWaitQueue();
661     }
662 #endif
663 }
664 
665 // OH ISSUE: suppress release window
666 void GrDirectContext::setGpuCacheSuppressWindowSwitch(bool enabled)
667 {
668 #ifdef SK_VULKAN
669     ASSERT_SINGLE_OWNER
670 
671     if (this->abandoned()) {
672         return;
673     }
674 
675     fResourceCache->setGpuCacheSuppressWindowSwitch(enabled);
676 #endif
677 }
678 
679 // OH ISSUE: suppress release window
680 void GrDirectContext::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
681 {
682 #ifdef SK_VULKAN
683     ASSERT_SINGLE_OWNER
684 
685     if (this->abandoned()) {
686         return;
687     }
688 
689     fResourceCache->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
690 #endif
691 }
692 
693 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
694                                                        const GrBackendFormat& backendFormat,
695                                                        GrMipmapped mipMapped,
696                                                        GrRenderable renderable,
697                                                        GrProtected isProtected) {
698     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
699     if (this->abandoned()) {
700         return GrBackendTexture();
701     }
702 
703     return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
704                                       mipMapped, isProtected);
705 }
706 
707 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
708                                                        SkColorType skColorType,
709                                                        GrMipmapped mipMapped,
710                                                        GrRenderable renderable,
711                                                        GrProtected isProtected) {
712     if (this->abandoned()) {
713         return GrBackendTexture();
714     }
715 
716     const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
717 
718     return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
719 }
720 
721 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
722                                                          SkISize dimensions,
723                                                          const GrBackendFormat& backendFormat,
724                                                          GrMipmapped mipMapped,
725                                                          GrRenderable renderable,
726                                                          GrProtected isProtected,
727                                                          sk_sp<GrRefCntedCallback> finishedCallback,
728                                                          std::array<float, 4> color) {
729     GrGpu* gpu = dContext->priv().getGpu();
730     GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
731                                                        mipMapped, isProtected);
732     if (!beTex.isValid()) {
733         return {};
734     }
735 
736     if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
737                                                         std::move(finishedCallback),
738                                                         color)) {
739         dContext->deleteBackendTexture(beTex);
740         return {};
741     }
742     return beTex;
743 }
744 
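// Wraps the existing backend texture in a proxy, writes the provided mip-level pixmaps into it, and flushes the surface
// so the upload is handed off to the GPU.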
745 static bool update_texture_with_pixmaps(GrDirectContext* context,
746                                         const SkPixmap src[],
747                                         int numLevels,
748                                         const GrBackendTexture& backendTexture,
749                                         GrSurfaceOrigin textureOrigin,
750                                         sk_sp<GrRefCntedCallback> finishedCallback) {
751     GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
752     const GrBackendFormat& format = backendTexture.getBackendFormat();
753 
754     if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
755         return false;
756     }
757 
758     auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
759                                                                      kBorrow_GrWrapOwnership,
760                                                                      GrWrapCacheable::kNo,
761                                                                      kRW_GrIOType,
762                                                                      std::move(finishedCallback));
763     if (!proxy) {
764         return false;
765     }
766 
767     GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
768     GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
769     skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
770     SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
771     for (int i = 0; i < numLevels; ++i) {
772         tmpSrc[i] = src[i];
773     }
774     if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
775         return false;
776     }
777 
778     GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
779     GrFlushInfo info;
780     context->priv().drawingManager()->flushSurfaces({&p, 1},
781                                                     SkSurface::BackendSurfaceAccess::kNoAccess,
782                                                     info,
783                                                     nullptr);
784     return true;
785 }
786 
787 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
788                                                        const GrBackendFormat& backendFormat,
789                                                        const SkColor4f& color,
790                                                        GrMipmapped mipMapped,
791                                                        GrRenderable renderable,
792                                                        GrProtected isProtected,
793                                                        GrGpuFinishedProc finishedProc,
794                                                        GrGpuFinishedContext finishedContext) {
795     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
796 
797     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
798     if (this->abandoned()) {
799         return {};
800     }
801 
802     return create_and_clear_backend_texture(this,
803                                             {width, height},
804                                             backendFormat,
805                                             mipMapped,
806                                             renderable,
807                                             isProtected,
808                                             std::move(finishedCallback),
809                                             color.array());
810 }
811 
812 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
813                                                        SkColorType skColorType,
814                                                        const SkColor4f& color,
815                                                        GrMipmapped mipMapped,
816                                                        GrRenderable renderable,
817                                                        GrProtected isProtected,
818                                                        GrGpuFinishedProc finishedProc,
819                                                        GrGpuFinishedContext finishedContext) {
820     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
821 
822     if (this->abandoned()) {
823         return {};
824     }
825 
826     GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
827     if (!format.isValid()) {
828         return {};
829     }
830 
831     GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
832     SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);
833 
834     return create_and_clear_backend_texture(this,
835                                             {width, height},
836                                             format,
837                                             mipMapped,
838                                             renderable,
839                                             isProtected,
840                                             std::move(finishedCallback),
841                                             swizzledColor.array());
842 }
843 
844 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
845                                                        int numProvidedLevels,
846                                                        GrSurfaceOrigin textureOrigin,
847                                                        GrRenderable renderable,
848                                                        GrProtected isProtected,
849                                                        GrGpuFinishedProc finishedProc,
850                                                        GrGpuFinishedContext finishedContext) {
851     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
852 
853     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
854 
855     if (this->abandoned()) {
856         return {};
857     }
858 
859     if (!srcData || numProvidedLevels <= 0) {
860         return {};
861     }
862 
863     SkColorType colorType = srcData[0].colorType();
864 
865     GrMipmapped mipMapped = GrMipmapped::kNo;
866     if (numProvidedLevels > 1) {
867         mipMapped = GrMipmapped::kYes;
868     }
869 
870     GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
871     GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
872                                                         srcData[0].height(),
873                                                         backendFormat,
874                                                         mipMapped,
875                                                         renderable,
876                                                         isProtected);
877     if (!beTex.isValid()) {
878         return {};
879     }
880     if (!update_texture_with_pixmaps(this,
881                                      srcData,
882                                      numProvidedLevels,
883                                      beTex,
884                                      textureOrigin,
885                                      std::move(finishedCallback))) {
886         this->deleteBackendTexture(beTex);
887         return {};
888     }
889     return beTex;
890 }
891 
892 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
893                                            const SkColor4f& color,
894                                            GrGpuFinishedProc finishedProc,
895                                            GrGpuFinishedContext finishedContext) {
896     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
897 
898     if (this->abandoned()) {
899         return false;
900     }
901 
902     return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
903 }
904 
905 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
906                                            SkColorType skColorType,
907                                            const SkColor4f& color,
908                                            GrGpuFinishedProc finishedProc,
909                                            GrGpuFinishedContext finishedContext) {
910     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
911 
912     if (this->abandoned()) {
913         return false;
914     }
915 
916     GrBackendFormat format = backendTexture.getBackendFormat();
917     GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
918 
919     if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
920         return false;
921     }
922 
923     GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
924     SkColor4f swizzledColor = swizzle.applyTo(color);
925 
926     return fGpu->clearBackendTexture(backendTexture,
927                                      std::move(finishedCallback),
928                                      swizzledColor.array());
929 }
930 
931 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
932                                            const SkPixmap srcData[],
933                                            int numLevels,
934                                            GrSurfaceOrigin textureOrigin,
935                                            GrGpuFinishedProc finishedProc,
936                                            GrGpuFinishedContext finishedContext) {
937     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
938 
939     if (this->abandoned()) {
940         return false;
941     }
942 
943     if (!srcData || numLevels <= 0) {
944         return false;
945     }
946 
947     // If the texture has MIP levels then we require that the full set is overwritten.
948     int numExpectedLevels = 1;
949     if (backendTexture.hasMipmaps()) {
950         numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
951                                                         backendTexture.height()) + 1;
952     }
953     if (numLevels != numExpectedLevels) {
954         return false;
955     }
956     return update_texture_with_pixmaps(this,
957                                        srcData,
958                                        numLevels,
959                                        backendTexture,
960                                        textureOrigin,
961                                        std::move(finishedCallback));
962 }
963 
964 //////////////////////////////////////////////////////////////////////////////
965 
966 static GrBackendTexture create_and_update_compressed_backend_texture(
967         GrDirectContext* dContext,
968         SkISize dimensions,
969         const GrBackendFormat& backendFormat,
970         GrMipmapped mipMapped,
971         GrProtected isProtected,
972         sk_sp<GrRefCntedCallback> finishedCallback,
973         const void* data,
974         size_t size) {
975     GrGpu* gpu = dContext->priv().getGpu();
976 
977     GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
978                                                                  mipMapped, isProtected);
979     if (!beTex.isValid()) {
980         return {};
981     }
982 
983     if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
984                 beTex, std::move(finishedCallback), data, size)) {
985         dContext->deleteBackendTexture(beTex);
986         return {};
987     }
988     return beTex;
989 }
990 
991 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
992         int width, int height,
993         const GrBackendFormat& backendFormat,
994         const SkColor4f& color,
995         GrMipmapped mipmapped,
996         GrProtected isProtected,
997         GrGpuFinishedProc finishedProc,
998         GrGpuFinishedContext finishedContext) {
999     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1000     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1001 
1002     if (this->abandoned()) {
1003         return {};
1004     }
1005 
1006     SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
1007     if (compression == SkImage::CompressionType::kNone) {
1008         return {};
1009     }
1010 
1011     size_t size = SkCompressedDataSize(compression,
1012                                        {width, height},
1013                                        nullptr,
1014                                        mipmapped == GrMipmapped::kYes);
1015     auto storage = std::make_unique<char[]>(size);
1016     GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
1017     return create_and_update_compressed_backend_texture(this,
1018                                                         {width, height},
1019                                                         backendFormat,
1020                                                         mipmapped,
1021                                                         isProtected,
1022                                                         std::move(finishedCallback),
1023                                                         storage.get(),
1024                                                         size);
1025 }
1026 
1027 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1028         int width, int height,
1029         SkImage::CompressionType compression,
1030         const SkColor4f& color,
1031         GrMipmapped mipMapped,
1032         GrProtected isProtected,
1033         GrGpuFinishedProc finishedProc,
1034         GrGpuFinishedContext finishedContext) {
1035     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1036     GrBackendFormat format = this->compressedBackendFormat(compression);
1037     return this->createCompressedBackendTexture(width, height, format, color,
1038                                                 mipMapped, isProtected, finishedProc,
1039                                                 finishedContext);
1040 }
1041 
1042 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1043         int width, int height,
1044         const GrBackendFormat& backendFormat,
1045         const void* compressedData,
1046         size_t dataSize,
1047         GrMipmapped mipMapped,
1048         GrProtected isProtected,
1049         GrGpuFinishedProc finishedProc,
1050         GrGpuFinishedContext finishedContext) {
1051     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1052     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1053 
1054     if (this->abandoned()) {
1055         return {};
1056     }
1057 
1058     return create_and_update_compressed_backend_texture(this,
1059                                                         {width, height},
1060                                                         backendFormat,
1061                                                         mipMapped,
1062                                                         isProtected,
1063                                                         std::move(finishedCallback),
1064                                                         compressedData,
1065                                                         dataSize);
1066 }
1067 
1068 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1069         int width, int height,
1070         SkImage::CompressionType compression,
1071         const void* data, size_t dataSize,
1072         GrMipmapped mipMapped,
1073         GrProtected isProtected,
1074         GrGpuFinishedProc finishedProc,
1075         GrGpuFinishedContext finishedContext) {
1076     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1077     GrBackendFormat format = this->compressedBackendFormat(compression);
1078     return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
1079                                                 isProtected, finishedProc, finishedContext);
1080 }
1081 
1082 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1083                                                      const SkColor4f& color,
1084                                                      GrGpuFinishedProc finishedProc,
1085                                                      GrGpuFinishedContext finishedContext) {
1086     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1087 
1088     if (this->abandoned()) {
1089         return false;
1090     }
1091 
1092     SkImage::CompressionType compression =
1093             GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1094     if (compression == SkImage::CompressionType::kNone) {
1095         return {};
1096     }
1097     size_t size = SkCompressedDataSize(compression,
1098                                        backendTexture.dimensions(),
1099                                        nullptr,
1100                                        backendTexture.hasMipmaps());
1101     SkAutoMalloc storage(size);
1102     GrFillInCompressedData(compression,
1103                            backendTexture.dimensions(),
1104                            backendTexture.mipmapped(),
1105                            static_cast<char*>(storage.get()),
1106                            color);
1107     return fGpu->updateCompressedBackendTexture(backendTexture,
1108                                                 std::move(finishedCallback),
1109                                                 storage.get(),
1110                                                 size);
1111 }
1112 
1113 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1114                                                      const void* compressedData,
1115                                                      size_t dataSize,
1116                                                      GrGpuFinishedProc finishedProc,
1117                                                      GrGpuFinishedContext finishedContext) {
1118     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1119 
1120     if (this->abandoned()) {
1121         return false;
1122     }
1123 
1124     if (!compressedData) {
1125         return false;
1126     }
1127 
1128     return fGpu->updateCompressedBackendTexture(backendTexture,
1129                                                 std::move(finishedCallback),
1130                                                 compressedData,
1131                                                 dataSize);
1132 }
1133 
1134 //////////////////////////////////////////////////////////////////////////////
1135 
1136 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1137                                              const GrBackendSurfaceMutableState& state,
1138                                              GrBackendSurfaceMutableState* previousState,
1139                                              GrGpuFinishedProc finishedProc,
1140                                              GrGpuFinishedContext finishedContext) {
1141     auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1142 
1143     if (this->abandoned()) {
1144         return false;
1145     }
1146 
1147     return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1148 }
1149 
1150 
1151 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1152                                                   const GrBackendSurfaceMutableState& state,
1153                                                   GrBackendSurfaceMutableState* previousState,
1154                                                   GrGpuFinishedProc finishedProc,
1155                                                   GrGpuFinishedContext finishedContext) {
1156     auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1157 
1158     if (this->abandoned()) {
1159         return false;
1160     }
1161 
1162     return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1163                                              std::move(callback));
1164 }
1165 
1166 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
1167     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1168     // For the Vulkan backend we still must destroy the backend texture when the context is
1169     // abandoned.
1170     if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1171         return;
1172     }
1173 
1174     fGpu->deleteBackendTexture(backendTex);
1175 }
1176 
1177 //////////////////////////////////////////////////////////////////////////////
1178 
1179 bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
1180     return fGpu->precompileShader(key, data);
1181 }
1182 
1183 #ifdef SK_ENABLE_DUMP_GPU
1184 #include "include/core/SkString.h"
1185 #include "src/utils/SkJSONWriter.h"
1186 SkString GrDirectContext::dump() const {
1187     SkDynamicMemoryWStream stream;
1188     SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
1189     writer.beginObject();
1190 
1191     writer.appendString("backend", GrBackendApiToStr(this->backend()));
1192 
1193     writer.appendName("caps");
1194     this->caps()->dumpJSON(&writer);
1195 
1196     writer.appendName("gpu");
1197     this->fGpu->dumpJSON(&writer);
1198 
1199     writer.appendName("context");
1200     this->dumpJSON(&writer);
1201 
1202     // Flush JSON to the memory stream
1203     writer.endObject();
1204     writer.flush();
1205 
1206     // Null terminate the JSON data in the memory stream
1207     stream.write8(0);
1208 
1209     // Allocate a string big enough to hold all the data, then copy out of the stream
1210     SkString result(stream.bytesWritten());
1211     stream.copyToAndReset(result.writable_str());
1212     return result;
1213 }
1214 #endif
1215 
1216 #ifdef SK_GL
1217 
1218 /*************************************************************************************************/
1219 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1220     GrContextOptions defaultOptions;
1221     return MakeGL(std::move(glInterface), defaultOptions);
1222 }
1223 
1224 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
1225     return MakeGL(nullptr, options);
1226 }
1227 
1228 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1229     GrContextOptions defaultOptions;
1230     return MakeGL(nullptr, defaultOptions);
1231 }
1232 
1233 #if GR_TEST_UTILS
1234 GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
1235     // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
1236     // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
1237     // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
1238     // on the thing it captures. So we leak the context.
1239     struct GetErrorContext {
1240         SkRandom fRandom;
1241         GrGLFunction<GrGLGetErrorFn> fGetError;
1242     };
1243 
1244     auto errorContext = new GetErrorContext;
1245 
1246 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
1247     __lsan_ignore_object(errorContext);
1248 #endif
1249 
1250     errorContext->fGetError = original;
1251 
1252     return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
1253         GrGLenum error = errorContext->fGetError();
1254         if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
1255             error = GR_GL_OUT_OF_MEMORY;
1256         }
1257         return error;
1258     });
1259 }
1260 #endif
1261 
1262 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
1263                                                const GrContextOptions& options) {
1264     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
1265 #if GR_TEST_UTILS
1266     if (options.fRandomGLOOM) {
1267         auto copy = sk_make_sp<GrGLInterface>(*glInterface);
1268         copy->fFunctions.fGetError =
1269                 make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
1270 #if GR_GL_CHECK_ERROR
1271         // Suppress logging GL errors since we'll be synthetically generating them.
1272         copy->suppressErrorLogging();
1273 #endif
1274         glInterface = std::move(copy);
1275     }
1276 #endif
1277     direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
1278     if (!direct->init()) {
1279         return nullptr;
1280     }
1281     return direct;
1282 }
1283 #endif
1284 
1285 /*************************************************************************************************/
1286 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1287     GrContextOptions defaultOptions;
1288     return MakeMock(mockOptions, defaultOptions);
1289 }
1290 
1291 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1292                                                  const GrContextOptions& options) {
1293     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1294 
1295     direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1296     if (!direct->init()) {
1297         return nullptr;
1298     }
1299 
1300     return direct;
1301 }
1302 
1303 #ifdef SK_VULKAN
1304 /*************************************************************************************************/
1305 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1306     GrContextOptions defaultOptions;
1307     return MakeVulkan(backendContext, defaultOptions);
1308 }
1309 
1310 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1311                                                    const GrContextOptions& options) {
1312     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1313 
1314     direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1315     if (!direct->init()) {
1316         return nullptr;
1317     }
1318 
1319     return direct;
1320 }
1321 #endif
1322 
1323 #ifdef SK_METAL
1324 /*************************************************************************************************/
1325 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1326     GrContextOptions defaultOptions;
1327     return MakeMetal(backendContext, defaultOptions);
1328 }
1329 
1330 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1331                                                      const GrContextOptions& options) {
1332     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1333 
1334     direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1335     if (!direct->init()) {
1336         return nullptr;
1337     }
1338 
1339     return direct;
1340 }
1341 
1342 // deprecated
1343 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1344     GrContextOptions defaultOptions;
1345     return MakeMetal(device, queue, defaultOptions);
1346 }
1347 
1348 // deprecated
1349 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
1350 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1351                                                   const GrContextOptions& options) {
1352     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1353     GrMtlBackendContext backendContext = {};
1354     backendContext.fDevice.reset(device);
1355     backendContext.fQueue.reset(queue);
1356 
1357     return GrDirectContext::MakeMetal(backendContext, options);
1358 }
1359 #endif
1360 
1361 #ifdef SK_DIRECT3D
1362 /*************************************************************************************************/
1363 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1364     GrContextOptions defaultOptions;
1365     return MakeDirect3D(backendContext, defaultOptions);
1366 }
1367 
1368 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1369                                                      const GrContextOptions& options) {
1370     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1371 
1372     direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1373     if (!direct->init()) {
1374         return nullptr;
1375     }
1376 
1377     return direct;
1378 }
1379 #endif
1380 
1381 #ifdef SK_DAWN
1382 /*************************************************************************************************/
1383 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1384     GrContextOptions defaultOptions;
1385     return MakeDawn(device, defaultOptions);
1386 }
1387 
1388 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1389                                                  const GrContextOptions& options) {
1390     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1391 
1392     direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1393     if (!direct->init()) {
1394         return nullptr;
1395     }
1396 
1397     return direct;
1398 }
1399 
1400 #endif
1401