• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2018 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 
9 #include "include/gpu/GrDirectContext.h"
10 
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
33 #if SK_GPU_V1
34 #include "src/gpu/ops/SmallPathAtlasMgr.h"
35 #else
36 // A vestigial definition for v2 that will never be instantiated
namespace skgpu::v1 {
// Stub standing in for the real v1 SmallPathAtlasMgr when SK_GPU_V1 is off.
// It exists only so member declarations compile; constructing or using it is
// a programming error, hence the SkASSERT(0) in every member.
class SmallPathAtlasMgr {
public:
    SmallPathAtlasMgr() { SkASSERT(0); }
    void reset() { SkASSERT(0); }
};
}
44 #endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62 
63 #if GR_TEST_UTILS
64 #   include "include/utils/SkRandom.h"
65 #   if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 #       include <sanitizer/lsan_interface.h>
67 #   endif
68 #endif
69 
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71 
Next()72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73     static std::atomic<uint32_t> nextID{1};
74     uint32_t id;
75     do {
76         id = nextID.fetch_add(1, std::memory_order_relaxed);
77     } while (id == SK_InvalidUniqueID);
78     return DirectContextID(id);
79 }
80 
// Constructs a direct context over a freshly-made thread-safe proxy for the
// given backend/options, and assigns it a process-unique DirectContextID.
// NOTE(review): the `false` is forwarded to the base-class constructor;
// presumably it distinguishes direct contexts from recording ones — confirm
// against the GrContext/GrImageContext constructor it reaches.
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}
85 
// Teardown is order-sensitive: flush pending work, wait for the GPU, tear
// down the drawing manager, release cached resources, and only then drop the
// mapped-buffer manager. Do not reorder without reading the comments below.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
107 
// Public forwarder: exposes the base class's thread-safe proxy accessor.
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return INHERITED::threadSafeProxy();
}
111 
resetGLTextureBindings()112 void GrDirectContext::resetGLTextureBindings() {
113     if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114         return;
115     }
116     fGpu->resetTextureBindings();
117 }
118 
// Marks backend API state dirty after external (non-Skia) use of the 3D API.
// The `state` bits are forwarded verbatim to fGpu->markContextDirty().
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
123 
// Severs the context from the backend 3D API without freeing GPU objects
// through it. Idempotent: a second call returns immediately. The teardown
// order below matters — caches are abandoned before fGpu is disconnected.
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    // The small-path atlas manager is created lazily, so it may not exist.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
150 
// Returns true if this context is unusable. Note the side effect: if the GPU
// reports a lost device here, the context abandons itself before returning.
bool GrDirectContext::abandoned() {
    if (INHERITED::abandoned()) {
        return true;
    }

    if (fGpu && fGpu->isDeviceLost()) {
        this->abandonContext();
        return true;
    }
    return false;
}
162 
oomed()163 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
164 
// Like abandonContext(), but releases GPU objects back to the backend API
// first (DisconnectType::kCleanup vs kAbandon). Order-sensitive; see the
// inline comments. Idempotent once the context is already abandoned.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    // The small-path atlas manager is created lazily, so it may not exist.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
189 
// Frees all GPU resources this context can release without breaking
// outstanding client handles: flushes pending work first, then empties the
// atlases, blob/strike caches, drawing-manager resources, and finally the
// unlocked entries of the resource cache. No-op on an abandoned context.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge blobs.
    this->getTextBlobCache()->freeAll();
    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}
213 
// Second-phase initialization, run after the backend GrGpu has been created.
// Returns false if there is no GPU or base-class init fails. The setup order
// matters: the thread-safe proxy must learn the caps before INHERITED::init().
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    // Backend creation failed — the context cannot be initialized.
    if (!fGpu) {
        return false;
    }

    // Publish caps/pipeline builder to the proxy before base-class init.
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    // Tests may pin the cache budget via GrContextOptions.
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
        this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    // Register the atlas manager so it participates in every flush.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
272 
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const273 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
274     ASSERT_SINGLE_OWNER
275 
276     if (resourceCount) {
277         *resourceCount = fResourceCache->getBudgetedResourceCount();
278     }
279     if (resourceBytes) {
280         *resourceBytes = fResourceCache->getBudgetedResourceBytes();
281     }
282 }
283 
// Returns the number of bytes held by cache entries that could be purged.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
288 
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const289 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
290     ASSERT_SINGLE_OWNER
291     if (maxResources) {
292         *maxResources = -1;
293     }
294     if (maxResourceBytes) {
295         *maxResourceBytes = this->getResourceCacheLimit();
296     }
297 }
298 
// Returns the resource cache's byte budget.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}
303 
// Legacy setter: the resource-count limit is gone, so the first argument is
// ignored and only the byte budget is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
308 
// Sets the resource cache's byte budget.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
313 
// Purges unlocked resources from the cache (optionally scratch-only), trims
// the cache to budget, drops stale text blobs, and lets the GPU release any
// unlocked backend objects. No-op on an abandoned context.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
330 
// Variant of purgeUnlockedResources() that defers to the cache's
// "unlock-and-safe" purge policy, then performs the same follow-up trims.
// No-op on an abandoned context.
void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockAndSafeCacheGpuResources();
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
347 
CalcHpsBluredImageDimension(const SkBlurArg & blurArg)348 std::array<int, 2> GrDirectContext::CalcHpsBluredImageDimension(const SkBlurArg& blurArg) {
349     return fGpu->GetHpsDimension(blurArg);
350 }
351 
purgeUnlockedResourcesByTag(bool scratchResourceseOnly,const GrGpuResourceTag & tag)352 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourceseOnly, const GrGpuResourceTag& tag) {
353     ASSERT_SINGLE_OWNER
354     fResourceCache->purgeUnlockedResourcesByTag(scratchResourceseOnly, tag);
355     fResourceCache->purgeAsNeeded();
356 
357     // The textBlod Cache doesn't actually hold any GPU resource but this is a convenient
358     // place to purge stale blobs
359     this->getTextBlobCache()->purgeStaleBlobs();
360 }
361 
purgeUnlockedResourcesByPid(bool scratchResourcesOnly,const std::set<int> & exitedPidSet)362 void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet) {
363     ASSERT_SINGLE_OWNER
364     fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
365     fResourceCache->purgeAsNeeded();
366 
367     // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
368     // place to purge blobs.
369     this->getTextBlobCache()->freeAll();
370     // The StrikeCache indirectly references typeface, and in order to dereference the typeface,
371     // it is necessary to clear the StrikeCache when the application exits.
372     fStrikeCache->freeAll();
373 }
374 
purgeCacheBetweenFrames(bool scratchResourcesOnly,const std::set<int> & exitedPidSet,const std::set<int> & protectedPidSet)375 void GrDirectContext::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
376         const std::set<int>& protectedPidSet)
377 {
378     ASSERT_SINGLE_OWNER
379 
380     if (this->abandoned()) {
381         return;
382     }
383 
384     fResourceCache->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
385 }
386 
// Periodic housekeeping: processes completed async work and mapped-buffer
// returns, then purges cache entries untouched for at least `msNotUsed`
// (optionally scratch-only) and stale text blobs. No-op when abandoned.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used before this instant is eligible for purging.
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();
}
408 
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)409 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
410     ASSERT_SINGLE_OWNER
411 
412     if (this->abandoned()) {
413         return;
414     }
415 
416     fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
417 }
418 
419 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)420 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
421                            bool deleteSemaphoresAfterWait) {
422     if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
423         return false;
424     }
425     GrWrapOwnership ownership =
426             deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
427     for (int i = 0; i < numSemaphores; ++i) {
428         std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
429                 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
430         // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
431         // to begin with. Therefore, it is fine to not wait on it.
432         if (sema) {
433             fGpu->waitSemaphore(sema.get());
434         }
435     }
436     return true;
437 }
438 
// Lazily creates and returns the small-path atlas manager (v1 builds only),
// or nullptr if its atlas cannot be initialized. In non-v1 builds the whole
// body compiles away and the never-populated member (nullptr) is returned.
skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
#if SK_GPU_V1
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        // Register it so it participates in every flush.
        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }
#endif

    return fSmallPathAtlasMgr.get();
}
454 
455 ////////////////////////////////////////////////////////////////////////////////
456 
// Flushes all pending work to the GPU. On an abandoned context the client's
// finished/submitted callbacks are still invoked (submitted = false) so
// callers waiting on them don't hang, and kNo is returned.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    // Empty proxy list == flush everything the drawing manager has queued.
    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
472 
submit(bool syncCpu)473 bool GrDirectContext::submit(bool syncCpu) {
474     ASSERT_SINGLE_OWNER
475     if (this->abandoned()) {
476         return false;
477     }
478 
479     if (!fGpu) {
480         return false;
481     }
482 
483     return fGpu->submitToGpu(syncCpu);
484 }
485 
486 ////////////////////////////////////////////////////////////////////////////////
487 
// Polls the GPU for finished work so its finish procs can run; safe no-op
// when no GPU exists.
void GrDirectContext::checkAsyncWorkCompletion() {
    if (fGpu) {
        fGpu->checkFinishProcs();
    }
}
493 
// Blocks until all outstanding GPU work completes, then drains finish procs.
// Skipped on an abandoned context unless the caller explicitly opts in
// (used during teardown paths that must still synchronize).
void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}
500 
501 ////////////////////////////////////////////////////////////////////////////////
502 
// Asks the GPU object to persist its Vulkan pipeline cache data; no-op for
// other backends or when no GPU exists.
void GrDirectContext::storeVkPipelineCacheData() {
    if (fGpu) {
        fGpu->storeVkPipelineCacheData();
    }
}
508 
509 ////////////////////////////////////////////////////////////////////////////////
510 
// Reports whether the shader caps allow distance-field text rendering.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
514 
515 //////////////////////////////////////////////////////////////////////////////
516 
// Writes this context's GPU memory statistics (resource cache plus text-blob
// cache size) into the client-provided trace dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
523 
// As dumpMemoryStatistics(), but restricted to cache entries carrying `tag`.
void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
        this->getTextBlobCache()->usedBytes());
}
530 
setCurrentGrResourceTag(const GrGpuResourceTag & tag)531 void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
532     if (fResourceCache) {
533         return fResourceCache->setCurrentGrResourceTag(tag);
534     }
535 }
536 
popGrResourceTag()537 void GrDirectContext::popGrResourceTag()
538 {
539     if (fResourceCache) {
540         return fResourceCache->popGrResourceTag();
541     }
542 }
543 
getCurrentGrResourceTag() const544 GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
545     if (fResourceCache) {
546         return fResourceCache->getCurrentGrResourceTag();
547     }
548     return {};
549 }
// Releases every cached resource carrying `tag`; no-op before the cache exists.
void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        fResourceCache->releaseByTag(tag);
    }
}
getAllGrGpuResourceTags() const555 std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
556     if (fResourceCache) {
557         return fResourceCache->getAllGrGpuResourceTags();
558     }
559     return {};
560 }
561 
562 // OH ISSUE: get the memory information of the updated pid.
// OH ISSUE: get the memory information of the updated pid.
// Fills `out` with per-pid memory usage from the resource cache; no-op
// before the cache exists. (pid -> bytes, per the cache's bookkeeping.)
void GrDirectContext::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
{
    if (fResourceCache) {
        fResourceCache->getUpdatedMemoryMap(out);
    }
}
569 
570 // OH ISSUE: init gpu memory limit.
// OH ISSUE: init gpu memory limit.
// Installs an overflow callback and byte limit on the resource cache; no-op
// before the cache exists.
void GrDirectContext::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fResourceCache) {
        fResourceCache->initGpuMemoryLimit(callback, size);
    }
}
577 
578 // OH ISSUE: check whether the PID is abnormal.
isPidAbnormal() const579 bool GrDirectContext::isPidAbnormal() const
580 {
581     if (fResourceCache) {
582         return fResourceCache->isPidAbnormal();
583     }
584     return false;
585 }
586 
// Asks the GPU object to defragment its VMA (Vulkan memory allocator) heap;
// safe no-op when no GPU exists.
void GrDirectContext::vmaDefragment()
{
    if (fGpu) {
        fGpu->vmaDefragment();
    }
}
593 
dumpVmaStats(SkString * out)594 void GrDirectContext::dumpVmaStats(SkString *out)
595 {
596     if (out == nullptr) {
597         return;
598     }
599     if (fGpu) {
600         fGpu->dumpVmaStats(out);
601     }
602 }
603 
604 // OH ISSUE: intra frame and inter frame identification
// OH ISSUE: intra frame and inter frame identification
// Notifies the resource cache that a frame has started (Vulkan builds only;
// compiles to a no-op otherwise).
void GrDirectContext::beginFrame()
{
#ifdef SK_VULKAN
    if (fResourceCache) {
        fResourceCache->beginFrame();
    }
#endif
}
613 
614 // OH ISSUE: intra frame and inter frame identification
// OH ISSUE: intra frame and inter frame identification
// Notifies the resource cache that the current frame has ended (Vulkan
// builds only; compiles to a no-op otherwise).
void GrDirectContext::endFrame()
{
#ifdef SK_VULKAN
    if (fResourceCache) {
        fResourceCache->endFrame();
    }
#endif
}
623 
624 // OH ISSUE: async memory reclaimer
// Enables/disables the GPU's asynchronous memory reclaimer (Vulkan builds
// only). `setThreadPriority` is forwarded so the reclaimer can configure its
// worker thread's priority.
void GrDirectContext::setGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority)
{
#ifdef SK_VULKAN
    if (fGpu) {
        fGpu->setGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
    }
#endif
}
633 
634 // OH ISSUE: async memory reclaimer
// Asks the GPU to flush memory queued for asynchronous reclamation (Vulkan
// builds only; no-op otherwise or without a GPU).
void GrDirectContext::flushGpuMemoryInWaitQueue()
{
#ifdef SK_VULKAN
    if (fGpu) {
        fGpu->flushGpuMemoryInWaitQueue();
    }
#endif
}
643 
644 // OH ISSUE: suppress release window
// OH ISSUE: suppress release window
// Toggles the resource cache's suppress-release-window feature (Vulkan
// builds only). No-op on an abandoned context.
void GrDirectContext::setGpuCacheSuppressWindowSwitch(bool enabled)
{
#ifdef SK_VULKAN
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->setGpuCacheSuppressWindowSwitch(enabled);
#endif
}
657 
658 // OH ISSUE: suppress release window
// OH ISSUE: suppress release window
// Lets the resource cache throttle purging until `nextFrameHasArrived`
// reports true (Vulkan builds only). No-op on an abandoned context.
void GrDirectContext::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
{
#ifdef SK_VULKAN
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
#endif
}
671 
// Creates an uninitialized backend texture with the given format/flags.
// Returns an invalid GrBackendTexture on an abandoned context.
// NOTE(review): fGpu is dereferenced without a null check here — this assumes
// a non-abandoned context always has a GPU; confirm against init()/abandoned().
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}
685 
createBackendTexture(int width,int height,SkColorType skColorType,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected)686 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
687                                                        SkColorType skColorType,
688                                                        GrMipmapped mipMapped,
689                                                        GrRenderable renderable,
690                                                        GrProtected isProtected) {
691     if (this->abandoned()) {
692         return GrBackendTexture();
693     }
694 
695     const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
696 
697     return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
698 }
699 
// Helper: creates a backend texture and clears it to `color`. If the clear
// fails the texture is deleted again so the caller never receives a texture
// with undefined contents. Returns an invalid texture on any failure.
// `finishedCallback` is handed to the clear so the client learns when the
// GPU work completes.
static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
                                                         SkISize dimensions,
                                                         const GrBackendFormat& backendFormat,
                                                         GrMipmapped mipMapped,
                                                         GrRenderable renderable,
                                                         GrProtected isProtected,
                                                         sk_sp<GrRefCntedCallback> finishedCallback,
                                                         std::array<float, 4> color) {
    GrGpu* gpu = dContext->priv().getGpu();
    GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
                                                       mipMapped, isProtected);
    if (!beTex.isValid()) {
        return {};
    }

    if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
                                                        std::move(finishedCallback),
                                                        color)) {
        // Don't leak the texture if the clear failed.
        dContext->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
723 
// Helper: uploads `numLevels` mip levels from `src` into an existing backend
// texture, then flushes so the writes are visible. Returns false if the
// pixmaps' color type is incompatible with the texture's format, the texture
// cannot be wrapped, or the upload fails. The texture is borrowed (client
// retains ownership); `finishedCallback` signals when the GPU work is done.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // Repackage the levels as GrCPixmaps for the write call.
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    // Flush just this surface so the upload is submitted.
    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
765 
// Creates a backend texture cleared to `color`. Returns an invalid texture on
// an abandoned context or creation/clear failure.
// NOTE: the finished callback is wrapped before the abandoned() check —
// presumably so the client's finishedProc still fires (via the wrapper's
// destruction) on the early-out path; confirm against GrRefCntedCallback.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}
790 
createBackendTexture(int width,int height,SkColorType skColorType,const SkColor4f & color,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)791 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
792                                                        SkColorType skColorType,
793                                                        const SkColor4f& color,
794                                                        GrMipmapped mipMapped,
795                                                        GrRenderable renderable,
796                                                        GrProtected isProtected,
797                                                        GrGpuFinishedProc finishedProc,
798                                                        GrGpuFinishedContext finishedContext) {
799     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
800 
801     if (this->abandoned()) {
802         return {};
803     }
804 
805     GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
806     if (!format.isValid()) {
807         return {};
808     }
809 
810     GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
811     SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);
812 
813     return create_and_clear_backend_texture(this,
814                                             {width, height},
815                                             format,
816                                             mipMapped,
817                                             renderable,
818                                             isProtected,
819                                             std::move(finishedCallback),
820                                             swizzledColor.array());
821 }
822 
createBackendTexture(const SkPixmap srcData[],int numProvidedLevels,GrSurfaceOrigin textureOrigin,GrRenderable renderable,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)823 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
824                                                        int numProvidedLevels,
825                                                        GrSurfaceOrigin textureOrigin,
826                                                        GrRenderable renderable,
827                                                        GrProtected isProtected,
828                                                        GrGpuFinishedProc finishedProc,
829                                                        GrGpuFinishedContext finishedContext) {
830     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
831 
832     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
833 
834     if (this->abandoned()) {
835         return {};
836     }
837 
838     if (!srcData || numProvidedLevels <= 0) {
839         return {};
840     }
841 
842     SkColorType colorType = srcData[0].colorType();
843 
844     GrMipmapped mipMapped = GrMipmapped::kNo;
845     if (numProvidedLevels > 1) {
846         mipMapped = GrMipmapped::kYes;
847     }
848 
849     GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
850     GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
851                                                         srcData[0].height(),
852                                                         backendFormat,
853                                                         mipMapped,
854                                                         renderable,
855                                                         isProtected);
856     if (!beTex.isValid()) {
857         return {};
858     }
859     if (!update_texture_with_pixmaps(this,
860                                      srcData,
861                                      numProvidedLevels,
862                                      beTex,
863                                      textureOrigin,
864                                      std::move(finishedCallback))) {
865         this->deleteBackendTexture(beTex);
866         return {};
867     }
868     return beTex;
869 }
870 
updateBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)871 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
872                                            const SkColor4f& color,
873                                            GrGpuFinishedProc finishedProc,
874                                            GrGpuFinishedContext finishedContext) {
875     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
876 
877     if (this->abandoned()) {
878         return false;
879     }
880 
881     return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
882 }
883 
updateBackendTexture(const GrBackendTexture & backendTexture,SkColorType skColorType,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)884 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
885                                            SkColorType skColorType,
886                                            const SkColor4f& color,
887                                            GrGpuFinishedProc finishedProc,
888                                            GrGpuFinishedContext finishedContext) {
889     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
890 
891     if (this->abandoned()) {
892         return false;
893     }
894 
895     GrBackendFormat format = backendTexture.getBackendFormat();
896     GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
897 
898     if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
899         return false;
900     }
901 
902     GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
903     SkColor4f swizzledColor = swizzle.applyTo(color);
904 
905     return fGpu->clearBackendTexture(backendTexture,
906                                      std::move(finishedCallback),
907                                      swizzledColor.array());
908 }
909 
updateBackendTexture(const GrBackendTexture & backendTexture,const SkPixmap srcData[],int numLevels,GrSurfaceOrigin textureOrigin,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)910 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
911                                            const SkPixmap srcData[],
912                                            int numLevels,
913                                            GrSurfaceOrigin textureOrigin,
914                                            GrGpuFinishedProc finishedProc,
915                                            GrGpuFinishedContext finishedContext) {
916     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
917 
918     if (this->abandoned()) {
919         return false;
920     }
921 
922     if (!srcData || numLevels <= 0) {
923         return false;
924     }
925 
926     // If the texture has MIP levels then we require that the full set is overwritten.
927     int numExpectedLevels = 1;
928     if (backendTexture.hasMipmaps()) {
929         numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
930                                                         backendTexture.height()) + 1;
931     }
932     if (numLevels != numExpectedLevels) {
933         return false;
934     }
935     return update_texture_with_pixmaps(this,
936                                        srcData,
937                                        numLevels,
938                                        backendTexture,
939                                        textureOrigin,
940                                        std::move(finishedCallback));
941 }
942 
943 //////////////////////////////////////////////////////////////////////////////
944 
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,const void * data,size_t size)945 static GrBackendTexture create_and_update_compressed_backend_texture(
946         GrDirectContext* dContext,
947         SkISize dimensions,
948         const GrBackendFormat& backendFormat,
949         GrMipmapped mipMapped,
950         GrProtected isProtected,
951         sk_sp<GrRefCntedCallback> finishedCallback,
952         const void* data,
953         size_t size) {
954     GrGpu* gpu = dContext->priv().getGpu();
955 
956     GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
957                                                                  mipMapped, isProtected);
958     if (!beTex.isValid()) {
959         return {};
960     }
961 
962     if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
963                 beTex, std::move(finishedCallback), data, size)) {
964         dContext->deleteBackendTexture(beTex);
965         return {};
966     }
967     return beTex;
968 }
969 
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const SkColor4f & color,GrMipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)970 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
971         int width, int height,
972         const GrBackendFormat& backendFormat,
973         const SkColor4f& color,
974         GrMipmapped mipmapped,
975         GrProtected isProtected,
976         GrGpuFinishedProc finishedProc,
977         GrGpuFinishedContext finishedContext) {
978     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
979     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
980 
981     if (this->abandoned()) {
982         return {};
983     }
984 
985     SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
986     if (compression == SkImage::CompressionType::kNone) {
987         return {};
988     }
989 
990     size_t size = SkCompressedDataSize(compression,
991                                        {width, height},
992                                        nullptr,
993                                        mipmapped == GrMipmapped::kYes);
994     auto storage = std::make_unique<char[]>(size);
995     GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
996     return create_and_update_compressed_backend_texture(this,
997                                                         {width, height},
998                                                         backendFormat,
999                                                         mipmapped,
1000                                                         isProtected,
1001                                                         std::move(finishedCallback),
1002                                                         storage.get(),
1003                                                         size);
1004 }
1005 
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const SkColor4f & color,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1006 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1007         int width, int height,
1008         SkImage::CompressionType compression,
1009         const SkColor4f& color,
1010         GrMipmapped mipMapped,
1011         GrProtected isProtected,
1012         GrGpuFinishedProc finishedProc,
1013         GrGpuFinishedContext finishedContext) {
1014     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1015     GrBackendFormat format = this->compressedBackendFormat(compression);
1016     return this->createCompressedBackendTexture(width, height, format, color,
1017                                                 mipMapped, isProtected, finishedProc,
1018                                                 finishedContext);
1019 }
1020 
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const void * compressedData,size_t dataSize,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1021 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1022         int width, int height,
1023         const GrBackendFormat& backendFormat,
1024         const void* compressedData,
1025         size_t dataSize,
1026         GrMipmapped mipMapped,
1027         GrProtected isProtected,
1028         GrGpuFinishedProc finishedProc,
1029         GrGpuFinishedContext finishedContext) {
1030     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1031     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1032 
1033     if (this->abandoned()) {
1034         return {};
1035     }
1036 
1037     return create_and_update_compressed_backend_texture(this,
1038                                                         {width, height},
1039                                                         backendFormat,
1040                                                         mipMapped,
1041                                                         isProtected,
1042                                                         std::move(finishedCallback),
1043                                                         compressedData,
1044                                                         dataSize);
1045 }
1046 
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const void * data,size_t dataSize,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1047 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1048         int width, int height,
1049         SkImage::CompressionType compression,
1050         const void* data, size_t dataSize,
1051         GrMipmapped mipMapped,
1052         GrProtected isProtected,
1053         GrGpuFinishedProc finishedProc,
1054         GrGpuFinishedContext finishedContext) {
1055     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1056     GrBackendFormat format = this->compressedBackendFormat(compression);
1057     return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
1058                                                 isProtected, finishedProc, finishedContext);
1059 }
1060 
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1061 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1062                                                      const SkColor4f& color,
1063                                                      GrGpuFinishedProc finishedProc,
1064                                                      GrGpuFinishedContext finishedContext) {
1065     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1066 
1067     if (this->abandoned()) {
1068         return false;
1069     }
1070 
1071     SkImage::CompressionType compression =
1072             GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1073     if (compression == SkImage::CompressionType::kNone) {
1074         return {};
1075     }
1076     size_t size = SkCompressedDataSize(compression,
1077                                        backendTexture.dimensions(),
1078                                        nullptr,
1079                                        backendTexture.hasMipmaps());
1080     SkAutoMalloc storage(size);
1081     GrFillInCompressedData(compression,
1082                            backendTexture.dimensions(),
1083                            backendTexture.mipmapped(),
1084                            static_cast<char*>(storage.get()),
1085                            color);
1086     return fGpu->updateCompressedBackendTexture(backendTexture,
1087                                                 std::move(finishedCallback),
1088                                                 storage.get(),
1089                                                 size);
1090 }
1091 
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1092 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1093                                                      const void* compressedData,
1094                                                      size_t dataSize,
1095                                                      GrGpuFinishedProc finishedProc,
1096                                                      GrGpuFinishedContext finishedContext) {
1097     auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1098 
1099     if (this->abandoned()) {
1100         return false;
1101     }
1102 
1103     if (!compressedData) {
1104         return false;
1105     }
1106 
1107     return fGpu->updateCompressedBackendTexture(backendTexture,
1108                                                 std::move(finishedCallback),
1109                                                 compressedData,
1110                                                 dataSize);
1111 }
1112 
1113 //////////////////////////////////////////////////////////////////////////////
1114 
setBackendTextureState(const GrBackendTexture & backendTexture,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1115 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1116                                              const GrBackendSurfaceMutableState& state,
1117                                              GrBackendSurfaceMutableState* previousState,
1118                                              GrGpuFinishedProc finishedProc,
1119                                              GrGpuFinishedContext finishedContext) {
1120     auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1121 
1122     if (this->abandoned()) {
1123         return false;
1124     }
1125 
1126     return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1127 }
1128 
1129 
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1130 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1131                                                   const GrBackendSurfaceMutableState& state,
1132                                                   GrBackendSurfaceMutableState* previousState,
1133                                                   GrGpuFinishedProc finishedProc,
1134                                                   GrGpuFinishedContext finishedContext) {
1135     auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1136 
1137     if (this->abandoned()) {
1138         return false;
1139     }
1140 
1141     return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1142                                              std::move(callback));
1143 }
1144 
deleteBackendTexture(GrBackendTexture backendTex)1145 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
1146     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1147     // For the Vulkan backend we still must destroy the backend texture when the context is
1148     // abandoned.
1149     if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1150         return;
1151     }
1152 
1153     fGpu->deleteBackendTexture(backendTex);
1154 }
1155 
1156 //////////////////////////////////////////////////////////////////////////////
1157 
// Hands a previously captured (key, data) shader blob to the backend so it can
// pre-warm its compiled-program cache. Returns whatever the backend reports.
// NOTE(review): unlike most entry points in this file, there is no abandoned()
// check before dereferencing fGpu — presumably callers guarantee a live
// context; confirm before relying on this after abandonment.
bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}
1161 
1162 #ifdef SK_ENABLE_DUMP_GPU
1163 #include "include/core/SkString.h"
1164 #include "src/utils/SkJSONWriter.h"
dump() const1165 SkString GrDirectContext::dump() const {
1166     SkDynamicMemoryWStream stream;
1167     SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
1168     writer.beginObject();
1169 
1170     writer.appendString("backend", GrBackendApiToStr(this->backend()));
1171 
1172     writer.appendName("caps");
1173     this->caps()->dumpJSON(&writer);
1174 
1175     writer.appendName("gpu");
1176     this->fGpu->dumpJSON(&writer);
1177 
1178     writer.appendName("context");
1179     this->dumpJSON(&writer);
1180 
1181     // Flush JSON to the memory stream
1182     writer.endObject();
1183     writer.flush();
1184 
1185     // Null terminate the JSON data in the memory stream
1186     stream.write8(0);
1187 
1188     // Allocate a string big enough to hold all the data, then copy out of the stream
1189     SkString result(stream.bytesWritten());
1190     stream.copyToAndReset(result.writable_str());
1191     return result;
1192 }
1193 #endif
1194 
1195 #ifdef SK_GL
1196 
1197 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)1198 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1199     GrContextOptions defaultOptions;
1200     return MakeGL(std::move(glInterface), defaultOptions);
1201 }
1202 
MakeGL(const GrContextOptions & options)1203 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
1204     return MakeGL(nullptr, options);
1205 }
1206 
MakeGL()1207 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1208     GrContextOptions defaultOptions;
1209     return MakeGL(nullptr, defaultOptions);
1210 }
1211 
1212 #if GR_TEST_UTILS
// Returns a glGetError wrapper that behaves like |original| except that, when
// the wrapped call reports GR_GL_NO_ERROR, it randomly (about 1 in 300 calls)
// substitutes GR_GL_OUT_OF_MEMORY. Test-only plumbing (GR_TEST_UTILS) used to
// exercise the GL backend's OOM-handling paths (see options.fRandomGLOOM).
// NOTE(review): not 'static' and not in an anonymous namespace, so it has
// external linkage — confirm no other TU defines the same symbol.
GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
    // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
    // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
    // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
    // on the thing it captures. So we leak the context.
    struct GetErrorContext {
        SkRandom fRandom;
        GrGLFunction<GrGLGetErrorFn> fGetError;
    };

    auto errorContext = new GetErrorContext;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
    // Tell LeakSanitizer the deliberate leak above is expected.
    __lsan_ignore_object(errorContext);
#endif

    errorContext->fGetError = original;

    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
        GrGLenum error = errorContext->fGetError();
        // ~1/300 chance of synthesizing an OOM when the real call succeeded.
        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
            error = GR_GL_OUT_OF_MEMORY;
        }
        return error;
    });
}
1239 #endif
1240 
// Creates an OpenGL-backed GrDirectContext over |glInterface|. Returns null if
// GPU creation or context initialization fails.
sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                               const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
#if GR_TEST_UTILS
    if (options.fRandomGLOOM) {
        // Clone the interface and replace getError with one that randomly
        // reports OOM, to stress the backend's out-of-memory handling in tests.
        // NOTE(review): this dereferences glInterface, so fRandomGLOOM with a
        // null interface would crash here — TODO confirm callers never do that.
        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
        copy->fFunctions.fGetError =
                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
#if GR_GL_CHECK_ERROR
        // Suppress logging GL errors since we'll be synthetically generating them.
        copy->suppressErrorLogging();
#endif
        glInterface = std::move(copy);
    }
#endif
    direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }
    return direct;
}
1262 #endif
1263 
1264 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1265 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1266     GrContextOptions defaultOptions;
1267     return MakeMock(mockOptions, defaultOptions);
1268 }
1269 
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1270 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1271                                                  const GrContextOptions& options) {
1272     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1273 
1274     direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1275     if (!direct->init()) {
1276         return nullptr;
1277     }
1278 
1279     return direct;
1280 }
1281 
1282 #ifdef SK_VULKAN
1283 /*************************************************************************************************/
MakeVulkan(const GrVkBackendContext & backendContext)1284 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1285     GrContextOptions defaultOptions;
1286     return MakeVulkan(backendContext, defaultOptions);
1287 }
1288 
MakeVulkan(const GrVkBackendContext & backendContext,const GrContextOptions & options)1289 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1290                                                    const GrContextOptions& options) {
1291     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1292 
1293     direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1294     if (!direct->init()) {
1295         return nullptr;
1296     }
1297 
1298     return direct;
1299 }
1300 #endif
1301 
1302 #ifdef SK_METAL
1303 /*************************************************************************************************/
MakeMetal(const GrMtlBackendContext & backendContext)1304 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1305     GrContextOptions defaultOptions;
1306     return MakeMetal(backendContext, defaultOptions);
1307 }
1308 
MakeMetal(const GrMtlBackendContext & backendContext,const GrContextOptions & options)1309 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1310                                                      const GrContextOptions& options) {
1311     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1312 
1313     direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1314     if (!direct->init()) {
1315         return nullptr;
1316     }
1317 
1318     return direct;
1319 }
1320 
1321 // deprecated
MakeMetal(void * device,void * queue)1322 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1323     GrContextOptions defaultOptions;
1324     return MakeMetal(device, queue, defaultOptions);
1325 }
1326 
1327 // deprecated
1328 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
MakeMetal(void * device,void * queue,const GrContextOptions & options)1329 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1330                                                   const GrContextOptions& options) {
1331     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1332     GrMtlBackendContext backendContext = {};
1333     backendContext.fDevice.reset(device);
1334     backendContext.fQueue.reset(queue);
1335 
1336     return GrDirectContext::MakeMetal(backendContext, options);
1337 }
1338 #endif
1339 
1340 #ifdef SK_DIRECT3D
1341 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1342 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1343     GrContextOptions defaultOptions;
1344     return MakeDirect3D(backendContext, defaultOptions);
1345 }
1346 
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1347 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1348                                                      const GrContextOptions& options) {
1349     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1350 
1351     direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1352     if (!direct->init()) {
1353         return nullptr;
1354     }
1355 
1356     return direct;
1357 }
1358 #endif
1359 
1360 #ifdef SK_DAWN
1361 /*************************************************************************************************/
MakeDawn(const wgpu::Device & device)1362 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1363     GrContextOptions defaultOptions;
1364     return MakeDawn(device, defaultOptions);
1365 }
1366 
MakeDawn(const wgpu::Device & device,const GrContextOptions & options)1367 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1368                                                  const GrContextOptions& options) {
1369     sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1370 
1371     direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1372     if (!direct->init()) {
1373         return nullptr;
1374     }
1375 
1376     return direct;
1377 }
1378 
1379 #endif
1380