/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "include/gpu/GrDirectContext.h"

#include "include/core/SkTraceMemoryDump.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrContextThreadSafeProxy.h"
#include "src/core/SkAutoMalloc.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrContextThreadSafeProxyPriv.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrDrawingManager.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrShaderUtils.h"
#include "src/gpu/GrThreadSafePipelineBuilder.h"
#include "src/gpu/SurfaceContext.h"
#include "src/gpu/effects/GrSkSLFP.h"
#include "src/gpu/mock/GrMockGpu.h"
#include "src/gpu/text/GrAtlasManager.h"
#include "src/gpu/text/GrStrikeCache.h"
#include "src/image/SkImage_GpuBase.h"
#if SK_GPU_V1
#include "src/gpu/ops/SmallPathAtlasMgr.h"
#else
// A vestigial definition for v2 that will never be instantiated
namespace skgpu::v1 {
class SmallPathAtlasMgr {
public:
    SmallPathAtlasMgr() { SkASSERT(0); }
    void reset() { SkASSERT(0); }
};
}  // namespace skgpu::v1
#endif
#ifdef SK_GL
#include "src/gpu/gl/GrGLGpu.h"
#endif
#ifdef SK_METAL
#include "include/gpu/mtl/GrMtlBackendContext.h"
#include "src/gpu/mtl/GrMtlTrampoline.h"
#endif
#ifdef SK_VULKAN
#include "src/gpu/vk/GrVkGpu.h"
#endif
#ifdef SK_DIRECT3D
#include "src/gpu/d3d/GrD3DGpu.h"
#endif
#ifdef SK_DAWN
#include "src/gpu/dawn/GrDawnGpu.h"
#endif
#include <memory>

#if GR_TEST_UTILS
#   include "include/utils/SkRandom.h"
#   if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#       include <sanitizer/lsan_interface.h>
#   endif
#endif

#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())

GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return DirectContextID(id);
}

GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}

GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // This if-test protects against the case where the context is being destroyed
    // before having been fully created.
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the GPU before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but the resource cache queries this ptr in
    // releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel results don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}

sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return INHERITED::threadSafeProxy();
}

void GrDirectContext::resetGLTextureBindings() {
    if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
        return;
    }
    fGpu->resetTextureBindings();
}

void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}

void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the GPU before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // Abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}

bool GrDirectContext::abandoned() {
    if (INHERITED::abandoned()) {
        return true;
    }

    if (fGpu && fGpu->isDeviceLost()) {
        this->abandonContext();
        return true;
    }
    return false;
}

bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }

void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the GPU before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}

void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge blobs.
    this->getTextBlobCache()->freeAll();
    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}

bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
          this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}

void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    ASSERT_SINGLE_OWNER

    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}

void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
    ASSERT_SINGLE_OWNER
    if (maxResources) {
        *maxResources = -1;
    }
    if (maxResourceBytes) {
        *maxResourceBytes = this->getResourceCacheLimit();
    }
}

size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}

void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}

void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}

void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs.
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}

void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockAndSafeCacheGpuResources();
    fResourceCache->purgeAsNeeded();

    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs.
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}

std::array<int, 2> GrDirectContext::CalcHpsBluredImageDimension(const SkBlurArg& blurArg) {
    return fGpu->GetHpsDimension(blurArg);
}

void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourcesOnly,
                                                  const GrGpuResourceTag& tag) {
    ASSERT_SINGLE_OWNER
    fResourceCache->purgeUnlockedResourcesByTag(scratchResourcesOnly, tag);
    fResourceCache->purgeAsNeeded();

    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs.
    this->getTextBlobCache()->purgeStaleBlobs();
}

void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly,
                                                  const std::set<int>& exitedPidSet) {
    ASSERT_SINGLE_OWNER
    fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
    fResourceCache->purgeAsNeeded();

    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge blobs.
    this->getTextBlobCache()->freeAll();
    // The StrikeCache indirectly references typefaces; to release those references, the
    // StrikeCache must be cleared when an application exits.
    fStrikeCache->freeAll();
}

void GrDirectContext::registerVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback)
{
    vulkanErrorCallback_ = vulkanErrorCallback;
}

void GrDirectContext::purgeCacheBetweenFrames(bool scratchResourcesOnly,
                                              const std::set<int>& exitedPidSet,
                                              const std::set<int>& protectedPidSet)
{
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
}

void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The TextBlobCache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs.
    this->getTextBlobCache()->purgeStaleBlobs();
}
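
// A minimal usage sketch (illustrative only; onIdleTick is a hypothetical client hook and
// the 5-second threshold is an arbitrary choice, not a Skia recommendation): clients that
// want to bound idle memory can call performDeferredCleanup periodically to drop resources
// that have gone unused for the given duration.
//
//     void onIdleTick(GrDirectContext* ctx) {
//         ctx->performDeferredCleanup(std::chrono::seconds(5),
//                                     /*scratchResourcesOnly=*/false);
//     }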

void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
}

////////////////////////////////////////////////////////////////////////////////
bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
                           bool deleteSemaphoresAfterWait) {
    if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
        return false;
    }
    GrWrapOwnership ownership =
            deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
    for (int i = 0; i < numSemaphores; ++i) {
        std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
                waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
        // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
        // to begin with. Therefore, it is fine to not wait on it.
        if (sema) {
            fGpu->waitSemaphore(sema.get());
        }
    }
    return true;
}
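
// A minimal usage sketch (illustrative only; gateOnSemaphores is a hypothetical helper, and
// creating the GrBackendSemaphore array is backend-specific and not shown): make the context
// wait on externally signaled semaphores before executing subsequently submitted work.
// Passing deleteSemaphoresAfterWait=true hands ownership of the semaphores to Skia.
//
//     bool gateOnSemaphores(GrDirectContext* ctx,
//                           const GrBackendSemaphore semaphores[], int n) {
//         return ctx->wait(n, semaphores, /*deleteSemaphoresAfterWait=*/true);
//     }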

skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
#if SK_GPU_V1
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }
#endif

    return fSmallPathAtlasMgr.get();
}

////////////////////////////////////////////////////////////////////////////////

GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}

bool GrDirectContext::submit(bool syncCpu) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        return false;
    }

    if (!fGpu) {
        return false;
    }

    return fGpu->submitToGpu(syncCpu);
}
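
// A minimal sketch of the two-phase flush/submit protocol (illustrative only; flushThenSubmit
// is a hypothetical helper): flush() records pending work into backend command buffers and
// submit() hands them to the GPU; flushAndSubmit() combines the two. Passing syncCpu=true
// would additionally block until the GPU has finished the submitted work.
//
//     void flushThenSubmit(GrDirectContext* ctx) {
//         GrFlushInfo info;  // defaults: no semaphores, no callbacks
//         ctx->flush(info);
//         ctx->submit(/*syncCpu=*/false);
//     }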

////////////////////////////////////////////////////////////////////////////////

void GrDirectContext::checkAsyncWorkCompletion() {
    if (fGpu) {
        fGpu->checkFinishProcs();
    }
}

void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrDirectContext::storeVkPipelineCacheData() {
    if (fGpu) {
        fGpu->storeVkPipelineCacheData();
    }
}

////////////////////////////////////////////////////////////////////////////////

bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}

//////////////////////////////////////////////////////////////////////////////

void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}

void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump,
                                                const GrGpuResourceTag& tag) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}

void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        fResourceCache->setCurrentGrResourceTag(tag);
    }
}

void GrDirectContext::popGrResourceTag()
{
    if (fResourceCache) {
        fResourceCache->popGrResourceTag();
    }
}

GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
    if (fResourceCache) {
        return fResourceCache->getCurrentGrResourceTag();
    }
    return {};
}

void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        fResourceCache->releaseByTag(tag);
    }
}

std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
    if (fResourceCache) {
        return fResourceCache->getAllGrGpuResourceTags();
    }
    return {};
}

// OH ISSUE: get the memory information of the updated pids.
void GrDirectContext::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t>& out)
{
    if (fResourceCache) {
        fResourceCache->getUpdatedMemoryMap(out);
    }
}

// OH ISSUE: init the GPU memory limit.
void GrDirectContext::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fResourceCache) {
        fResourceCache->initGpuMemoryLimit(callback, size);
    }
}

// OH ISSUE: check whether the PID is abnormal.
bool GrDirectContext::isPidAbnormal() const
{
    if (fResourceCache) {
        return fResourceCache->isPidAbnormal();
    }
    return false;
}

void GrDirectContext::vmaDefragment()
{
    if (fGpu) {
        fGpu->vmaDefragment();
    }
}

void GrDirectContext::dumpVmaStats(SkString* out)
{
    if (out == nullptr) {
        return;
    }
    if (fGpu) {
        fGpu->dumpVmaStats(out);
    }
}

// OH ISSUE: intra-frame and inter-frame identification
void GrDirectContext::beginFrame()
{
#ifdef SK_VULKAN
    if (fResourceCache) {
        fResourceCache->beginFrame();
    }
#endif
}

// OH ISSUE: intra-frame and inter-frame identification
void GrDirectContext::endFrame()
{
#ifdef SK_VULKAN
    if (fResourceCache) {
        fResourceCache->endFrame();
    }
#endif
}

// OH ISSUE: async memory reclaimer
void GrDirectContext::setGpuMemoryAsyncReclaimerSwitch(bool enabled,
                                                       const std::function<void()>& setThreadPriority)
{
#ifdef SK_VULKAN
    if (fGpu) {
        fGpu->setGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
    }
#endif
}

// OH ISSUE: async memory reclaimer
void GrDirectContext::flushGpuMemoryInWaitQueue()
{
#ifdef SK_VULKAN
    if (fGpu) {
        fGpu->flushGpuMemoryInWaitQueue();
    }
#endif
}

// OH ISSUE: suppress the release window
void GrDirectContext::setGpuCacheSuppressWindowSwitch(bool enabled)
{
#ifdef SK_VULKAN
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->setGpuCacheSuppressWindowSwitch(enabled);
#endif
}

// OH ISSUE: suppress the release window
void GrDirectContext::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
{
#ifdef SK_VULKAN
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
#endif
}

GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}

GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
}

static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
                                                         SkISize dimensions,
                                                         const GrBackendFormat& backendFormat,
                                                         GrMipmapped mipMapped,
                                                         GrRenderable renderable,
                                                         GrProtected isProtected,
                                                         sk_sp<GrRefCntedCallback> finishedCallback,
                                                         std::array<float, 4> color) {
    GrGpu* gpu = dContext->priv().getGpu();
    GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
                                                       mipMapped, isProtected);
    if (!beTex.isValid()) {
        return {};
    }

    if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
                                                        std::move(finishedCallback),
                                                        color)) {
        dContext->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}

static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}

GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}

GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array());
}

GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipMapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipMapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipMapped,
                                                        renderable,
                                                        isProtected);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}

bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}

bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}

bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
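
// A minimal caller-side sketch (illustrative only; requiredLevels is a hypothetical helper):
// for a mipmapped texture the srcData array must cover the full MIP chain, so its length has
// to match the computation above. E.g. a 100x50 base level implies
// SkMipmap::ComputeLevelCount(100, 50) + 1 == 7 pixmaps.
//
//     int requiredLevels(const GrBackendTexture& tex) {
//         return tex.hasMipmaps()
//                 ? SkMipmap::ComputeLevelCount(tex.width(), tex.height()) + 1
//                 : 1;
//     }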

//////////////////////////////////////////////////////////////////////////////

static GrBackendTexture create_and_update_compressed_backend_texture(
        GrDirectContext* dContext,
        SkISize dimensions,
        const GrBackendFormat& backendFormat,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        sk_sp<GrRefCntedCallback> finishedCallback,
        const void* data,
        size_t size) {
    GrGpu* gpu = dContext->priv().getGpu();

    GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
                                                                 mipMapped, isProtected);
    if (!beTex.isValid()) {
        return {};
    }

    if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
                beTex, std::move(finishedCallback), data, size)) {
        dContext->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}

GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const SkColor4f& color,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    if (compression == SkImage::CompressionType::kNone) {
        return {};
    }

    size_t size = SkCompressedDataSize(compression,
                                       {width, height},
                                       nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        storage.get(),
                                                        size);
}

GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        SkImage::CompressionType compression,
        const SkColor4f& color,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrBackendFormat format = this->compressedBackendFormat(compression);
    return this->createCompressedBackendTexture(width, height, format, color,
                                                mipMapped, isProtected, finishedProc,
                                                finishedContext);
}

GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const void* compressedData,
        size_t dataSize,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipMapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        compressedData,
                                                        dataSize);
}

GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        SkImage::CompressionType compression,
        const void* data, size_t dataSize,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrBackendFormat format = this->compressedBackendFormat(compression);
    return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
                                                isProtected, finishedProc, finishedContext);
}

bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                     const SkColor4f& color,
                                                     GrGpuFinishedProc finishedProc,
                                                     GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    if (compression == SkImage::CompressionType::kNone) {
        return false;
    }
    size_t size = SkCompressedDataSize(compression,
                                       backendTexture.dimensions(),
                                       nullptr,
                                       backendTexture.hasMipmaps());
    SkAutoMalloc storage(size);
    GrFillInCompressedData(compression,
                           backendTexture.dimensions(),
                           backendTexture.mipmapped(),
                           static_cast<char*>(storage.get()),
                           color);
    return fGpu->updateCompressedBackendTexture(backendTexture,
                                                std::move(finishedCallback),
                                                storage.get(),
                                                size);
}

bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                     const void* compressedData,
                                                     size_t dataSize,
                                                     GrGpuFinishedProc finishedProc,
                                                     GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!compressedData) {
        return false;
    }

    return fGpu->updateCompressedBackendTexture(backendTexture,
                                                std::move(finishedCallback),
                                                compressedData,
                                                dataSize);
}

//////////////////////////////////////////////////////////////////////////////

bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
                                             const GrBackendSurfaceMutableState& state,
                                             GrBackendSurfaceMutableState* previousState,
                                             GrGpuFinishedProc finishedProc,
                                             GrGpuFinishedContext finishedContext) {
    auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
}

bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
                                                  const GrBackendSurfaceMutableState& state,
                                                  GrBackendSurfaceMutableState* previousState,
                                                  GrGpuFinishedProc finishedProc,
                                                  GrGpuFinishedContext finishedContext) {
    auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
                                             std::move(callback));
}

void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    // For the Vulkan backend we still must destroy the backend texture when the context is
    // abandoned.
    if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
        return;
    }

    fGpu->deleteBackendTexture(backendTex);
}

//////////////////////////////////////////////////////////////////////////////

bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}

#ifdef SK_ENABLE_DUMP_GPU
#include "include/core/SkString.h"
#include "src/utils/SkJSONWriter.h"
SkString GrDirectContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    writer.appendString("backend", GrBackendApiToStr(this->backend()));

    writer.appendName("caps");
    this->caps()->dumpJSON(&writer);

    writer.appendName("gpu");
    this->fGpu->dumpJSON(&writer);

    writer.appendName("context");
    this->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the stream
    SkString result(stream.bytesWritten());
    stream.copyToAndReset(result.writable_str());
    return result;
}
#endif

#ifdef SK_GL

/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
    GrContextOptions defaultOptions;
    return MakeGL(std::move(glInterface), defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
    return MakeGL(nullptr, options);
}

sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
    GrContextOptions defaultOptions;
    return MakeGL(nullptr, defaultOptions);
}
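
// A minimal usage sketch (illustrative only; makeContextOnCurrentGL is a hypothetical helper):
// creating a GL-backed context once a native GL context has been made current on this thread.
// A null GrGLInterface asks Skia to build a native interface from the current context.
//
//     sk_sp<GrDirectContext> makeContextOnCurrentGL() {
//         return GrDirectContext::MakeGL();  // null interface, default options
//     }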

#if GR_TEST_UTILS
GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
    // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
    // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
    // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
    // on the thing it captures. So we leak the context.
    struct GetErrorContext {
        SkRandom fRandom;
        GrGLFunction<GrGLGetErrorFn> fGetError;
    };

    auto errorContext = new GetErrorContext;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
    __lsan_ignore_object(errorContext);
#endif

    errorContext->fGetError = original;

    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
        GrGLenum error = errorContext->fGetError();
        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
            error = GR_GL_OUT_OF_MEMORY;
        }
        return error;
    });
}
#endif

sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                               const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
#if GR_TEST_UTILS
    if (options.fRandomGLOOM) {
        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
        copy->fFunctions.fGetError =
                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
#if GR_GL_CHECK_ERROR
        // Suppress logging GL errors since we'll be synthetically generating them.
        copy->suppressErrorLogging();
#endif
        glInterface = std::move(copy);
    }
#endif
    direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }
    return direct;
}
#endif

/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
    GrContextOptions defaultOptions;
    return MakeMock(mockOptions, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
                                                 const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));

    direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}

#ifdef SK_VULKAN
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
    GrContextOptions defaultOptions;
    return MakeVulkan(backendContext, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
                                                   const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));

    direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}
#endif

#ifdef SK_METAL
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
    GrContextOptions defaultOptions;
    return MakeMetal(backendContext, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
                                                  const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));

    direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}

// deprecated
sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
    GrContextOptions defaultOptions;
    return MakeMetal(device, queue, defaultOptions);
}

// deprecated
// remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
                                                  const GrContextOptions& options) {
    GrMtlBackendContext backendContext = {};
    backendContext.fDevice.reset(device);
    backendContext.fQueue.reset(queue);

    return GrDirectContext::MakeMetal(backendContext, options);
}
#endif

#ifdef SK_DIRECT3D
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
    GrContextOptions defaultOptions;
    return MakeDirect3D(backendContext, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
                                                     const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));

    direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}
#endif

#ifdef SK_DAWN
/*************************************************************************************************/
sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
    GrContextOptions defaultOptions;
    return MakeDawn(device, defaultOptions);
}

sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
                                                 const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));

    direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }

    return direct;
}

#endif