1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "include/gpu/GrDirectContext.h"
10
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
33 #if SK_GPU_V1
34 #include "src/gpu/ops/SmallPathAtlasMgr.h"
35 #else
// A vestigial definition for v2 that will never be instantiated.
// It exists only so the fSmallPathAtlasMgr member and the reset() calls in this
// file compile in non-v1 builds; any actual use trips the SkASSERT in debug.
namespace skgpu::v1 {
class SmallPathAtlasMgr {
public:
    SmallPathAtlasMgr() { SkASSERT(0); }
    void reset() { SkASSERT(0); }
};
}
44 #endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62
63 #if GR_TEST_UTILS
64 # include "include/utils/SkRandom.h"
65 # if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 # include <sanitizer/lsan_interface.h>
67 # endif
68 #endif
69
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71
Next()72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73 static std::atomic<uint32_t> nextID{1};
74 uint32_t id;
75 do {
76 id = nextID.fetch_add(1, std::memory_order_relaxed);
77 } while (id == SK_InvalidUniqueID);
78 return DirectContextID(id);
79 }
80
// Constructs the context around a freshly-made thread-safe proxy for the given
// backend/options and assigns this context a process-wide unique ID.
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}
85
// Tears the context down in dependency order: flush pending work, wait for the
// GPU to go idle, then release caches and managers. The ordering below matters.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
107
// Exposes the base class's thread-safe proxy for this context.
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return INHERITED::threadSafeProxy();
}
111
resetGLTextureBindings()112 void GrDirectContext::resetGLTextureBindings() {
113 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114 return;
115 }
116 fGpu->resetTextureBindings();
117 }
118
// Marks the given backend state bits dirty so the GPU re-sends that state
// before the next draw (used after external code touches the 3D API directly).
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
123
// Disconnects this context from the backend 3D API. Caches and providers are
// told to drop their backend objects WITHOUT freeing them through the API
// (kAbandon). Idempotent; the teardown order below is significant.
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    // Must be after GrResourceCache::abandonAll().
    fMappedBufferManager.reset();

    // fSmallPathAtlasMgr is created lazily (see onGetSmallPathAtlasMgr), so it may be null.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
153
abandoned()154 bool GrDirectContext::abandoned() {
155 if (INHERITED::abandoned()) {
156 return true;
157 }
158
159 if (fGpu && fGpu->isDeviceLost()) {
160 this->abandonContext();
161 return true;
162 }
163 return false;
164 }
165
oomed()166 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
167
// Like abandonContext(), but resources ARE freed through the backend 3D API
// (kCleanup) before the context is disconnected. Idempotent; order matters.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    // fSmallPathAtlasMgr is created lazily, so it may be null here.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
192
// Frees GPU-backed resources the context can recreate on demand: flushes
// pending work first, then empties the atlases, strike cache, drawing-manager
// resources, and finally all unlocked resources in the cache.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
    // fSmallPathAtlasMgr is created lazily, so it may be null here.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}
214
// Completes construction once the backend GrGpu exists: initializes the
// thread-safe proxy with the GPU's caps, then creates the caches, providers,
// and managers, and registers the atlas manager as an on-flush callback.
// Returns false if there is no GPU or base-class init fails.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
          this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
273
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const274 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
275 ASSERT_SINGLE_OWNER
276
277 if (resourceCount) {
278 *resourceCount = fResourceCache->getBudgetedResourceCount();
279 }
280 if (resourceBytes) {
281 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
282 }
283 }
284
// Returns the number of bytes held by cache resources that could be purged.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
289
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const290 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
291 ASSERT_SINGLE_OWNER
292 if (maxResources) {
293 *maxResources = -1;
294 }
295 if (maxResourceBytes) {
296 *maxResourceBytes = this->getResourceCacheLimit();
297 }
298 }
299
// Returns the cache's byte budget.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}

// Legacy two-argument setter; the resource-count limit ('unused') is ignored.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}

// Sets the cache's byte budget.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
314
// Purges unlocked resources from the cache (optionally only scratch ones),
// then lets the cache enforce its budget and releases backend objects the GPU
// no longer needs. No-op on an abandoned context.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
331
// Variant of purgeUnlockedResources() that asks the cache for its
// "unlock and safe cache" purge, then enforces the budget and releases
// backend objects. No-op on an abandoned context.
void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockAndSafeCacheGpuResources();
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
348
purgeUnlockedResourcesByTag(bool scratchResourceseOnly,const GrGpuResourceTag & tag)349 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourceseOnly, const GrGpuResourceTag& tag) {
350 ASSERT_SINGLE_OWNER
351 fResourceCache->purgeUnlockedResourcesByTag(scratchResourceseOnly, tag);
352 fResourceCache->purgeAsNeeded();
353
354 // The textBlod Cache doesn't actually hold any GPU resource but this is a convenient
355 // place to purge stale blobs
356 this->getTextBlobCache()->purgeStaleBlobs();
357 }
358
purgeUnlockedResourcesByPid(bool scratchResourcesOnly,const std::set<int> & exitedPidSet)359 void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet) {
360 ASSERT_SINGLE_OWNER
361 fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
362 fResourceCache->purgeAsNeeded();
363
364 // The textBlod Cache doesn't actually hold any GPU resource but this is a convenient
365 // place to purge stale blobs
366 this->getTextBlobCache()->purgeStaleBlobs();
367 }
368
// Periodic housekeeping: completes async work, recycles client-mapped buffers,
// and purges cache resources that have been idle for at least 'msNotUsed'
// (optionally scratch-only). No-op on an abandoned context.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything not used since this time point is eligible for purging.
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();
}
390
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)391 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
392 ASSERT_SINGLE_OWNER
393
394 if (this->abandoned()) {
395 return;
396 }
397
398 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
399 }
400
401 ////////////////////////////////////////////////////////////////////////////////
// Makes the GPU wait on each of the given backend semaphores before executing
// subsequent work. When deleteSemaphoresAfterWait is true the semaphores are
// adopted (we own their cleanup); otherwise they are merely borrowed.
// Returns false if there is no GPU or the backend lacks semaphore support.
bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
                           bool deleteSemaphoresAfterWait) {
    if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
        return false;
    }
    GrWrapOwnership ownership =
            deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
    for (int i = 0; i < numSemaphores; ++i) {
        std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
                waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
        // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
        // to begin with. Therefore, it is fine to not wait on it.
        if (sema) {
            fGpu->waitSemaphore(sema.get());
        }
    }
    return true;
}
420
// Lazily creates the small-path atlas manager (v1 builds only) and registers it
// as an on-flush callback. Returns nullptr if the atlas fails to initialize.
// In non-v1 builds nothing is ever created, so this returns nullptr.
skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
#if SK_GPU_V1
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    // initAtlas is (re)checked on every call, not just at creation.
    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }
#endif

    return fSmallPathAtlasMgr.get();
}
436
437 ////////////////////////////////////////////////////////////////////////////////
438
// Flushes all pending work to the backend (without submitting). On an
// abandoned context the finished/submitted callbacks are still invoked
// (submitted with success == false) so clients are not left waiting on them.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    // Empty proxy list == flush everything; no specific surface access needed.
    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
454
submit(bool syncCpu)455 bool GrDirectContext::submit(bool syncCpu) {
456 ASSERT_SINGLE_OWNER
457 if (this->abandoned()) {
458 return false;
459 }
460
461 if (!fGpu) {
462 return false;
463 }
464
465 return fGpu->submitToGpu(syncCpu);
466 }
467
468 ////////////////////////////////////////////////////////////////////////////////
469
checkAsyncWorkCompletion()470 void GrDirectContext::checkAsyncWorkCompletion() {
471 if (fGpu) {
472 fGpu->checkFinishProcs();
473 }
474 }
475
// Blocks until all outstanding GPU work finishes, then fires any completed
// finish callbacks. Skipped on an abandoned context unless the caller
// explicitly asks to run anyway (shouldExecuteWhileAbandoned).
void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}
482
483 ////////////////////////////////////////////////////////////////////////////////
484
storeVkPipelineCacheData()485 void GrDirectContext::storeVkPipelineCacheData() {
486 if (fGpu) {
487 fGpu->storeVkPipelineCacheData();
488 }
489 }
490
491 ////////////////////////////////////////////////////////////////////////////////
492
// True if the shader caps allow distance-field text rendering on this backend.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
496
497 //////////////////////////////////////////////////////////////////////////////
498
// Dumps resource-cache memory usage plus the text-blob cache's byte count into
// the provided trace-memory dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
505
// Like dumpMemoryStatistics(), but restricts the resource-cache portion to
// resources matching 'tag'. The text-blob figure is still the whole cache.
void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump,
                                                const GrGpuResourceTag& tag) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
512
setCurrentGrResourceTag(const GrGpuResourceTag & tag)513 void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
514 if (fResourceCache) {
515 return fResourceCache->setCurrentGrResourceTag(tag);
516 }
517 }
518
popGrResourceTag()519 void GrDirectContext::popGrResourceTag()
520 {
521 if (fResourceCache) {
522 return fResourceCache->popGrResourceTag();
523 }
524 }
525
// Returns the cache's current resource tag, or a default-constructed tag when
// no cache exists.
GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
    if (fResourceCache) {
        return fResourceCache->getCurrentGrResourceTag();
    }
    return {};
}

// Releases all cached resources matching 'tag' through the cache, if present.
void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        fResourceCache->releaseByTag(tag);
    }
}

// Returns the set of all tags currently present in the cache (empty when no
// cache exists).
std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
    if (fResourceCache) {
        return fResourceCache->getAllGrGpuResourceTags();
    }
    return {};
}
543
// Creates an uninitialized backend texture of the given format/size. Returns
// an invalid GrBackendTexture when the context is abandoned.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}
557
// Convenience overload: maps the SkColorType to this context's default backend
// format, then defers to the format-based overload. Returns an invalid texture
// when the context is abandoned.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
}
571
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,std::array<float,4> color)572 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
573 SkISize dimensions,
574 const GrBackendFormat& backendFormat,
575 GrMipmapped mipMapped,
576 GrRenderable renderable,
577 GrProtected isProtected,
578 sk_sp<GrRefCntedCallback> finishedCallback,
579 std::array<float, 4> color) {
580 GrGpu* gpu = dContext->priv().getGpu();
581 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
582 mipMapped, isProtected);
583 if (!beTex.isValid()) {
584 return {};
585 }
586
587 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
588 std::move(finishedCallback),
589 color)) {
590 dContext->deleteBackendTexture(beTex);
591 return {};
592 }
593 return beTex;
594 }
595
// Helper: writes 'numLevels' pixmaps into an existing backend texture via a
// temporary SurfaceContext, then flushes so the upload is recorded. Returns
// false if the color type is incompatible with the texture's format, the
// texture cannot be wrapped, or the pixel write fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrow (don't adopt) the texture; the caller retains ownership.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // Repackage the SkPixmaps as GrCPixmaps for writePixels.
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    // Flush just this proxy so the upload isn't left sitting in the DAG.
    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
637
// Creates a backend texture of the given format and clears it to 'color'.
// The finishedProc (if any) is invoked once the clear's GPU work completes.
// Returns an invalid texture when the context is abandoned.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}
662
// Creates a backend texture for the given SkColorType and clears it to 'color'.
// The clear color is pre-swizzled to account for the format's write swizzle.
// Returns an invalid texture when abandoned or when no default format exists
// for the color type.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array());
}
694
// Creates a backend texture initialized from the given pixmap level(s). The
// base level's color type picks the format; more than one level implies a
// mipmapped texture. On upload failure the freshly-created texture is deleted.
// Returns an invalid texture when abandoned or given no source data.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipMapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipMapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipMapped,
                                                        renderable,
                                                        isProtected);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        // Don't leak the texture we just created.
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
742
// Clears an existing backend texture to 'color' (no swizzling applied).
// Returns false when the context is abandoned.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}
755
// Clears an existing backend texture to 'color', first verifying that the
// given SkColorType is compatible with the texture's format and applying the
// format's write swizzle to the color. Returns false when abandoned or
// incompatible.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
781
// Rewrites an existing backend texture from the given pixmap level(s). For a
// mipmapped texture the caller must supply the complete mip chain. Returns
// false when abandoned, given no data, or given the wrong number of levels.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
814
815 //////////////////////////////////////////////////////////////////////////////
816
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,const void * data,size_t size)817 static GrBackendTexture create_and_update_compressed_backend_texture(
818 GrDirectContext* dContext,
819 SkISize dimensions,
820 const GrBackendFormat& backendFormat,
821 GrMipmapped mipMapped,
822 GrProtected isProtected,
823 sk_sp<GrRefCntedCallback> finishedCallback,
824 const void* data,
825 size_t size) {
826 GrGpu* gpu = dContext->priv().getGpu();
827
828 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
829 mipMapped, isProtected);
830 if (!beTex.isValid()) {
831 return {};
832 }
833
834 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
835 beTex, std::move(finishedCallback), data, size)) {
836 dContext->deleteBackendTexture(beTex);
837 return {};
838 }
839 return beTex;
840 }
841
// Creates a compressed backend texture filled with 'color': the compressed
// payload is generated on the CPU for the whole mip chain (when requested) and
// then uploaded. Returns an invalid texture when abandoned or when the format
// is not a compressed format.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const SkColor4f& color,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    // Wrap the proc first so it is guaranteed to fire even on early-out paths.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    if (compression == SkImage::CompressionType::kNone) {
        return {};
    }

    size_t size = SkCompressedDataSize(compression,
                                       {width, height},
                                       nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        storage.get(),
                                                        size);
}
877
// Convenience overload: maps the CompressionType to this context's backend
// format, then defers to the format-based color overload.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        SkImage::CompressionType compression,
        const SkColor4f& color,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrBackendFormat format = this->compressedBackendFormat(compression);
    return this->createCompressedBackendTexture(width, height, format, color,
                                                mipMapped, isProtected, finishedProc,
                                                finishedContext);
}
892
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const void * compressedData,size_t dataSize,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)893 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
894 int width, int height,
895 const GrBackendFormat& backendFormat,
896 const void* compressedData,
897 size_t dataSize,
898 GrMipmapped mipMapped,
899 GrProtected isProtected,
900 GrGpuFinishedProc finishedProc,
901 GrGpuFinishedContext finishedContext) {
902 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
903 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
904
905 if (this->abandoned()) {
906 return {};
907 }
908
909 return create_and_update_compressed_backend_texture(this,
910 {width, height},
911 backendFormat,
912 mipMapped,
913 isProtected,
914 std::move(finishedCallback),
915 compressedData,
916 dataSize);
917 }
918
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const void * data,size_t dataSize,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)919 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
920 int width, int height,
921 SkImage::CompressionType compression,
922 const void* data, size_t dataSize,
923 GrMipmapped mipMapped,
924 GrProtected isProtected,
925 GrGpuFinishedProc finishedProc,
926 GrGpuFinishedContext finishedContext) {
927 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
928 GrBackendFormat format = this->compressedBackendFormat(compression);
929 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
930 isProtected, finishedProc, finishedContext);
931 }
932
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)933 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
934 const SkColor4f& color,
935 GrGpuFinishedProc finishedProc,
936 GrGpuFinishedContext finishedContext) {
937 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
938
939 if (this->abandoned()) {
940 return false;
941 }
942
943 SkImage::CompressionType compression =
944 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
945 if (compression == SkImage::CompressionType::kNone) {
946 return {};
947 }
948 size_t size = SkCompressedDataSize(compression,
949 backendTexture.dimensions(),
950 nullptr,
951 backendTexture.hasMipmaps());
952 SkAutoMalloc storage(size);
953 GrFillInCompressedData(compression,
954 backendTexture.dimensions(),
955 backendTexture.mipmapped(),
956 static_cast<char*>(storage.get()),
957 color);
958 return fGpu->updateCompressedBackendTexture(backendTexture,
959 std::move(finishedCallback),
960 storage.get(),
961 size);
962 }
963
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)964 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
965 const void* compressedData,
966 size_t dataSize,
967 GrGpuFinishedProc finishedProc,
968 GrGpuFinishedContext finishedContext) {
969 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
970
971 if (this->abandoned()) {
972 return false;
973 }
974
975 if (!compressedData) {
976 return false;
977 }
978
979 return fGpu->updateCompressedBackendTexture(backendTexture,
980 std::move(finishedCallback),
981 compressedData,
982 dataSize);
983 }
984
985 //////////////////////////////////////////////////////////////////////////////
986
setBackendTextureState(const GrBackendTexture & backendTexture,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)987 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
988 const GrBackendSurfaceMutableState& state,
989 GrBackendSurfaceMutableState* previousState,
990 GrGpuFinishedProc finishedProc,
991 GrGpuFinishedContext finishedContext) {
992 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
993
994 if (this->abandoned()) {
995 return false;
996 }
997
998 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
999 }
1000
1001
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1002 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1003 const GrBackendSurfaceMutableState& state,
1004 GrBackendSurfaceMutableState* previousState,
1005 GrGpuFinishedProc finishedProc,
1006 GrGpuFinishedContext finishedContext) {
1007 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
1008
1009 if (this->abandoned()) {
1010 return false;
1011 }
1012
1013 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1014 std::move(callback));
1015 }
1016
deleteBackendTexture(GrBackendTexture backendTex)1017 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
1018 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1019 // For the Vulkan backend we still must destroy the backend texture when the context is
1020 // abandoned.
1021 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1022 return;
1023 }
1024
1025 fGpu->deleteBackendTexture(backendTex);
1026 }
1027
1028 //////////////////////////////////////////////////////////////////////////////
1029
// Hands a (key, data) shader blob back to the backend for precompilation; thin pass-through
// to GrGpu. Returns whatever the backend reports.
// NOTE(review): unlike most entry points here, this does not check this->abandoned() before
// touching fGpu -- presumably callers guarantee a live context; confirm.
bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}
1033
1034 #ifdef SK_ENABLE_DUMP_GPU
1035 #include "include/core/SkString.h"
1036 #include "src/utils/SkJSONWriter.h"
// Serializes a pretty-printed JSON description of this context -- backend name, caps, gpu
// state, and the context's own dumpJSON() -- into a SkString. Compiled only when
// SK_ENABLE_DUMP_GPU is defined.
SkString GrDirectContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    writer.appendString("backend", GrBackendApiToStr(this->backend()));

    writer.appendName("caps");
    this->caps()->dumpJSON(&writer);

    writer.appendName("gpu");
    this->fGpu->dumpJSON(&writer);

    writer.appendName("context");
    this->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the stream
    // (the SkString size includes the trailing NUL written above).
    SkString result(stream.bytesWritten());
    stream.copyToAndReset(result.writable_str());
    return result;
}
1065 #endif
1066
1067 #ifdef SK_GL
1068
1069 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)1070 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1071 GrContextOptions defaultOptions;
1072 return MakeGL(std::move(glInterface), defaultOptions);
1073 }
1074
// Creates a GL-backed context with the given options; a null interface asks GrGLGpu::Make
// to resolve one itself.
sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
    return MakeGL(nullptr, options);
}
1078
MakeGL()1079 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1080 GrContextOptions defaultOptions;
1081 return MakeGL(nullptr, defaultOptions);
1082 }
1083
1084 #if GR_TEST_UTILS
// Test-only helper: wraps a glGetError implementation so that, roughly once per 300
// otherwise-error-free calls, it synthetically reports GR_GL_OUT_OF_MEMORY. Used to
// exercise OOM-handling paths when GrContextOptions::fRandomGLOOM is set.
GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
    // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
    // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
    // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
    // on the thing it captures. So we leak the context.
    struct GetErrorContext {
        SkRandom fRandom;                        // source of the 1-in-300 failure roll
        GrGLFunction<GrGLGetErrorFn> fGetError;  // the real glGetError consulted first
    };

    auto errorContext = new GetErrorContext;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
    // The context is deliberately leaked (see above); tell LSAN not to report it.
    __lsan_ignore_object(errorContext);
#endif

    errorContext->fGetError = original;

    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
        GrGLenum error = errorContext->fGetError();
        // Only inject OOM when the real call succeeded, so genuine errors still surface.
        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
            error = GR_GL_OUT_OF_MEMORY;
        }
        return error;
    });
}
1111 #endif
1112
// Primary GL factory: constructs the context shell, builds the GrGLGpu from the interface,
// and finishes initialization. Returns nullptr if init() fails.
sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                               const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
#if GR_TEST_UTILS
    if (options.fRandomGLOOM) {
        // Testing-only: clone the (const) interface so we can splice in a glGetError that
        // randomly reports OOM.
        // NOTE(review): glInterface is dereferenced here without a null check -- presumably
        // fRandomGLOOM is only ever set alongside a non-null interface; confirm with callers.
        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
        copy->fFunctions.fGetError =
                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
#if GR_GL_CHECK_ERROR
        // Suppress logging GL errors since we'll be synthetically generating them.
        copy->suppressErrorLogging();
#endif
        glInterface = std::move(copy);
    }
#endif
    direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }
    return direct;
}
1134 #endif
1135
1136 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1137 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1138 GrContextOptions defaultOptions;
1139 return MakeMock(mockOptions, defaultOptions);
1140 }
1141
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1142 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1143 const GrContextOptions& options) {
1144 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1145
1146 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1147 if (!direct->init()) {
1148 return nullptr;
1149 }
1150
1151 return direct;
1152 }
1153
1154 #ifdef SK_VULKAN
1155 /*************************************************************************************************/
MakeVulkan(const GrVkBackendContext & backendContext)1156 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1157 GrContextOptions defaultOptions;
1158 return MakeVulkan(backendContext, defaultOptions);
1159 }
1160
MakeVulkan(const GrVkBackendContext & backendContext,const GrContextOptions & options)1161 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1162 const GrContextOptions& options) {
1163 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1164
1165 direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1166 if (!direct->init()) {
1167 return nullptr;
1168 }
1169
1170 return direct;
1171 }
1172 #endif
1173
1174 #ifdef SK_METAL
1175 /*************************************************************************************************/
MakeMetal(const GrMtlBackendContext & backendContext)1176 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1177 GrContextOptions defaultOptions;
1178 return MakeMetal(backendContext, defaultOptions);
1179 }
1180
MakeMetal(const GrMtlBackendContext & backendContext,const GrContextOptions & options)1181 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1182 const GrContextOptions& options) {
1183 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1184
1185 direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1186 if (!direct->init()) {
1187 return nullptr;
1188 }
1189
1190 return direct;
1191 }
1192
1193 // deprecated
MakeMetal(void * device,void * queue)1194 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1195 GrContextOptions defaultOptions;
1196 return MakeMetal(device, queue, defaultOptions);
1197 }
1198
1199 // deprecated
1200 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
MakeMetal(void * device,void * queue,const GrContextOptions & options)1201 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1202 const GrContextOptions& options) {
1203 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1204 GrMtlBackendContext backendContext = {};
1205 backendContext.fDevice.reset(device);
1206 backendContext.fQueue.reset(queue);
1207
1208 return GrDirectContext::MakeMetal(backendContext, options);
1209 }
1210 #endif
1211
1212 #ifdef SK_DIRECT3D
1213 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1214 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1215 GrContextOptions defaultOptions;
1216 return MakeDirect3D(backendContext, defaultOptions);
1217 }
1218
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1219 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1220 const GrContextOptions& options) {
1221 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1222
1223 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1224 if (!direct->init()) {
1225 return nullptr;
1226 }
1227
1228 return direct;
1229 }
1230 #endif
1231
1232 #ifdef SK_DAWN
1233 /*************************************************************************************************/
MakeDawn(const wgpu::Device & device)1234 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1235 GrContextOptions defaultOptions;
1236 return MakeDawn(device, defaultOptions);
1237 }
1238
MakeDawn(const wgpu::Device & device,const GrContextOptions & options)1239 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1240 const GrContextOptions& options) {
1241 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1242
1243 direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1244 if (!direct->init()) {
1245 return nullptr;
1246 }
1247
1248 return direct;
1249 }
1250
1251 #endif
1252