1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "include/gpu/GrDirectContext.h"
10
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
#if SK_GPU_V1
#include "src/gpu/ops/SmallPathAtlasMgr.h"
#else
// A vestigial definition for v2 that will never be instantiated
// (the SkASSERTs guarantee that; it exists only so the v2 build still
// compiles the fSmallPathAtlasMgr member and its reset() call sites).
namespace skgpu::v1 {
class SmallPathAtlasMgr {
public:
    SmallPathAtlasMgr() { SkASSERT(0); }
    void reset() { SkASSERT(0); }
};
}
#endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62
63 #if GR_TEST_UTILS
64 # include "include/utils/SkRandom.h"
65 # if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 # include <sanitizer/lsan_interface.h>
67 # endif
68 #endif
69
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71
Next()72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73 static std::atomic<uint32_t> nextID{1};
74 uint32_t id;
75 do {
76 id = nextID.fetch_add(1, std::memory_order_relaxed);
77 } while (id == SK_InvalidUniqueID);
78 return DirectContextID(id);
79 }
80
// Constructs a direct context for the given backend; the heavy lifting
// (GPU creation, caches) happens later in init().
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}
85
// Tears the context down in a strict order: flush pending work, wait for the
// GPU, destroy the drawing manager, then release cached resources.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
107
threadSafeProxy()108 sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
109 return INHERITED::threadSafeProxy();
110 }
111
resetGLTextureBindings()112 void GrDirectContext::resetGLTextureBindings() {
113 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114 return;
115 }
116 fGpu->resetTextureBindings();
117 }
118
// Marks portions of backend state dirty so it is re-sent on next use; 'state'
// is a bitmask interpreted by the backend GrGpu.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
123
// Severs the connection to the backend 3D API. After this, no GPU work is
// submitted; backend objects are NOT freed (see releaseResourcesAndAbandonContext
// for the variant that also releases them). Order of operations matters below.
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    // Must be after GrResourceCache::abandonAll().
    fMappedBufferManager.reset();

    // fSmallPathAtlasMgr is only created lazily, so may be null here.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
153
abandoned()154 bool GrDirectContext::abandoned() {
155 if (INHERITED::abandoned()) {
156 return true;
157 }
158
159 if (fGpu && fGpu->isDeviceLost()) {
160 this->abandonContext();
161 return true;
162 }
163 return false;
164 }
165
oomed()166 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
167
// Like abandonContext(), but additionally releases all backend-API resources
// (via releaseAll / DisconnectType::kCleanup) before disconnecting.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    // fSmallPathAtlasMgr is created lazily and may not exist yet.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
192
// Flushes pending work and then frees every GPU resource the context can
// release without abandoning it (atlases, glyph strikes, unlocked cache entries).
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    // Flush first so resources referenced by pending work become purgeable.
    this->flushAndSubmit();
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}
214
// Second-stage construction: wires up caches, providers, and the atlas
// manager once fGpu exists. Returns false if the GPU or base init failed.
// The creation order below is intentional (cache before provider, etc.).
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    // -1 means "no override" for the test-only cache-limit option.
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
          this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    // The atlas manager must be flushed along with everything else.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
273
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const274 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
275 ASSERT_SINGLE_OWNER
276
277 if (resourceCount) {
278 *resourceCount = fResourceCache->getBudgetedResourceCount();
279 }
280 if (resourceBytes) {
281 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
282 }
283 }
284
// Returns the number of bytes held by resources that could be purged right now.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
289
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const290 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
291 ASSERT_SINGLE_OWNER
292 if (maxResources) {
293 *maxResources = -1;
294 }
295 if (maxResourceBytes) {
296 *maxResourceBytes = this->getResourceCacheLimit();
297 }
298 }
299
// Returns the byte budget of the resource cache.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}
304
// Legacy setter: the count limit ('unused') is ignored; only the byte budget applies.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
309
// Sets the byte budget of the resource cache.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
314
// Purges unlocked resources from the cache; when scratchResourcesOnly is true
// only scratch (reusable, unnamed) resources are dropped.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    // Also give the backend a chance to drop any internally-held objects.
    fGpu->releaseUnlockedBackendObjects();
}
331
// Variant of purgeUnlockedResources that defers the exact selection policy to
// GrResourceCache::purgeUnlockAndSafeCacheGpuResources().
void GrDirectContext::purgeUnlockAndSafeCacheGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockAndSafeCacheGpuResources();
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
348
purgeUnlockedResourcesByTag(bool scratchResourceseOnly,const GrGpuResourceTag & tag)349 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourceseOnly, const GrGpuResourceTag& tag) {
350 ASSERT_SINGLE_OWNER
351 fResourceCache->purgeUnlockedResourcesByTag(scratchResourceseOnly, tag);
352 fResourceCache->purgeAsNeeded();
353
354 // The textBlod Cache doesn't actually hold any GPU resource but this is a convenient
355 // place to purge stale blobs
356 this->getTextBlobCache()->purgeStaleBlobs();
357 }
358
// Time-based cleanup: processes finished async work, then purges resources
// that have not been used within 'msNotUsed' (optionally scratch-only).
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    // Drain finished-proc callbacks and recycle client-mapped buffers first so
    // their resources are eligible for purging below.
    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();
}
380
// Purges up to 'bytesToPurge' bytes of unlocked resources, preferring scratch
// resources first when requested.
void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
}
390
391 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)392 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
393 bool deleteSemaphoresAfterWait) {
394 if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
395 return false;
396 }
397 GrWrapOwnership ownership =
398 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
399 for (int i = 0; i < numSemaphores; ++i) {
400 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
401 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
402 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
403 // to begin with. Therefore, it is fine to not wait on it.
404 if (sema) {
405 fGpu->waitSemaphore(sema.get());
406 }
407 }
408 return true;
409 }
410
// Lazily creates the small-path atlas manager (v1 builds only) and ensures its
// atlas is initialized. Returns nullptr if atlas initialization fails; in v2
// builds fSmallPathAtlasMgr is never created here, so this returns null.
skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
#if SK_GPU_V1
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        // The atlas manager must participate in flushes.
        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }
#endif

    return fSmallPathAtlasMgr.get();
}
426
427 ////////////////////////////////////////////////////////////////////////////////
428
// Flushes all work to the drawing manager. On an abandoned context the
// finished/submitted callbacks are still invoked (submitted=false) so clients
// are never left waiting on them.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
444
submit(bool syncCpu)445 bool GrDirectContext::submit(bool syncCpu) {
446 ASSERT_SINGLE_OWNER
447 if (this->abandoned()) {
448 return false;
449 }
450
451 if (!fGpu) {
452 return false;
453 }
454
455 return fGpu->submitToGpu(syncCpu);
456 }
457
458 ////////////////////////////////////////////////////////////////////////////////
459
checkAsyncWorkCompletion()460 void GrDirectContext::checkAsyncWorkCompletion() {
461 if (fGpu) {
462 fGpu->checkFinishProcs();
463 }
464 }
465
// Blocks until the GPU has finished all outstanding work, then drains the
// finished-proc callbacks. 'shouldExecuteWhileAbandoned' lets teardown paths
// force the sync even after the context has been abandoned.
void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}
472
473 ////////////////////////////////////////////////////////////////////////////////
474
storeVkPipelineCacheData()475 void GrDirectContext::storeVkPipelineCacheData() {
476 if (fGpu) {
477 fGpu->storeVkPipelineCacheData();
478 }
479 }
480
481 ////////////////////////////////////////////////////////////////////////////////
482
// True when the shader caps allow distance-field text rendering.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
486
487 //////////////////////////////////////////////////////////////////////////////
488
// Writes resource-cache and text-blob-cache memory statistics into the dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
495
// Tag-filtered variant of dumpMemoryStatistics: only resources matching 'tag'
// are included in the resource-cache portion of the dump.
void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
502
setCurrentGrResourceTag(const GrGpuResourceTag & tag)503 void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
504 if (fResourceCache) {
505 return fResourceCache->setCurrentGrResourceTag(tag);
506 }
507 }
508
popGrResourceTag()509 void GrDirectContext::popGrResourceTag()
510 {
511 if (fResourceCache) {
512 return fResourceCache->popGrResourceTag();
513 }
514 }
515
// Returns the tag currently applied to new resources, or a default-constructed
// tag when there is no resource cache.
GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
    if (fResourceCache) {
        return fResourceCache->getCurrentGrResourceTag();
    }
    return {};
}
// Releases every cached resource whose tag matches 'tag'.
void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        fResourceCache->releaseByTag(tag);
    }
}
// Returns the set of tags currently present in the resource cache (empty set
// when there is no cache).
std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
    if (fResourceCache) {
        return fResourceCache->getAllGrGpuResourceTags();
    }
    return {};
}
533
// Creates an uninitialized backend texture with the given format/flags.
// Returns an invalid GrBackendTexture on failure or if the context is abandoned.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}
547
// Convenience overload: derives the backend format from an SkColorType, then
// defers to the format-based createBackendTexture.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
}
561
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,std::array<float,4> color)562 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
563 SkISize dimensions,
564 const GrBackendFormat& backendFormat,
565 GrMipmapped mipMapped,
566 GrRenderable renderable,
567 GrProtected isProtected,
568 sk_sp<GrRefCntedCallback> finishedCallback,
569 std::array<float, 4> color) {
570 GrGpu* gpu = dContext->priv().getGpu();
571 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
572 mipMapped, isProtected);
573 if (!beTex.isValid()) {
574 return {};
575 }
576
577 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
578 std::move(finishedCallback),
579 color)) {
580 dContext->deleteBackendTexture(beTex);
581 return {};
582 }
583 return beTex;
584 }
585
// Uploads 'numLevels' pixmaps (one per MIP level) into an existing backend
// texture by wrapping it in a borrowed proxy and writing through a
// SurfaceContext, then flushes that surface. Returns false if the color type
// is incompatible with the texture's format, the wrap fails, or the write fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrow (not adopt) the texture: the caller retains ownership; the
    // finishedCallback fires when the GPU is done reading the staging data.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // Repackage the SkPixmaps as GrCPixmaps for the write call (15 inline
    // slots covers any realistic MIP chain without heap allocation).
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
627
// Creates a backend texture with the given format, cleared to 'color'.
// The finishedProc fires once the clear has executed on the GPU.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}
652
// Creates a backend texture from an SkColorType, cleared to 'color'. The color
// is pre-swizzled into the format's channel order before the clear.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array());
}
684
// Creates a backend texture initialized from the given pixmaps. srcData[0]
// determines size/color type; more than one level implies a full MIP chain.
// On upload failure the freshly created texture is deleted (no leak).
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipMapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipMapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipMapped,
                                                        renderable,
                                                        isProtected);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
732
// Clears an existing backend texture to 'color' as-is (no swizzle applied —
// compare the SkColorType overload below).
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}
745
// Clears an existing backend texture to 'color', first verifying that
// skColorType is compatible with the texture's format and swizzling the color
// into the format's channel order.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
771
// Re-uploads pixel data into an existing backend texture. If the texture has
// MIP levels, the caller must supply the complete chain (count validated
// against the texture's dimensions); partial updates are rejected.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
804
805 //////////////////////////////////////////////////////////////////////////////
806
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,const void * data,size_t size)807 static GrBackendTexture create_and_update_compressed_backend_texture(
808 GrDirectContext* dContext,
809 SkISize dimensions,
810 const GrBackendFormat& backendFormat,
811 GrMipmapped mipMapped,
812 GrProtected isProtected,
813 sk_sp<GrRefCntedCallback> finishedCallback,
814 const void* data,
815 size_t size) {
816 GrGpu* gpu = dContext->priv().getGpu();
817
818 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
819 mipMapped, isProtected);
820 if (!beTex.isValid()) {
821 return {};
822 }
823
824 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
825 beTex, std::move(finishedCallback), data, size)) {
826 dContext->deleteBackendTexture(beTex);
827 return {};
828 }
829 return beTex;
830 }
831
// Creates a compressed backend texture filled with a solid color: the color is
// compressed on the CPU into a temporary buffer, which is then uploaded.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const SkColor4f& color,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    if (compression == SkImage::CompressionType::kNone) {
        return {};
    }

    size_t size = SkCompressedDataSize(compression,
                                       {width, height},
                                       nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        storage.get(),
                                                        size);
}
867
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const SkColor4f & color,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)868 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
869 int width, int height,
870 SkImage::CompressionType compression,
871 const SkColor4f& color,
872 GrMipmapped mipMapped,
873 GrProtected isProtected,
874 GrGpuFinishedProc finishedProc,
875 GrGpuFinishedContext finishedContext) {
876 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
877 GrBackendFormat format = this->compressedBackendFormat(compression);
878 return this->createCompressedBackendTexture(width, height, format, color,
879 mipMapped, isProtected, finishedProc,
880 finishedContext);
881 }
882
// Creates a compressed backend texture initialized from caller-supplied
// compressed data of 'dataSize' bytes.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const void* compressedData,
        size_t dataSize,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    // Wrap the proc first so it is guaranteed to fire even on early-out below.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipMapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        compressedData,
                                                        dataSize);
}
908
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const void * data,size_t dataSize,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)909 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
910 int width, int height,
911 SkImage::CompressionType compression,
912 const void* data, size_t dataSize,
913 GrMipmapped mipMapped,
914 GrProtected isProtected,
915 GrGpuFinishedProc finishedProc,
916 GrGpuFinishedContext finishedContext) {
917 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
918 GrBackendFormat format = this->compressedBackendFormat(compression);
919 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
920 isProtected, finishedProc, finishedContext);
921 }
922
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)923 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
924 const SkColor4f& color,
925 GrGpuFinishedProc finishedProc,
926 GrGpuFinishedContext finishedContext) {
927 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
928
929 if (this->abandoned()) {
930 return false;
931 }
932
933 SkImage::CompressionType compression =
934 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
935 if (compression == SkImage::CompressionType::kNone) {
936 return {};
937 }
938 size_t size = SkCompressedDataSize(compression,
939 backendTexture.dimensions(),
940 nullptr,
941 backendTexture.hasMipmaps());
942 SkAutoMalloc storage(size);
943 GrFillInCompressedData(compression,
944 backendTexture.dimensions(),
945 backendTexture.mipmapped(),
946 static_cast<char*>(storage.get()),
947 color);
948 return fGpu->updateCompressedBackendTexture(backendTexture,
949 std::move(finishedCallback),
950 storage.get(),
951 size);
952 }
953
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)954 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
955 const void* compressedData,
956 size_t dataSize,
957 GrGpuFinishedProc finishedProc,
958 GrGpuFinishedContext finishedContext) {
959 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
960
961 if (this->abandoned()) {
962 return false;
963 }
964
965 if (!compressedData) {
966 return false;
967 }
968
969 return fGpu->updateCompressedBackendTexture(backendTexture,
970 std::move(finishedCallback),
971 compressedData,
972 dataSize);
973 }
974
975 //////////////////////////////////////////////////////////////////////////////
976
setBackendTextureState(const GrBackendTexture & backendTexture,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)977 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
978 const GrBackendSurfaceMutableState& state,
979 GrBackendSurfaceMutableState* previousState,
980 GrGpuFinishedProc finishedProc,
981 GrGpuFinishedContext finishedContext) {
982 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
983
984 if (this->abandoned()) {
985 return false;
986 }
987
988 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
989 }
990
991
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)992 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
993 const GrBackendSurfaceMutableState& state,
994 GrBackendSurfaceMutableState* previousState,
995 GrGpuFinishedProc finishedProc,
996 GrGpuFinishedContext finishedContext) {
997 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
998
999 if (this->abandoned()) {
1000 return false;
1001 }
1002
1003 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1004 std::move(callback));
1005 }
1006
deleteBackendTexture(GrBackendTexture backendTex)1007 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
1008 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1009 // For the Vulkan backend we still must destroy the backend texture when the context is
1010 // abandoned.
1011 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1012 return;
1013 }
1014
1015 fGpu->deleteBackendTexture(backendTex);
1016 }
1017
1018 //////////////////////////////////////////////////////////////////////////////
1019
// Hands a (key, data) pair — e.g. from a persistent shader cache — to the
// backend so it can compile the shader ahead of first use. Returns the
// backend's success result.
// NOTE(review): unlike most entry points in this file there is no
// abandoned() guard here, so this assumes fGpu is still valid — confirm
// callers never invoke this after abandonContext().
bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}
1023
1024 #ifdef SK_ENABLE_DUMP_GPU
1025 #include "include/core/SkString.h"
1026 #include "src/utils/SkJSONWriter.h"
// Serializes a JSON description of this context — backend type, caps, GPU
// state, and base-context info — into an SkString for debugging. Only
// compiled when SK_ENABLE_DUMP_GPU is defined.
SkString GrDirectContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    writer.appendString("backend", GrBackendApiToStr(this->backend()));

    writer.appendName("caps");
    this->caps()->dumpJSON(&writer);

    writer.appendName("gpu");
    this->fGpu->dumpJSON(&writer);

    writer.appendName("context");
    this->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the
    // stream. bytesWritten() includes the '\0' written above, so the buffer
    // holds the full NUL-terminated payload.
    SkString result(stream.bytesWritten());
    stream.copyToAndReset(result.writable_str());
    return result;
}
1055 #endif
1056
1057 #ifdef SK_GL
1058
1059 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)1060 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1061 GrContextOptions defaultOptions;
1062 return MakeGL(std::move(glInterface), defaultOptions);
1063 }
1064
// Convenience factory: forwards with a null interface (presumably the
// backend then creates a native GL interface — verify in GrGLGpu::Make).
sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
    return MakeGL(nullptr, options);
}
1068
MakeGL()1069 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1070 GrContextOptions defaultOptions;
1071 return MakeGL(nullptr, defaultOptions);
1072 }
1073
1074 #if GR_TEST_UTILS
make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original)1075 GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
1076 // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
1077 // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
1078 // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
1079 // on the thing it captures. So we leak the context.
1080 struct GetErrorContext {
1081 SkRandom fRandom;
1082 GrGLFunction<GrGLGetErrorFn> fGetError;
1083 };
1084
1085 auto errorContext = new GetErrorContext;
1086
1087 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
1088 __lsan_ignore_object(errorContext);
1089 #endif
1090
1091 errorContext->fGetError = original;
1092
1093 return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
1094 GrGLenum error = errorContext->fGetError();
1095 if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
1096 error = GR_GL_OUT_OF_MEMORY;
1097 }
1098 return error;
1099 });
1100 }
1101 #endif
1102
// Main GL factory: wraps the given GL interface in a GrGLGpu owned by a new
// GrDirectContext. Returns nullptr if context initialization fails.
sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                               const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
#if GR_TEST_UTILS
    if (options.fRandomGLOOM) {
        // Test-only mode: clone the interface and substitute a glGetError
        // wrapper that randomly synthesizes GL_OUT_OF_MEMORY.
        // NOTE(review): this path dereferences glInterface without a null
        // check, so a null interface combined with fRandomGLOOM would crash
        // here — confirm tests always pass an explicit interface.
        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
        copy->fFunctions.fGetError =
                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
#if GR_GL_CHECK_ERROR
        // Suppress logging GL errors since we'll be synthetically generating them.
        copy->suppressErrorLogging();
#endif
        glInterface = std::move(copy);
    }
#endif
    direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }
    return direct;
}
1124 #endif
1125
1126 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1127 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1128 GrContextOptions defaultOptions;
1129 return MakeMock(mockOptions, defaultOptions);
1130 }
1131
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1132 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1133 const GrContextOptions& options) {
1134 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1135
1136 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1137 if (!direct->init()) {
1138 return nullptr;
1139 }
1140
1141 return direct;
1142 }
1143
1144 #ifdef SK_VULKAN
1145 /*************************************************************************************************/
MakeVulkan(const GrVkBackendContext & backendContext)1146 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1147 GrContextOptions defaultOptions;
1148 return MakeVulkan(backendContext, defaultOptions);
1149 }
1150
MakeVulkan(const GrVkBackendContext & backendContext,const GrContextOptions & options)1151 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1152 const GrContextOptions& options) {
1153 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1154
1155 direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1156 if (!direct->init()) {
1157 return nullptr;
1158 }
1159
1160 return direct;
1161 }
1162 #endif
1163
1164 #ifdef SK_METAL
1165 /*************************************************************************************************/
MakeMetal(const GrMtlBackendContext & backendContext)1166 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1167 GrContextOptions defaultOptions;
1168 return MakeMetal(backendContext, defaultOptions);
1169 }
1170
MakeMetal(const GrMtlBackendContext & backendContext,const GrContextOptions & options)1171 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1172 const GrContextOptions& options) {
1173 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1174
1175 direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1176 if (!direct->init()) {
1177 return nullptr;
1178 }
1179
1180 return direct;
1181 }
1182
1183 // deprecated
MakeMetal(void * device,void * queue)1184 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1185 GrContextOptions defaultOptions;
1186 return MakeMetal(device, queue, defaultOptions);
1187 }
1188
1189 // deprecated
1190 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
MakeMetal(void * device,void * queue,const GrContextOptions & options)1191 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1192 const GrContextOptions& options) {
1193 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1194 GrMtlBackendContext backendContext = {};
1195 backendContext.fDevice.reset(device);
1196 backendContext.fQueue.reset(queue);
1197
1198 return GrDirectContext::MakeMetal(backendContext, options);
1199 }
1200 #endif
1201
1202 #ifdef SK_DIRECT3D
1203 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1204 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1205 GrContextOptions defaultOptions;
1206 return MakeDirect3D(backendContext, defaultOptions);
1207 }
1208
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1209 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1210 const GrContextOptions& options) {
1211 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1212
1213 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1214 if (!direct->init()) {
1215 return nullptr;
1216 }
1217
1218 return direct;
1219 }
1220 #endif
1221
1222 #ifdef SK_DAWN
1223 /*************************************************************************************************/
MakeDawn(const wgpu::Device & device)1224 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1225 GrContextOptions defaultOptions;
1226 return MakeDawn(device, defaultOptions);
1227 }
1228
MakeDawn(const wgpu::Device & device,const GrContextOptions & options)1229 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1230 const GrContextOptions& options) {
1231 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1232
1233 direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1234 if (!direct->init()) {
1235 return nullptr;
1236 }
1237
1238 return direct;
1239 }
1240
1241 #endif
1242