1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "include/gpu/GrDirectContext.h"
10
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/core/SkAutoMalloc.h"
15 #include "src/core/SkTaskGroup.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/GrBackendUtils.h"
18 #include "src/gpu/GrClientMappedBufferManager.h"
19 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrDrawingManager.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrResourceProvider.h"
24 #include "src/gpu/GrSemaphore.h"
25 #include "src/gpu/GrShaderUtils.h"
26 #include "src/gpu/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/SurfaceContext.h"
28 #include "src/gpu/effects/GrSkSLFP.h"
29 #include "src/gpu/mock/GrMockGpu.h"
30 #include "src/gpu/text/GrAtlasManager.h"
31 #include "src/gpu/text/GrStrikeCache.h"
32 #include "src/image/SkImage_GpuBase.h"
33 #if SK_GPU_V1
34 #include "src/gpu/ops/SmallPathAtlasMgr.h"
35 #else
36 // A vestigial definition for v2 that will never be instantiated
// Stub used only when SK_GPU_V1 is not defined. It exists so member declarations
// referencing skgpu::v1::SmallPathAtlasMgr still compile; it must never actually
// be instantiated or used (hence the SkASSERT(0) in every member).
namespace skgpu::v1 {
class SmallPathAtlasMgr {
public:
    SmallPathAtlasMgr() { SkASSERT(0); }
    void reset() { SkASSERT(0); }
};
}
44 #endif
45 #ifdef SK_GL
46 #include "src/gpu/gl/GrGLGpu.h"
47 #endif
48 #ifdef SK_METAL
49 #include "include/gpu/mtl/GrMtlBackendContext.h"
50 #include "src/gpu/mtl/GrMtlTrampoline.h"
51 #endif
52 #ifdef SK_VULKAN
53 #include "src/gpu/vk/GrVkGpu.h"
54 #endif
55 #ifdef SK_DIRECT3D
56 #include "src/gpu/d3d/GrD3DGpu.h"
57 #endif
58 #ifdef SK_DAWN
59 #include "src/gpu/dawn/GrDawnGpu.h"
60 #endif
61 #include <memory>
62
63 #if GR_TEST_UTILS
64 # include "include/utils/SkRandom.h"
65 # if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
66 # include <sanitizer/lsan_interface.h>
67 # endif
68 #endif
69
70 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
71
Next()72 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
73 static std::atomic<uint32_t> nextID{1};
74 uint32_t id;
75 do {
76 id = nextID.fetch_add(1, std::memory_order_relaxed);
77 } while (id == SK_InvalidUniqueID);
78 return DirectContextID(id);
79 }
80
// Constructs a direct context for the given backend. Only lightweight setup
// happens here (thread-safe proxy creation and ID assignment); the GPU, caches,
// and providers are created later in init().
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}
85
// Tears the context down in a strict order: flush pending work, wait for the GPU
// to finish, destroy the drawing manager, release cached resources, and finally
// drop the mapped-buffer manager. The ordering below is load-bearing.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
107
// Returns a ref on this context's thread-safe proxy; simply delegates to the base class.
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return INHERITED::threadSafeProxy();
}
111
resetGLTextureBindings()112 void GrDirectContext::resetGLTextureBindings() {
113 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
114 return;
115 }
116 fGpu->resetTextureBindings();
117 }
118
// Marks portions of the GPU's cached state dirty so it will be re-sent to the
// 3D API. |state| is a bitmask of the state categories to invalidate.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
123
// Abandons the context: all outstanding GPU objects are orphaned (not freed in
// the 3D API) and the context becomes unusable. Safe to call more than once.
// The teardown order below is deliberate and must be preserved.
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    // Must be after GrResourceCache::abandonAll().
    fMappedBufferManager.reset();

    // The small-path atlas manager is created lazily, so it may not exist yet.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
153
abandoned()154 bool GrDirectContext::abandoned() {
155 if (INHERITED::abandoned()) {
156 return true;
157 }
158
159 if (fGpu && fGpu->isDeviceLost()) {
160 this->abandonContext();
161 return true;
162 }
163 return false;
164 }
165
oomed()166 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
167
// Like abandonContext(), but actually releases the underlying 3D-API resources
// instead of orphaning them. The ordering below is load-bearing.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    // The small-path atlas manager is created lazily, so it may not exist yet.
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
192
// Frees as many GPU resources as possible without abandoning the context:
// pending work is flushed first so the resources are actually idle, then the
// atlases, glyph cache, drawing manager, and unlocked cache entries are purged.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}
214
// Completes construction of the context once a GrGpu exists: wires up the
// thread-safe proxy, then creates the caches, providers, task group, and atlas
// manager. Returns false if the GPU is missing or base-class init fails.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    // The proxy needs the caps/pipeline builder before INHERITED::init() runs.
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    // Tests may override the cache budget; -1 means "no override".
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
          this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    // Register the atlas manager so it gets a chance to run at flush time.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
273
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const274 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
275 ASSERT_SINGLE_OWNER
276
277 if (resourceCount) {
278 *resourceCount = fResourceCache->getBudgetedResourceCount();
279 }
280 if (resourceBytes) {
281 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
282 }
283 }
284
// Returns the number of bytes held by cache resources that could be purged right now.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
289
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const290 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
291 ASSERT_SINGLE_OWNER
292 if (maxResources) {
293 *maxResources = -1;
294 }
295 if (maxResourceBytes) {
296 *maxResourceBytes = this->getResourceCacheLimit();
297 }
298 }
299
// Returns the cache's byte budget.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}
304
// Legacy two-argument form; the resource-count limit is ignored and only the
// byte budget is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
309
// Sets the cache's byte budget.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
314
// Purges unlocked cache resources (optionally only scratch ones), trims the
// cache back to budget, drops stale text blobs, and lets the GPU release any
// backend objects it no longer needs. No-op on an abandoned context.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
331
// Periodic housekeeping entry point: completes finished async work, processes
// returned client-mapped buffers, and purges cache resources that have been
// idle for at least |msNotUsed| (optionally scratch-only). No-op when abandoned.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used before this time point is eligible for purging.
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();
}
353
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)354 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
355 ASSERT_SINGLE_OWNER
356
357 if (this->abandoned()) {
358 return;
359 }
360
361 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
362 }
363
364 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)365 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
366 bool deleteSemaphoresAfterWait) {
367 if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
368 return false;
369 }
370 GrWrapOwnership ownership =
371 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
372 for (int i = 0; i < numSemaphores; ++i) {
373 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
374 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
375 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
376 // to begin with. Therefore, it is fine to not wait on it.
377 if (sema) {
378 fGpu->waitSemaphore(sema.get());
379 }
380 }
381 return true;
382 }
383
// Lazily creates the small-path atlas manager (v1 GPU backend only) and
// registers it for on-flush callbacks. Returns nullptr if the atlas fails to
// initialize. In a v2 build the manager is never created, so this returns
// the still-null member.
skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
#if SK_GPU_V1
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    // Re-checked on every call: the atlas may need (re)initialization.
    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }
#endif

    return fSmallPathAtlasMgr.get();
}
399
400 ////////////////////////////////////////////////////////////////////////////////
401
// Flushes all pending surfaces. Even when the context is abandoned, any
// finished/submitted callbacks in |info| are invoked (submitted with
// success=false) so the client's callback contexts are not leaked.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    // An empty proxy list means "flush everything".
    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
417
submit(bool syncCpu)418 bool GrDirectContext::submit(bool syncCpu) {
419 ASSERT_SINGLE_OWNER
420 if (this->abandoned()) {
421 return false;
422 }
423
424 if (!fGpu) {
425 return false;
426 }
427
428 return fGpu->submitToGpu(syncCpu);
429 }
430
431 ////////////////////////////////////////////////////////////////////////////////
432
checkAsyncWorkCompletion()433 void GrDirectContext::checkAsyncWorkCompletion() {
434 if (fGpu) {
435 fGpu->checkFinishProcs();
436 }
437 }
438
// Blocks until the GPU has finished all outstanding work, then fires completed
// finish procs. |shouldExecuteWhileAbandoned| lets teardown paths force the
// sync even after the context has been abandoned.
void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}
445
446 ////////////////////////////////////////////////////////////////////////////////
447
storeVkPipelineCacheData()448 void GrDirectContext::storeVkPipelineCacheData() {
449 if (fGpu) {
450 fGpu->storeVkPipelineCacheData();
451 }
452 }
453
454 ////////////////////////////////////////////////////////////////////////////////
455
// True when the shader caps allow distance-field text rendering.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
459
460 //////////////////////////////////////////////////////////////////////////////
461
// Emits memory-usage statistics for the resource cache and the text blob cache
// into the supplied trace dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
468
// Creates an uninitialized backend texture with the given format/flags.
// Returns an invalid GrBackendTexture if the context is abandoned.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}
482
createBackendTexture(int width,int height,SkColorType skColorType,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected)483 GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
484 SkColorType skColorType,
485 GrMipmapped mipMapped,
486 GrRenderable renderable,
487 GrProtected isProtected) {
488 if (this->abandoned()) {
489 return GrBackendTexture();
490 }
491
492 const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
493
494 return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
495 }
496
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,std::array<float,4> color)497 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
498 SkISize dimensions,
499 const GrBackendFormat& backendFormat,
500 GrMipmapped mipMapped,
501 GrRenderable renderable,
502 GrProtected isProtected,
503 sk_sp<GrRefCntedCallback> finishedCallback,
504 std::array<float, 4> color) {
505 GrGpu* gpu = dContext->priv().getGpu();
506 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
507 mipMapped, isProtected);
508 if (!beTex.isValid()) {
509 return {};
510 }
511
512 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
513 std::move(finishedCallback),
514 color)) {
515 dContext->deleteBackendTexture(beTex);
516 return {};
517 }
518 return beTex;
519 }
520
// Uploads |numLevels| pixmap mip levels into an existing backend texture by
// wrapping it in a borrowed proxy and writing through a SurfaceContext, then
// flushes so the upload is actually recorded. Returns false if the pixmaps'
// color type is incompatible with the texture's format or any step fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrowed wrap: the caller keeps ownership of the backend texture. The
    // finished callback fires once the GPU is done with the upload.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // Repackage the pixmaps; 15 inline slots cover any realistic mip chain.
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
562
// Creates a backend texture cleared to |color|. The finished callback is
// wrapped before the abandoned check so the client's proc still fires (when the
// wrapper is destroyed) even on an abandoned context. Returns an invalid
// texture on failure.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}
587
// SkColorType-based variant of the color-initialized create. Maps the color
// type to the default backend format, applies the format's write swizzle to the
// clear color, then delegates to the shared helper. Returns an invalid texture
// on an abandoned context or when no format exists for the color type.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    // Created before the abandoned check so the client's proc still fires.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    // Swizzle the clear color into the channel order the format actually stores.
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array());
}
619
// Creates a backend texture initialized from the given pixmap level(s). More
// than one provided level implies a mipmapped texture. On upload failure the
// freshly created texture is deleted so nothing leaks. Returns an invalid
// texture on an abandoned context, missing/empty source data, or any failure.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // Created before the abandoned check so the client's proc still fires.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    // The base level determines the texture's color type and dimensions.
    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipMapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipMapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipMapped,
                                                        renderable,
                                                        isProtected);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        // Upload failed; don't leak the backend allocation.
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
667
// Clears an existing backend texture to |color|. The finished callback is
// created before the abandoned check so the client's proc still fires either way.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}
680
// Clears an existing backend texture to |color|, first verifying the given
// SkColorType is compatible with the texture's format and swizzling the color
// into the format's channel order. Returns false on abandonment or mismatch.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Created before the abandoned check so the client's proc still fires.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
706
// Re-uploads pixel data into an existing backend texture. If the texture is
// mipmapped, the caller must supply the complete mip chain (level count is
// derived from the texture's dimensions); otherwise exactly one level.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Created before the abandoned check so the client's proc still fires.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
739
740 //////////////////////////////////////////////////////////////////////////////
741
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,const void * data,size_t size)742 static GrBackendTexture create_and_update_compressed_backend_texture(
743 GrDirectContext* dContext,
744 SkISize dimensions,
745 const GrBackendFormat& backendFormat,
746 GrMipmapped mipMapped,
747 GrProtected isProtected,
748 sk_sp<GrRefCntedCallback> finishedCallback,
749 const void* data,
750 size_t size) {
751 GrGpu* gpu = dContext->priv().getGpu();
752
753 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
754 mipMapped, isProtected);
755 if (!beTex.isValid()) {
756 return {};
757 }
758
759 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
760 beTex, std::move(finishedCallback), data, size)) {
761 dContext->deleteBackendTexture(beTex);
762 return {};
763 }
764 return beTex;
765 }
766
// Creates a compressed backend texture filled with |color|: the color is baked
// into a CPU-side compressed data buffer (covering the whole mip chain when
// requested) which is then uploaded. Returns an invalid texture on an
// abandoned context or when the format isn't a recognized compressed format.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const SkColor4f& color,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    // Created before the abandoned check so the client's proc still fires.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    if (compression == SkImage::CompressionType::kNone) {
        return {};
    }

    size_t size = SkCompressedDataSize(compression,
                                       {width, height},
                                       nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        storage.get(),
                                                        size);
}
802
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const SkColor4f & color,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)803 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
804 int width, int height,
805 SkImage::CompressionType compression,
806 const SkColor4f& color,
807 GrMipmapped mipMapped,
808 GrProtected isProtected,
809 GrGpuFinishedProc finishedProc,
810 GrGpuFinishedContext finishedContext) {
811 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
812 GrBackendFormat format = this->compressedBackendFormat(compression);
813 return this->createCompressedBackendTexture(width, height, format, color,
814 mipMapped, isProtected, finishedProc,
815 finishedContext);
816 }
817
// Creates a compressed backend texture initialized from client-supplied
// compressed data. Returns an invalid texture on an abandoned context or if
// creation/upload fails in the shared helper.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const void* compressedData,
        size_t dataSize,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    // Created before the abandoned check so the client's proc still fires.
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipMapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        compressedData,
                                                        dataSize);
}
843
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const void * data,size_t dataSize,GrMipmapped mipMapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)844 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
845 int width, int height,
846 SkImage::CompressionType compression,
847 const void* data, size_t dataSize,
848 GrMipmapped mipMapped,
849 GrProtected isProtected,
850 GrGpuFinishedProc finishedProc,
851 GrGpuFinishedContext finishedContext) {
852 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
853 GrBackendFormat format = this->compressedBackendFormat(compression);
854 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
855 isProtected, finishedProc, finishedContext);
856 }
857
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)858 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
859 const SkColor4f& color,
860 GrGpuFinishedProc finishedProc,
861 GrGpuFinishedContext finishedContext) {
862 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
863
864 if (this->abandoned()) {
865 return false;
866 }
867
868 SkImage::CompressionType compression =
869 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
870 if (compression == SkImage::CompressionType::kNone) {
871 return {};
872 }
873 size_t size = SkCompressedDataSize(compression,
874 backendTexture.dimensions(),
875 nullptr,
876 backendTexture.hasMipmaps());
877 SkAutoMalloc storage(size);
878 GrFillInCompressedData(compression,
879 backendTexture.dimensions(),
880 backendTexture.mipmapped(),
881 static_cast<char*>(storage.get()),
882 color);
883 return fGpu->updateCompressedBackendTexture(backendTexture,
884 std::move(finishedCallback),
885 storage.get(),
886 size);
887 }
888
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)889 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
890 const void* compressedData,
891 size_t dataSize,
892 GrGpuFinishedProc finishedProc,
893 GrGpuFinishedContext finishedContext) {
894 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
895
896 if (this->abandoned()) {
897 return false;
898 }
899
900 if (!compressedData) {
901 return false;
902 }
903
904 return fGpu->updateCompressedBackendTexture(backendTexture,
905 std::move(finishedCallback),
906 compressedData,
907 dataSize);
908 }
909
910 //////////////////////////////////////////////////////////////////////////////
911
setBackendTextureState(const GrBackendTexture & backendTexture,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)912 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
913 const GrBackendSurfaceMutableState& state,
914 GrBackendSurfaceMutableState* previousState,
915 GrGpuFinishedProc finishedProc,
916 GrGpuFinishedContext finishedContext) {
917 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
918
919 if (this->abandoned()) {
920 return false;
921 }
922
923 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
924 }
925
926
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)927 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
928 const GrBackendSurfaceMutableState& state,
929 GrBackendSurfaceMutableState* previousState,
930 GrGpuFinishedProc finishedProc,
931 GrGpuFinishedContext finishedContext) {
932 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
933
934 if (this->abandoned()) {
935 return false;
936 }
937
938 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
939 std::move(callback));
940 }
941
deleteBackendTexture(GrBackendTexture backendTex)942 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
943 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
944 // For the Vulkan backend we still must destroy the backend texture when the context is
945 // abandoned.
946 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
947 return;
948 }
949
950 fGpu->deleteBackendTexture(backendTex);
951 }
952
953 //////////////////////////////////////////////////////////////////////////////
954
precompileShader(const SkData & key,const SkData & data)955 bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
956 return fGpu->precompileShader(key, data);
957 }
958
959 #ifdef SK_ENABLE_DUMP_GPU
960 #include "include/core/SkString.h"
961 #include "src/utils/SkJSONWriter.h"
dump() const962 SkString GrDirectContext::dump() const {
963 SkDynamicMemoryWStream stream;
964 SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
965 writer.beginObject();
966
967 writer.appendString("backend", GrBackendApiToStr(this->backend()));
968
969 writer.appendName("caps");
970 this->caps()->dumpJSON(&writer);
971
972 writer.appendName("gpu");
973 this->fGpu->dumpJSON(&writer);
974
975 writer.appendName("context");
976 this->dumpJSON(&writer);
977
978 // Flush JSON to the memory stream
979 writer.endObject();
980 writer.flush();
981
982 // Null terminate the JSON data in the memory stream
983 stream.write8(0);
984
985 // Allocate a string big enough to hold all the data, then copy out of the stream
986 SkString result(stream.bytesWritten());
987 stream.copyToAndReset(result.writable_str());
988 return result;
989 }
990 #endif
991
992 #ifdef SK_GL
993
994 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)995 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
996 GrContextOptions defaultOptions;
997 return MakeGL(std::move(glInterface), defaultOptions);
998 }
999
MakeGL(const GrContextOptions & options)1000 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
1001 return MakeGL(nullptr, options);
1002 }
1003
MakeGL()1004 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1005 GrContextOptions defaultOptions;
1006 return MakeGL(nullptr, defaultOptions);
1007 }
1008
1009 #if GR_TEST_UTILS
make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original)1010 GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
1011 // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
1012 // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
1013 // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
1014 // on the thing it captures. So we leak the context.
1015 struct GetErrorContext {
1016 SkRandom fRandom;
1017 GrGLFunction<GrGLGetErrorFn> fGetError;
1018 };
1019
1020 auto errorContext = new GetErrorContext;
1021
1022 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
1023 __lsan_ignore_object(errorContext);
1024 #endif
1025
1026 errorContext->fGetError = original;
1027
1028 return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
1029 GrGLenum error = errorContext->fGetError();
1030 if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
1031 error = GR_GL_OUT_OF_MEMORY;
1032 }
1033 return error;
1034 });
1035 }
1036 #endif
1037
MakeGL(sk_sp<const GrGLInterface> glInterface,const GrContextOptions & options)1038 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
1039 const GrContextOptions& options) {
1040 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
1041 #if GR_TEST_UTILS
1042 if (options.fRandomGLOOM) {
1043 auto copy = sk_make_sp<GrGLInterface>(*glInterface);
1044 copy->fFunctions.fGetError =
1045 make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
1046 #if GR_GL_CHECK_ERROR
1047 // Suppress logging GL errors since we'll be synthetically generating them.
1048 copy->suppressErrorLogging();
1049 #endif
1050 glInterface = std::move(copy);
1051 }
1052 #endif
1053 direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
1054 if (!direct->init()) {
1055 return nullptr;
1056 }
1057 return direct;
1058 }
1059 #endif
1060
1061 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1062 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1063 GrContextOptions defaultOptions;
1064 return MakeMock(mockOptions, defaultOptions);
1065 }
1066
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1067 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1068 const GrContextOptions& options) {
1069 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1070
1071 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1072 if (!direct->init()) {
1073 return nullptr;
1074 }
1075
1076 return direct;
1077 }
1078
1079 #ifdef SK_VULKAN
1080 /*************************************************************************************************/
MakeVulkan(const GrVkBackendContext & backendContext)1081 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1082 GrContextOptions defaultOptions;
1083 return MakeVulkan(backendContext, defaultOptions);
1084 }
1085
MakeVulkan(const GrVkBackendContext & backendContext,const GrContextOptions & options)1086 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1087 const GrContextOptions& options) {
1088 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1089
1090 direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1091 if (!direct->init()) {
1092 return nullptr;
1093 }
1094
1095 return direct;
1096 }
1097 #endif
1098
1099 #ifdef SK_METAL
1100 /*************************************************************************************************/
MakeMetal(const GrMtlBackendContext & backendContext)1101 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1102 GrContextOptions defaultOptions;
1103 return MakeMetal(backendContext, defaultOptions);
1104 }
1105
MakeMetal(const GrMtlBackendContext & backendContext,const GrContextOptions & options)1106 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1107 const GrContextOptions& options) {
1108 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1109
1110 direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1111 if (!direct->init()) {
1112 return nullptr;
1113 }
1114
1115 return direct;
1116 }
1117
1118 // deprecated
MakeMetal(void * device,void * queue)1119 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1120 GrContextOptions defaultOptions;
1121 return MakeMetal(device, queue, defaultOptions);
1122 }
1123
1124 // deprecated
1125 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
MakeMetal(void * device,void * queue,const GrContextOptions & options)1126 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1127 const GrContextOptions& options) {
1128 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1129 GrMtlBackendContext backendContext = {};
1130 backendContext.fDevice.reset(device);
1131 backendContext.fQueue.reset(queue);
1132
1133 return GrDirectContext::MakeMetal(backendContext, options);
1134 }
1135 #endif
1136
1137 #ifdef SK_DIRECT3D
1138 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1139 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1140 GrContextOptions defaultOptions;
1141 return MakeDirect3D(backendContext, defaultOptions);
1142 }
1143
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1144 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1145 const GrContextOptions& options) {
1146 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1147
1148 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1149 if (!direct->init()) {
1150 return nullptr;
1151 }
1152
1153 return direct;
1154 }
1155 #endif
1156
1157 #ifdef SK_DAWN
1158 /*************************************************************************************************/
MakeDawn(const wgpu::Device & device)1159 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1160 GrContextOptions defaultOptions;
1161 return MakeDawn(device, defaultOptions);
1162 }
1163
MakeDawn(const wgpu::Device & device,const GrContextOptions & options)1164 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1165 const GrContextOptions& options) {
1166 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1167
1168 direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1169 if (!direct->init()) {
1170 return nullptr;
1171 }
1172
1173 return direct;
1174 }
1175
1176 #endif
1177