1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "include/gpu/GrDirectContext.h"
10
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrContextThreadSafeProxy.h"
13 #include "src/core/SkAutoMalloc.h"
14 #include "src/core/SkTaskGroup.h"
15 #include "src/gpu/GrBackendUtils.h"
16 #include "src/gpu/GrClientMappedBufferManager.h"
17 #include "src/gpu/GrContextThreadSafeProxyPriv.h"
18 #include "src/gpu/GrDirectContextPriv.h"
19 #include "src/gpu/GrDrawingManager.h"
20 #include "src/gpu/GrGpu.h"
21 #include "src/gpu/GrResourceProvider.h"
22 #include "src/gpu/GrShaderUtils.h"
23 #include "src/gpu/GrSurfaceContext.h"
24 #include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
25 #include "src/gpu/effects/GrSkSLFP.h"
26 #include "src/gpu/gl/GrGLGpu.h"
27 #include "src/gpu/mock/GrMockGpu.h"
28 #include "src/gpu/ops/GrSmallPathAtlasMgr.h"
29 #include "src/gpu/text/GrAtlasManager.h"
30 #include "src/gpu/text/GrStrikeCache.h"
31 #include "src/image/SkImage_GpuBase.h"
32 #ifdef SK_METAL
33 #include "include/gpu/mtl/GrMtlBackendContext.h"
34 #include "src/gpu/mtl/GrMtlTrampoline.h"
35 #endif
36 #ifdef SK_VULKAN
37 #include "src/gpu/vk/GrVkGpu.h"
38 #endif
39 #ifdef SK_DIRECT3D
40 #include "src/gpu/d3d/GrD3DGpu.h"
41 #endif
42 #ifdef SK_DAWN
43 #include "src/gpu/dawn/GrDawnGpu.h"
44 #endif
45 #include <memory>
46
47 #if GR_TEST_UTILS
48 # include "include/utils/SkRandom.h"
49 # if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
50 # include <sanitizer/lsan_interface.h>
51 # endif
52 #endif
53
54 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(this->singleOwner())
55
Next()56 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
57 static std::atomic<uint32_t> nextID{1};
58 uint32_t id;
59 do {
60 id = nextID.fetch_add(1, std::memory_order_relaxed);
61 } while (id == SK_InvalidUniqueID);
62 return DirectContextID(id);
63 }
64
// Constructs a direct context for the given backend API. The heavy lifting is
// deferred to init(); here we only create the thread-safe proxy (via the base
// class) and assign a process-unique direct-context ID.
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDirectContextID(DirectContextID::Next()) {
}
69
// Destructor: flushes any pending work, waits for the GPU to finish, and then
// tears down caches/managers. The teardown order below is deliberate — see the
// inline comments on each step.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
91
// Returns the context's thread-safe proxy; simply forwards to the base class.
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return INHERITED::threadSafeProxy();
}
95
resetGLTextureBindings()96 void GrDirectContext::resetGLTextureBindings() {
97 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
98 return;
99 }
100 fGpu->resetTextureBindings();
101 }
102
// Tells the backend that external code may have modified GPU state; `state`
// is forwarded to GrGpu::markContextDirty, which interprets the bits.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
107
// Severs the connection to the backend API without freeing resources through
// it. After this, all GPU-backed objects are invalid. The ordering of the
// steps below matters — see the inline comments.
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        // Already abandoned; nothing further to do.
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    // Must be after GrResourceCache::abandonAll().
    fMappedBufferManager.reset();

    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
137
abandoned()138 bool GrDirectContext::abandoned() {
139 if (INHERITED::abandoned()) {
140 return true;
141 }
142
143 if (fGpu && fGpu->isDeviceLost()) {
144 this->abandonContext();
145 return true;
146 }
147 return false;
148 }
149
oomed()150 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
151
// Like abandonContext(), but first releases all resources back through the
// backend 3D API (kCleanup disconnect instead of kAbandon). Step ordering is
// deliberate — see inline comments.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        // Already abandoned; nothing further to do.
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();
}
176
// Frees GPU resources that are not currently required for pending work:
// flushes first, then empties the atlases, strike cache, drawing-manager
// resources, and finally purges unlocked entries from the resource cache.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources();
}
198
// Finishes construction once the backend GrGpu exists: wires up the
// thread-safe proxy, builds the caches/providers, and applies GrContextOptions.
// Returns false if there is no GPU or base-class init fails; the caller is
// then expected to destroy the context.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    // The proxy needs the caps/pipeline-builder before base-class init runs.
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobCache());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<GrStrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    // Tests may override the cache budget via GrContextOptions (-1 = unset).
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;
    fShaderErrorHandler = this->options().fShaderErrorHandler;
    if (!fShaderErrorHandler) {
        fShaderErrorHandler = GrShaderUtils::DefaultShaderErrorHandler();
    }

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->floatIs32Bits() ||
          this->caps()->shaderCaps()->integerSupport())) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing);
    // The atlas manager participates in every flush.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
261
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const262 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
263 ASSERT_SINGLE_OWNER
264
265 if (resourceCount) {
266 *resourceCount = fResourceCache->getBudgetedResourceCount();
267 }
268 if (resourceBytes) {
269 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
270 }
271 }
272
// Returns how many bytes the resource cache currently holds in purgeable
// (unlocked) resources.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
277
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const278 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
279 ASSERT_SINGLE_OWNER
280 if (maxResources) {
281 *maxResources = -1;
282 }
283 if (maxResourceBytes) {
284 *maxResourceBytes = this->getResourceCacheLimit();
285 }
286 }
287
// Returns the resource cache's byte budget.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}
292
// Legacy setter kept for API compatibility: the resource-count argument is
// ignored; only the byte budget is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
297
// Sets the resource cache's byte budget.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
302
// Purges unlocked resources from the cache (optionally only scratch
// resources), then trims the cache to budget, drops stale text blobs, and
// lets the backend release any unlocked backend objects it holds.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
319
// Periodic cleanup entry point: completes finished async work, recycles
// client-mapped buffers, and purges resources not used within `msNotUsed`
// (optionally scratch-only). Also drops stale text blobs.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used before this time point is eligible for purging.
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobCache()->purgeStaleBlobs();
}
341
// Purges up to `bytesToPurge` of unlocked resources, preferring scratch
// resources when requested. No-op on an abandoned context.
void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
}
351
352 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)353 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
354 bool deleteSemaphoresAfterWait) {
355 if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
356 return false;
357 }
358 GrWrapOwnership ownership =
359 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
360 for (int i = 0; i < numSemaphores; ++i) {
361 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
362 waitSemaphores[i], GrResourceProvider::SemaphoreWrapType::kWillWait, ownership);
363 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
364 // to begin with. Therefore, it is fine to not wait on it.
365 if (sema) {
366 fGpu->waitSemaphore(sema.get());
367 }
368 }
369 return true;
370 }
371
// Lazily creates the small-path atlas manager and registers it as an on-flush
// callback. Returns nullptr if its atlas cannot be initialized.
GrSmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<GrSmallPathAtlasMgr>();

        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    // initAtlas is invoked on every call; presumably it is cheap once the
    // atlas already exists — TODO(review): confirm.
    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }

    return fSmallPathAtlasMgr.get();
}
385
386 ////////////////////////////////////////////////////////////////////////////////
387
// Flushes all work to the GPU via the drawing manager. Even on an abandoned
// context the client's finished/submitted callbacks are still invoked (with
// success=false for the submit), so callers can rely on them firing.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
403
submit(bool syncCpu)404 bool GrDirectContext::submit(bool syncCpu) {
405 ASSERT_SINGLE_OWNER
406 if (this->abandoned()) {
407 return false;
408 }
409
410 if (!fGpu) {
411 return false;
412 }
413
414 return fGpu->submitToGpu(syncCpu);
415 }
416
417 ////////////////////////////////////////////////////////////////////////////////
418
// Polls the backend for completed GPU work and runs any finished-procs that
// are now ready. Safe to call when no GPU exists.
void GrDirectContext::checkAsyncWorkCompletion() {
    if (fGpu) {
        fGpu->checkFinishProcs();
    }
}
424
// Blocks until all outstanding GPU work is finished, then drains finished
// callbacks. Skipped on an abandoned context unless the caller explicitly
// opts in via shouldExecuteWhileAbandoned (used during teardown).
void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}
431
432 ////////////////////////////////////////////////////////////////////////////////
433
// Asks the backend to persist its Vulkan pipeline-cache data (a no-op on
// other backends — TODO(review): confirm backend behavior; this just forwards
// to GrGpu). Safe to call when no GPU exists.
void GrDirectContext::storeVkPipelineCacheData() {
    if (fGpu) {
        fGpu->storeVkPipelineCacheData();
    }
}
439
440 ////////////////////////////////////////////////////////////////////////////////
441
// Returns whether the shader caps allow distance-field text rendering.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
445
446 //////////////////////////////////////////////////////////////////////////////
447
// Writes GPU memory statistics into the client-provided dump: the full
// resource-cache breakdown plus the text-blob cache's byte usage.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobCache()->usedBytes());
}
454
// Creates an uninitialized backend texture with the given format/flags.
// Returns an invalid GrBackendTexture on an abandoned context or on failure.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipMapped, isProtected);
}
468
// Convenience overload: maps the SkColorType to this context's default
// backend format, then delegates to the format-based overload above.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
}
482
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrRenderable renderable,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,std::array<float,4> color)483 static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
484 SkISize dimensions,
485 const GrBackendFormat& backendFormat,
486 GrMipmapped mipMapped,
487 GrRenderable renderable,
488 GrProtected isProtected,
489 sk_sp<GrRefCntedCallback> finishedCallback,
490 std::array<float, 4> color) {
491 GrGpu* gpu = dContext->priv().getGpu();
492 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
493 mipMapped, isProtected);
494 if (!beTex.isValid()) {
495 return {};
496 }
497
498 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
499 std::move(finishedCallback),
500 color)) {
501 dContext->deleteBackendTexture(beTex);
502 return {};
503 }
504 return beTex;
505 }
506
// Uploads `numLevels` pixmaps (mip chain, finest level first — TODO(review):
// confirm ordering against callers) into an existing backend texture by
// wrapping it in a borrowed proxy and writing through a GrSurfaceContext,
// then flushing that one surface. Returns false if the pixmaps' color type is
// incompatible with the texture's format or any step fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrow (don't adopt) the texture; the finished callback fires when the
    // GPU is done reading from the staging uploads.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    GrSurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // Repackage the pixmaps as GrCPixmaps for writePixels (15 inline slots
    // comfortably covers any realistic mip count).
    SkAutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
548
// Creates a backend texture with the given format and clears it to `color`.
// The callback wrapper is created before the abandoned check so the client's
// finishedProc is still invoked (on wrapper destruction) even when we bail.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array());
}
573
// Creates a backend texture for the given SkColorType and clears it to
// `color`, applying the format's write swizzle so the stored channel order is
// correct for that backend format.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipMapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    // Reorder the clear color's channels to match the backend format's layout.
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipMapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array());
}
605
// Creates a backend texture initialized from the given pixmaps. More than one
// provided level implies a mipmapped texture. On upload failure the freshly
// created texture is deleted before returning an invalid handle.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    // The base level defines the texture's color type and dimensions.
    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipMapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipMapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipMapped,
                                                        renderable,
                                                        isProtected);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        // Don't leak the texture we just created.
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
653
// Clears an existing backend texture to `color`. The finished callback is
// created up front so the client's proc fires even on the abandoned path.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}
666
// Clears an existing backend texture to `color`, interpreting it via the
// given SkColorType: the color is swizzled to the texture format's channel
// order first. Fails if the color type and format are incompatible.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeAndFormatToGrColorType(this->caps(), skColorType, format);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
692
// Re-uploads pixel data into an existing backend texture. If the texture is
// mipmapped the caller must supply the complete mip chain (level count
// computed from the texture's dimensions); partial updates are rejected.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
725
726 //////////////////////////////////////////////////////////////////////////////
727
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipMapped,GrProtected isProtected,sk_sp<GrRefCntedCallback> finishedCallback,const void * data,size_t size)728 static GrBackendTexture create_and_update_compressed_backend_texture(
729 GrDirectContext* dContext,
730 SkISize dimensions,
731 const GrBackendFormat& backendFormat,
732 GrMipmapped mipMapped,
733 GrProtected isProtected,
734 sk_sp<GrRefCntedCallback> finishedCallback,
735 const void* data,
736 size_t size) {
737 GrGpu* gpu = dContext->priv().getGpu();
738
739 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
740 mipMapped, isProtected);
741 if (!beTex.isValid()) {
742 return {};
743 }
744
745 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
746 beTex, std::move(finishedCallback), data, size)) {
747 dContext->deleteBackendTexture(beTex);
748 return {};
749 }
750 return beTex;
751 }
752
// Creates a compressed backend texture filled with `color`: the color is
// expanded into CPU-side compressed data (covering all mips if requested) and
// uploaded. Returns an invalid texture if the format is not a recognized
// compression type or the context is abandoned.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const SkColor4f& color,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    if (compression == SkImage::CompressionType::kNone) {
        return {};
    }

    // Build the compressed payload on the CPU, then hand it to the helper.
    size_t size = SkCompressedDataSize(compression,
                                       {width, height},
                                       nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        storage.get(),
                                                        size);
}
788
// Convenience overload: maps the SkImage compression type to a backend
// format, then delegates to the format-based overload above.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        SkImage::CompressionType compression,
        const SkColor4f& color,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrBackendFormat format = this->compressedBackendFormat(compression);
    return this->createCompressedBackendTexture(width, height, format, color,
                                                mipMapped, isProtected, finishedProc,
                                                finishedContext);
}
803
// Creates a compressed backend texture initialized with client-supplied
// compressed data (`dataSize` bytes, expected to match the format/mip chain —
// TODO(review): size validation happens downstream, confirm).
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const void* compressedData,
        size_t dataSize,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipMapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        compressedData,
                                                        dataSize);
}
829
// Convenience overload: maps the SkImage compression type to a backend
// format, then delegates to the format+data overload above.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        SkImage::CompressionType compression,
        const void* data, size_t dataSize,
        GrMipmapped mipMapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    GrBackendFormat format = this->compressedBackendFormat(compression);
    return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
                                                isProtected, finishedProc, finishedContext);
}
843
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)844 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
845 const SkColor4f& color,
846 GrGpuFinishedProc finishedProc,
847 GrGpuFinishedContext finishedContext) {
848 auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
849
850 if (this->abandoned()) {
851 return false;
852 }
853
854 SkImage::CompressionType compression =
855 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
856 if (compression == SkImage::CompressionType::kNone) {
857 return {};
858 }
859 size_t size = SkCompressedDataSize(compression,
860 backendTexture.dimensions(),
861 nullptr,
862 backendTexture.hasMipmaps());
863 SkAutoMalloc storage(size);
864 GrFillInCompressedData(compression,
865 backendTexture.dimensions(),
866 backendTexture.mipmapped(),
867 static_cast<char*>(storage.get()),
868 color);
869 return fGpu->updateCompressedBackendTexture(backendTexture,
870 std::move(finishedCallback),
871 storage.get(),
872 size);
873 }
874
// Re-uploads client-supplied compressed data into an existing compressed
// backend texture. Rejects a null data pointer; size validation is delegated
// to the backend.
bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                     const void* compressedData,
                                                     size_t dataSize,
                                                     GrGpuFinishedProc finishedProc,
                                                     GrGpuFinishedContext finishedContext) {
    auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!compressedData) {
        return false;
    }

    return fGpu->updateCompressedBackendTexture(backendTexture,
                                                std::move(finishedCallback),
                                                compressedData,
                                                dataSize);
}
895
896 //////////////////////////////////////////////////////////////////////////////
897
setBackendTextureState(const GrBackendTexture & backendTexture,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)898 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
899 const GrBackendSurfaceMutableState& state,
900 GrBackendSurfaceMutableState* previousState,
901 GrGpuFinishedProc finishedProc,
902 GrGpuFinishedContext finishedContext) {
903 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
904
905 if (this->abandoned()) {
906 return false;
907 }
908
909 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
910 }
911
912
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const GrBackendSurfaceMutableState & state,GrBackendSurfaceMutableState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)913 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
914 const GrBackendSurfaceMutableState& state,
915 GrBackendSurfaceMutableState* previousState,
916 GrGpuFinishedProc finishedProc,
917 GrGpuFinishedContext finishedContext) {
918 auto callback = GrRefCntedCallback::Make(finishedProc, finishedContext);
919
920 if (this->abandoned()) {
921 return false;
922 }
923
924 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
925 std::move(callback));
926 }
927
deleteBackendTexture(GrBackendTexture backendTex)928 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
929 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
930 // For the Vulkan backend we still must destroy the backend texture when the context is
931 // abandoned.
932 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
933 return;
934 }
935
936 fGpu->deleteBackendTexture(backendTex);
937 }
938
939 //////////////////////////////////////////////////////////////////////////////
940
// Hands a cached shader/program blob to the backend for ahead-of-time compilation. 'key' and
// 'data' are opaque here; their format is backend-defined (presumably produced by a prior run's
// persistent shader cache -- confirm against GrContextOptions' cache hooks).
// NOTE(review): unlike the other entry points in this file there is no abandoned() guard, so
// callers must not invoke this after the context has been abandoned.
bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}
944
945 #ifdef SK_ENABLE_DUMP_GPU
946 #include "include/core/SkString.h"
947 #include "src/utils/SkJSONWriter.h"
// Serializes a diagnostic snapshot of this context -- backend name, caps, GPU state, and the
// base-context JSON -- into one pretty-printed JSON string. Debug aid; compiled only when
// SK_ENABLE_DUMP_GPU is defined.
SkString GrDirectContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    writer.appendString("backend", GrBackendApiToStr(this->backend()));

    writer.appendName("caps");
    this->caps()->dumpJSON(&writer);

    writer.appendName("gpu");
    this->fGpu->dumpJSON(&writer);

    writer.appendName("context");
    this->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the stream
    // (bytesWritten already includes the terminator written above).
    SkString result(stream.bytesWritten());
    stream.copyToAndReset(result.writable_str());
    return result;
}
976 #endif
977
978 #ifdef SK_GL
979
980 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)981 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
982 GrContextOptions defaultOptions;
983 return MakeGL(std::move(glInterface), defaultOptions);
984 }
985
MakeGL(const GrContextOptions & options)986 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
987 return MakeGL(nullptr, options);
988 }
989
MakeGL()990 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
991 GrContextOptions defaultOptions;
992 return MakeGL(nullptr, defaultOptions);
993 }
994
995 #if GR_TEST_UTILS
// Test-only (GR_TEST_UTILS): wraps a glGetError implementation so that roughly 1 in 300 calls
// that would have returned GR_GL_NO_ERROR instead report GR_GL_OUT_OF_MEMORY, exercising the
// driver-OOM handling paths.
GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
    // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
    // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
    // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
    // on the thing it captures. So we leak the context.
    struct GetErrorContext {
        SkRandom fRandom;
        GrGLFunction<GrGLGetErrorFn> fGetError;
    };

    auto errorContext = new GetErrorContext;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
    // The leak above is deliberate; keep LSAN from reporting it.
    __lsan_ignore_object(errorContext);
#endif

    errorContext->fGetError = original;

    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
        // Only inject an error when the real call succeeded, so genuine errors still surface.
        GrGLenum error = errorContext->fGetError();
        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
            error = GR_GL_OUT_OF_MEMORY;
        }
        return error;
    });
}
1022 #endif
1023
// Primary GL factory. The context object is created first because GrGLGpu::Make receives a raw
// back-pointer to it; returns nullptr if GPU creation or context init fails.
sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
                                               const GrContextOptions& options) {
    sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
#if GR_TEST_UTILS
    if (options.fRandomGLOOM) {
        // Clone the interface and splice in a getError that randomly reports OOM.
        // NOTE(review): this dereferences glInterface without a null check, so fRandomGLOOM
        // appears to require an explicit interface -- confirm callers never combine it with
        // the null "native interface" form.
        auto copy = sk_make_sp<GrGLInterface>(*glInterface);
        copy->fFunctions.fGetError =
                make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
#if GR_GL_CHECK_ERROR
        // Suppress logging GL errors since we'll be synthetically generating them.
        copy->suppressErrorLogging();
#endif
        glInterface = std::move(copy);
    }
#endif
    direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
    if (!direct->init()) {
        return nullptr;
    }
    return direct;
}
1045 #endif
1046
1047 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1048 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1049 GrContextOptions defaultOptions;
1050 return MakeMock(mockOptions, defaultOptions);
1051 }
1052
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1053 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1054 const GrContextOptions& options) {
1055 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1056
1057 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1058 if (!direct->init()) {
1059 return nullptr;
1060 }
1061
1062 return direct;
1063 }
1064
1065 #ifdef SK_VULKAN
1066 /*************************************************************************************************/
MakeVulkan(const GrVkBackendContext & backendContext)1067 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1068 GrContextOptions defaultOptions;
1069 return MakeVulkan(backendContext, defaultOptions);
1070 }
1071
MakeVulkan(const GrVkBackendContext & backendContext,const GrContextOptions & options)1072 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1073 const GrContextOptions& options) {
1074 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1075
1076 direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1077 if (!direct->init()) {
1078 return nullptr;
1079 }
1080
1081 return direct;
1082 }
1083 #endif
1084
1085 #ifdef SK_METAL
1086 /*************************************************************************************************/
MakeMetal(const GrMtlBackendContext & backendContext)1087 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1088 GrContextOptions defaultOptions;
1089 return MakeMetal(backendContext, defaultOptions);
1090 }
1091
MakeMetal(const GrMtlBackendContext & backendContext,const GrContextOptions & options)1092 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1093 const GrContextOptions& options) {
1094 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1095
1096 direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1097 if (!direct->init()) {
1098 return nullptr;
1099 }
1100
1101 return direct;
1102 }
1103
1104 // deprecated
MakeMetal(void * device,void * queue)1105 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1106 GrContextOptions defaultOptions;
1107 return MakeMetal(device, queue, defaultOptions);
1108 }
1109
1110 // deprecated
1111 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
MakeMetal(void * device,void * queue,const GrContextOptions & options)1112 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1113 const GrContextOptions& options) {
1114 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1115 GrMtlBackendContext backendContext = {};
1116 backendContext.fDevice.reset(device);
1117 backendContext.fQueue.reset(queue);
1118
1119 return GrDirectContext::MakeMetal(backendContext, options);
1120 }
1121 #endif
1122
1123 #ifdef SK_DIRECT3D
1124 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1125 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1126 GrContextOptions defaultOptions;
1127 return MakeDirect3D(backendContext, defaultOptions);
1128 }
1129
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1130 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1131 const GrContextOptions& options) {
1132 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1133
1134 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1135 if (!direct->init()) {
1136 return nullptr;
1137 }
1138
1139 return direct;
1140 }
1141 #endif
1142
1143 #ifdef SK_DAWN
1144 /*************************************************************************************************/
MakeDawn(const wgpu::Device & device)1145 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1146 GrContextOptions defaultOptions;
1147 return MakeDawn(device, defaultOptions);
1148 }
1149
MakeDawn(const wgpu::Device & device,const GrContextOptions & options)1150 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1151 const GrContextOptions& options) {
1152 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1153
1154 direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1155 if (!direct->init()) {
1156 return nullptr;
1157 }
1158
1159 return direct;
1160 }
1161
1162 #endif
1163