1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "include/gpu/GrDirectContext.h"
10
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrContextThreadSafeProxy.h"
14 #include "src/base/SkAutoMalloc.h"
15 #include "src/core/SkCompressedDataUtils.h"
16 #include "src/core/SkTaskGroup.h"
17 #include "src/core/SkTraceEvent.h"
18 #include "src/gpu/ganesh/GrBackendUtils.h"
19 #include "src/gpu/ganesh/GrClientMappedBufferManager.h"
20 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
21 #include "src/gpu/ganesh/GrDirectContextPriv.h"
22 #include "src/gpu/ganesh/GrDrawingManager.h"
23 #include "src/gpu/ganesh/GrGpu.h"
24 #include "src/gpu/ganesh/GrResourceProvider.h"
25 #include "src/gpu/ganesh/GrSemaphore.h"
26 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/ganesh/SurfaceContext.h"
28 #include "src/gpu/ganesh/effects/GrSkSLFP.h"
29 #include "src/gpu/ganesh/mock/GrMockGpu.h"
30 #include "src/gpu/ganesh/ops/SmallPathAtlasMgr.h"
31 #include "src/gpu/ganesh/text/GrAtlasManager.h"
32 #include "src/image/SkImage_GpuBase.h"
33 #include "src/text/gpu/StrikeCache.h"
34 #include "src/utils/SkShaderUtils.h"
35 #ifdef SK_GL
36 #include "src/gpu/ganesh/gl/GrGLGpu.h"
37 #endif
38 #ifdef SK_METAL
39 #include "include/gpu/mtl/GrMtlBackendContext.h"
40 #include "src/gpu/ganesh/mtl/GrMtlTrampoline.h"
41 #endif
42 #ifdef SK_VULKAN
43 #include "src/gpu/ganesh/vk/GrVkGpu.h"
44 #endif
45 #ifdef SK_DIRECT3D
46 #include "src/gpu/ganesh/d3d/GrD3DGpu.h"
47 #endif
48 #ifdef SK_DAWN
49 #include "src/gpu/ganesh/dawn/GrDawnGpu.h"
50 #endif
51 #include <memory>
52
53 #if GR_TEST_UTILS
54 # include "src/base/SkRandom.h"
55 # if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
56 # include <sanitizer/lsan_interface.h>
57 # endif
58 #endif
59
60 using namespace skia_private;
61
62 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())
63
64 using StrikeCache = sktext::gpu::StrikeCache;
65
Next()66 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
67 static std::atomic<uint32_t> nextID{1};
68 uint32_t id;
69 do {
70 id = nextID.fetch_add(1, std::memory_order_relaxed);
71 } while (id == SK_InvalidUniqueID);
72 return DirectContextID(id);
73 }
74
// Constructs a direct context for 'backend'. The thread-safe proxy is created
// up front (shared with any recording contexts); the delete helper invokes the
// client's fContextDeleteProc when this context is destroyed.
GrDirectContext::GrDirectContext(GrBackendApi backend, const GrContextOptions& options)
        : INHERITED(GrContextThreadSafeProxyPriv::Make(backend, options), false)
        , fDeleteCallbackHelper(new DeleteCallbackHelper(options.fContextDeleteContext,
                                                         options.fContextDeleteProc))
        , fDirectContextID(DirectContextID::Next()) {
}
81
// Teardown happens in strict dependency order: flush pending work, wait for
// the GPU, destroy the drawing manager, then release cached resources.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
103
threadSafeProxy()104 sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
105 return INHERITED::threadSafeProxy();
106 }
107
resetGLTextureBindings()108 void GrDirectContext::resetGLTextureBindings() {
109 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
110 return;
111 }
112 fGpu->resetTextureBindings();
113 }
114
// Marks categories of backend 3D API state dirty so the GPU object re-sends
// them before its next use. 'state' is a bitmask forwarded to the backend.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
119
// Transitions the context to the abandoned state: all caches are purged and
// the GPU object is disconnected WITHOUT returning resources to the 3D API
// (the API objects are assumed unusable, e.g. after device loss).
void GrDirectContext::abandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    // Abandoning from inside a client ReleaseProc would re-enter teardown
    // mid-callback; refuse and assert in debug builds.
    if (fInsideReleaseProcCnt) {
        SkDEBUGFAIL("Calling GrDirectContext::abandonContext() while inside a ReleaseProc is not "
                    "allowed");
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
154
abandoned()155 bool GrDirectContext::abandoned() {
156 if (INHERITED::abandoned()) {
157 return true;
158 }
159
160 if (fGpu && fGpu->isDeviceLost()) {
161 this->abandonContext();
162 return true;
163 }
164 return false;
165 }
166
oomed()167 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
168
// Like abandonContext(), but the backend 3D API is still usable, so resources
// ARE returned to it (DisconnectType::kCleanup) before the context goes dark.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (INHERITED::abandoned()) {
        return;
    }

    INHERITED::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
195
// Frees GPU resources that are not currently in use. Pending work is flushed
// first so resources it references become purgeable.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    // Purge the resource cache last, after the managers above have dropped
    // their references.
    fResourceCache->purgeUnlockedResources();
}
219
// One-time initialization after the backend GrGpu has been created. Returns
// false (and aborts in Android framework builds) if GPU creation or the
// base-class init failed. Builds the caches, providers and atlas manager.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        SK_ABORT_IN_ANDROID_FRAMEWORK("fGpu creation failed");
        return false;
    }

    // Hand the caps and pipeline builder to the thread-safe proxy before
    // running the shared base-class initialization.
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!INHERITED::init()) {
        SK_ABORT_IN_ANDROID_FRAMEWORK("Inherited initialization failed");
        return false;
    }

    SkASSERT(this->getTextBlobRedrawCoordinator());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<StrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if GR_TEST_UTILS
    // Tests may clamp the cache budget via a context option (-1 means "unset").
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->fFloatIs32Bits ||
          this->caps()->shaderCaps()->fIntegerSupport)) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing,
                                                     this->options().fSupportBilerpFromGlyphAtlas);
    // Register the atlas manager to be invoked at every flush.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
281
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const282 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
283 ASSERT_SINGLE_OWNER
284
285 if (resourceCount) {
286 *resourceCount = fResourceCache->getBudgetedResourceCount();
287 }
288 if (resourceBytes) {
289 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
290 }
291 }
292
getResourceCachePurgeableBytes() const293 size_t GrDirectContext::getResourceCachePurgeableBytes() const {
294 ASSERT_SINGLE_OWNER
295 return fResourceCache->getPurgeableBytes();
296 }
297
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const298 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
299 ASSERT_SINGLE_OWNER
300 if (maxResources) {
301 *maxResources = -1;
302 }
303 if (maxResourceBytes) {
304 *maxResourceBytes = this->getResourceCacheLimit();
305 }
306 }
307
getResourceCacheLimit() const308 size_t GrDirectContext::getResourceCacheLimit() const {
309 ASSERT_SINGLE_OWNER
310 return fResourceCache->getMaxResourceBytes();
311 }
312
// Legacy setter: the resource-count limit ('unused') is ignored; only the
// byte budget is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
317
// Sets the resource cache's byte budget.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
322
// Purges unreferenced resources from the cache (optionally only scratch
// resources), then gives the backend a chance to release unlocked objects.
void GrDirectContext::purgeUnlockedResources(bool scratchResourcesOnly) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(scratchResourcesOnly);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
339
// Periodic housekeeping: completes finished async work, recycles client-mapped
// buffers, and purges resources that have been idle for at least 'msNotUsed'.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             bool scratchResourcesOnly) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used before this time point is eligible for purging.
    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, scratchResourcesOnly);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();
}
361
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)362 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
363 ASSERT_SINGLE_OWNER
364
365 if (this->abandoned()) {
366 return;
367 }
368
369 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
370 }
371
372 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)373 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
374 bool deleteSemaphoresAfterWait) {
375 if (!fGpu || !fGpu->caps()->semaphoreSupport()) {
376 return false;
377 }
378 GrWrapOwnership ownership =
379 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
380 for (int i = 0; i < numSemaphores; ++i) {
381 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
382 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
383 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
384 // to begin with. Therefore, it is fine to not wait on it.
385 if (sema) {
386 fGpu->waitSemaphore(sema.get());
387 }
388 }
389 return true;
390 }
391
392 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
// Lazily creates the small-path atlas manager (registering it for on-flush
// callbacks), then (re)initializes its atlas. Returns nullptr if the atlas
// cannot be initialized.
skgpu::v1::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::v1::SmallPathAtlasMgr>();

        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    // NOTE(review): initAtlas runs on every call — presumably idempotent once
    // the atlas exists; confirm against SmallPathAtlasMgr.
    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }

    return fSmallPathAtlasMgr.get();
}
406 #endif
407
408 ////////////////////////////////////////////////////////////////////////////////
409
// Flushes all pending work to the GPU command stream. Even when the context
// is abandoned, the client's finished/submitted procs are invoked (submitted
// reports failure) so callback contexts are never leaked.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    // Empty proxy list == flush every surface.
    return this->drawingManager()->flushSurfaces({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                                                 info, nullptr);
}
425
submit(bool syncCpu)426 bool GrDirectContext::submit(bool syncCpu) {
427 ASSERT_SINGLE_OWNER
428 if (this->abandoned()) {
429 return false;
430 }
431
432 if (!fGpu) {
433 return false;
434 }
435
436 return fGpu->submitToGpu(syncCpu);
437 }
438
439 ////////////////////////////////////////////////////////////////////////////////
440
checkAsyncWorkCompletion()441 void GrDirectContext::checkAsyncWorkCompletion() {
442 if (fGpu) {
443 fGpu->checkFinishProcs();
444 }
445 }
446
syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned)447 void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
448 if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
449 fGpu->finishOutstandingGpuWork();
450 this->checkAsyncWorkCompletion();
451 }
452 }
453
454 ////////////////////////////////////////////////////////////////////////////////
455
storeVkPipelineCacheData()456 void GrDirectContext::storeVkPipelineCacheData() {
457 if (fGpu) {
458 fGpu->storeVkPipelineCacheData();
459 }
460 }
461
462 ////////////////////////////////////////////////////////////////////////////////
463
supportsDistanceFieldText() const464 bool GrDirectContext::supportsDistanceFieldText() const {
465 return this->caps()->shaderCaps()->supportsDistanceFieldText();
466 }
467
468 //////////////////////////////////////////////////////////////////////////////
469
// Enumerates GPU memory usage into the client-provided tracing dump: the
// resource cache's per-resource breakdown plus the text-blob cache total.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobRedrawCoordinator()->usedBytes());
}
476
// Creates an uninitialized backend texture with the given format. Returns an
// invalid GrBackendTexture if the context is abandoned or creation fails.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       GrMipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipmapped, isProtected, label);
}
491
// Convenience overload: maps 'skColorType' to this context's default backend
// format for that type, then delegates to the format-based overload.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       GrMipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       std::string_view label) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(
            width, height, format, mipmapped, renderable, isProtected, label);
}
507
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipmapped,GrRenderable renderable,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,std::array<float,4> color,std::string_view label)508 static GrBackendTexture create_and_clear_backend_texture(
509 GrDirectContext* dContext,
510 SkISize dimensions,
511 const GrBackendFormat& backendFormat,
512 GrMipmapped mipmapped,
513 GrRenderable renderable,
514 GrProtected isProtected,
515 sk_sp<skgpu::RefCntedCallback> finishedCallback,
516 std::array<float, 4> color,
517 std::string_view label) {
518 GrGpu* gpu = dContext->priv().getGpu();
519 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
520 mipmapped, isProtected, label);
521 if (!beTex.isValid()) {
522 return {};
523 }
524
525 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
526 std::move(finishedCallback),
527 color)) {
528 dContext->deleteBackendTexture(beTex);
529 return {};
530 }
531 return beTex;
532 }
533
// Uploads 'numLevels' mip levels from 'src' into 'backendTexture' by wrapping
// the texture in a borrowed proxy, writing through a SurfaceContext, and then
// flushing so the upload is recorded. Returns false if the pixmap color type
// is incompatible with the texture's format or any step fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrowed: the client retains ownership of the backend texture; the
    // finished callback fires when the GPU is done with it.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    skgpu::Swizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::v1::SurfaceContext surfaceContext(context, std::move(view), src[0].info().colorInfo());
    // Repackage the pixmaps for writePixels; 15 inline slots avoid a heap
    // allocation for any realistic mip chain.
    AutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    // Flush the wrapped surface so the upload is handed to the GPU.
    context->priv().drawingManager()->flushSurfaces({&p, 1},
                                                    SkSurface::BackendSurfaceAccess::kNoAccess,
                                                    info,
                                                    nullptr);
    return true;
}
575
// Creates a backend texture with the given format, cleared to 'color'.
// 'finishedProc' is invoked once the GPU has finished the clear (the callback
// is created before the abandoned check so it always fires eventually).
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array(),
                                            label);
}
602
// Creates a backend texture for 'skColorType', cleared to 'color'. The color
// is run through the format's write swizzle so the stored channels match what
// a read through the same color type would expect.
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       GrMipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array(),
                                            label);
}
636
// Creates a backend texture initialized from the given pixmap mip chain.
// srcData[0] determines size and color type; more than one level implies a
// mipmapped texture. On upload failure the new texture is deleted.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    SkColorType colorType = srcData[0].colorType();

    GrMipmapped mipmapped = GrMipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipmapped = GrMipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipmapped,
                                                        renderable,
                                                        isProtected,
                                                        label);
    if (!beTex.isValid()) {
        return {};
    }
    // Clean up the texture if the pixel upload fails so nothing leaks.
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
686
// Clears an existing backend texture to 'color'. The finished callback is
// created unconditionally so it fires even when the context is abandoned.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
}
699
// Clears an existing backend texture to 'color', interpreting the texture's
// contents as 'skColorType'. Fails if that color type is incompatible with
// the texture's format; the color is swizzled per the format's write swizzle.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    skgpu::Swizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
725
// Rewrites an existing backend texture's contents from the given pixmap mip
// chain. A mipmapped texture must be updated in full: the provided level
// count has to match the texture's complete mip chain.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrSurfaceOrigin textureOrigin,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    if (!srcData || numLevels <= 0) {
        return false;
    }

    // If the texture has MIP levels then we require that the full set is overwritten.
    int numExpectedLevels = 1;
    if (backendTexture.hasMipmaps()) {
        numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
                                                        backendTexture.height()) + 1;
    }
    if (numLevels != numExpectedLevels) {
        return false;
    }
    return update_texture_with_pixmaps(this,
                                       srcData,
                                       numLevels,
                                       backendTexture,
                                       textureOrigin,
                                       std::move(finishedCallback));
}
758
759 //////////////////////////////////////////////////////////////////////////////
760
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,GrMipmapped mipmapped,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,const void * data,size_t size)761 static GrBackendTexture create_and_update_compressed_backend_texture(
762 GrDirectContext* dContext,
763 SkISize dimensions,
764 const GrBackendFormat& backendFormat,
765 GrMipmapped mipmapped,
766 GrProtected isProtected,
767 sk_sp<skgpu::RefCntedCallback> finishedCallback,
768 const void* data,
769 size_t size) {
770 GrGpu* gpu = dContext->priv().getGpu();
771
772 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
773 mipmapped, isProtected);
774 if (!beTex.isValid()) {
775 return {};
776 }
777
778 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
779 beTex, std::move(finishedCallback), data, size)) {
780 dContext->deleteBackendTexture(beTex);
781 return {};
782 }
783 return beTex;
784 }
785
// Creates a compressed backend texture filled with 'color': the color is
// encoded into a CPU-side buffer in the format's compression scheme, which is
// then uploaded. Fails if the format is not a compressed format.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const SkColor4f& color,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    if (compression == SkImage::CompressionType::kNone) {
        return {};
    }

    // Build the full (possibly mipmapped) compressed payload on the CPU.
    size_t size = SkCompressedDataSize(compression,
                                       {width, height},
                                       nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        storage.get(),
                                                        size);
}
821
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const SkColor4f & color,GrMipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)822 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
823 int width, int height,
824 SkImage::CompressionType compression,
825 const SkColor4f& color,
826 GrMipmapped mipmapped,
827 GrProtected isProtected,
828 GrGpuFinishedProc finishedProc,
829 GrGpuFinishedContext finishedContext) {
830 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
831 GrBackendFormat format = this->compressedBackendFormat(compression);
832 return this->createCompressedBackendTexture(width, height, format, color,
833 mipmapped, isProtected, finishedProc,
834 finishedContext);
835 }
836
// Creates a compressed backend texture initialized from caller-provided,
// already-compressed data of 'dataSize' bytes.
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
        int width, int height,
        const GrBackendFormat& backendFormat,
        const void* compressedData,
        size_t dataSize,
        GrMipmapped mipmapped,
        GrProtected isProtected,
        GrGpuFinishedProc finishedProc,
        GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    return create_and_update_compressed_backend_texture(this,
                                                        {width, height},
                                                        backendFormat,
                                                        mipmapped,
                                                        isProtected,
                                                        std::move(finishedCallback),
                                                        compressedData,
                                                        dataSize);
}
862
createCompressedBackendTexture(int width,int height,SkImage::CompressionType compression,const void * data,size_t dataSize,GrMipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)863 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
864 int width, int height,
865 SkImage::CompressionType compression,
866 const void* data, size_t dataSize,
867 GrMipmapped mipmapped,
868 GrProtected isProtected,
869 GrGpuFinishedProc finishedProc,
870 GrGpuFinishedContext finishedContext) {
871 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
872 GrBackendFormat format = this->compressedBackendFormat(compression);
873 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipmapped,
874 isProtected, finishedProc, finishedContext);
875 }
876
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)877 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
878 const SkColor4f& color,
879 GrGpuFinishedProc finishedProc,
880 GrGpuFinishedContext finishedContext) {
881 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
882
883 if (this->abandoned()) {
884 return false;
885 }
886
887 SkImage::CompressionType compression =
888 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
889 if (compression == SkImage::CompressionType::kNone) {
890 return {};
891 }
892 size_t size = SkCompressedDataSize(compression,
893 backendTexture.dimensions(),
894 nullptr,
895 backendTexture.hasMipmaps());
896 SkAutoMalloc storage(size);
897 GrFillInCompressedData(compression,
898 backendTexture.dimensions(),
899 backendTexture.mipmapped(),
900 static_cast<char*>(storage.get()),
901 color);
902 return fGpu->updateCompressedBackendTexture(backendTexture,
903 std::move(finishedCallback),
904 storage.get(),
905 size);
906 }
907
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)908 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
909 const void* compressedData,
910 size_t dataSize,
911 GrGpuFinishedProc finishedProc,
912 GrGpuFinishedContext finishedContext) {
913 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
914
915 if (this->abandoned()) {
916 return false;
917 }
918
919 if (!compressedData) {
920 return false;
921 }
922
923 return fGpu->updateCompressedBackendTexture(backendTexture,
924 std::move(finishedCallback),
925 compressedData,
926 dataSize);
927 }
928
929 //////////////////////////////////////////////////////////////////////////////
930
setBackendTextureState(const GrBackendTexture & backendTexture,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)931 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
932 const skgpu::MutableTextureState& state,
933 skgpu::MutableTextureState* previousState,
934 GrGpuFinishedProc finishedProc,
935 GrGpuFinishedContext finishedContext) {
936 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
937
938 if (this->abandoned()) {
939 return false;
940 }
941
942 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
943 }
944
945
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)946 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
947 const skgpu::MutableTextureState& state,
948 skgpu::MutableTextureState* previousState,
949 GrGpuFinishedProc finishedProc,
950 GrGpuFinishedContext finishedContext) {
951 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
952
953 if (this->abandoned()) {
954 return false;
955 }
956
957 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
958 std::move(callback));
959 }
960
deleteBackendTexture(GrBackendTexture backendTex)961 void GrDirectContext::deleteBackendTexture(GrBackendTexture backendTex) {
962 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
963 // For the Vulkan backend we still must destroy the backend texture when the context is
964 // abandoned.
965 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
966 return;
967 }
968
969 fGpu->deleteBackendTexture(backendTex);
970 }
971
972 //////////////////////////////////////////////////////////////////////////////
973
precompileShader(const SkData & key,const SkData & data)974 bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
975 return fGpu->precompileShader(key, data);
976 }
977
978 #ifdef SK_ENABLE_DUMP_GPU
979 #include "include/core/SkString.h"
980 #include "src/utils/SkJSONWriter.h"
dump() const981 SkString GrDirectContext::dump() const {
982 SkDynamicMemoryWStream stream;
983 SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
984 writer.beginObject();
985
986 writer.appendCString("backend", GrBackendApiToStr(this->backend()));
987
988 writer.appendName("caps");
989 this->caps()->dumpJSON(&writer);
990
991 writer.appendName("gpu");
992 this->fGpu->dumpJSON(&writer);
993
994 writer.appendName("context");
995 this->dumpJSON(&writer);
996
997 // Flush JSON to the memory stream
998 writer.endObject();
999 writer.flush();
1000
1001 // Null terminate the JSON data in the memory stream
1002 stream.write8(0);
1003
1004 // Allocate a string big enough to hold all the data, then copy out of the stream
1005 SkString result(stream.bytesWritten());
1006 stream.copyToAndReset(result.data());
1007 return result;
1008 }
1009 #endif
1010
1011 #ifdef SK_GL
1012
1013 /*************************************************************************************************/
MakeGL(sk_sp<const GrGLInterface> glInterface)1014 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface) {
1015 GrContextOptions defaultOptions;
1016 return MakeGL(std::move(glInterface), defaultOptions);
1017 }
1018
MakeGL(const GrContextOptions & options)1019 sk_sp<GrDirectContext> GrDirectContext::MakeGL(const GrContextOptions& options) {
1020 return MakeGL(nullptr, options);
1021 }
1022
MakeGL()1023 sk_sp<GrDirectContext> GrDirectContext::MakeGL() {
1024 GrContextOptions defaultOptions;
1025 return MakeGL(nullptr, defaultOptions);
1026 }
1027
1028 #if GR_TEST_UTILS
// Test-only helper: wraps a real glGetError implementation so that, when no
// genuine error is pending, it occasionally (~1 in 300 calls) reports
// GR_GL_OUT_OF_MEMORY instead. Used below (fRandomGLOOM) to exercise the GL
// backend's OOM-handling paths.
GrGLFunction<GrGLGetErrorFn> make_get_error_with_random_oom(GrGLFunction<GrGLGetErrorFn> original) {
    // A SkRandom and a GrGLFunction<GrGLGetErrorFn> are too big to be captured by a
    // GrGLFunction<GrGLGetError> (surprise, surprise). So we make a context object and
    // capture that by pointer. However, GrGLFunction doesn't support calling a destructor
    // on the thing it captures. So we leak the context.
    struct GetErrorContext {
        SkRandom fRandom;                        // source of the synthetic-OOM coin flips
        GrGLFunction<GrGLGetErrorFn> fGetError;  // the real glGetError consulted first
    };

    auto errorContext = new GetErrorContext;

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
    // The context is intentionally leaked (see above); keep LeakSanitizer quiet about it.
    __lsan_ignore_object(errorContext);
#endif

    errorContext->fGetError = original;

    return GrGLFunction<GrGLGetErrorFn>([errorContext]() {
        // Real errors always win; only fabricate OOM when GL itself is clean.
        GrGLenum error = errorContext->fGetError();
        if (error == GR_GL_NO_ERROR && (errorContext->fRandom.nextU() % 300) == 0) {
            error = GR_GL_OUT_OF_MEMORY;
        }
        return error;
    });
}
1055 #endif
1056
MakeGL(sk_sp<const GrGLInterface> glInterface,const GrContextOptions & options)1057 sk_sp<GrDirectContext> GrDirectContext::MakeGL(sk_sp<const GrGLInterface> glInterface,
1058 const GrContextOptions& options) {
1059 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kOpenGL, options));
1060 #if GR_TEST_UTILS
1061 if (options.fRandomGLOOM) {
1062 auto copy = sk_make_sp<GrGLInterface>(*glInterface);
1063 copy->fFunctions.fGetError =
1064 make_get_error_with_random_oom(glInterface->fFunctions.fGetError);
1065 #if GR_GL_CHECK_ERROR
1066 // Suppress logging GL errors since we'll be synthetically generating them.
1067 copy->suppressErrorLogging();
1068 #endif
1069 glInterface = std::move(copy);
1070 }
1071 #endif
1072 direct->fGpu = GrGLGpu::Make(std::move(glInterface), options, direct.get());
1073 if (!direct->init()) {
1074 return nullptr;
1075 }
1076 return direct;
1077 }
1078 #endif
1079
1080 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1081 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1082 GrContextOptions defaultOptions;
1083 return MakeMock(mockOptions, defaultOptions);
1084 }
1085
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1086 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1087 const GrContextOptions& options) {
1088 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMock, options));
1089
1090 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1091 if (!direct->init()) {
1092 return nullptr;
1093 }
1094
1095 return direct;
1096 }
1097
1098 #ifdef SK_VULKAN
1099 /*************************************************************************************************/
MakeVulkan(const GrVkBackendContext & backendContext)1100 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext) {
1101 GrContextOptions defaultOptions;
1102 return MakeVulkan(backendContext, defaultOptions);
1103 }
1104
MakeVulkan(const GrVkBackendContext & backendContext,const GrContextOptions & options)1105 sk_sp<GrDirectContext> GrDirectContext::MakeVulkan(const GrVkBackendContext& backendContext,
1106 const GrContextOptions& options) {
1107 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kVulkan, options));
1108
1109 direct->fGpu = GrVkGpu::Make(backendContext, options, direct.get());
1110 if (!direct->init()) {
1111 SK_ABORT_IN_ANDROID_FRAMEWORK("GrDirectContext initialization failed");
1112 return nullptr;
1113 }
1114
1115 return direct;
1116 }
1117 #endif
1118
1119 #ifdef SK_METAL
1120 /*************************************************************************************************/
MakeMetal(const GrMtlBackendContext & backendContext)1121 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext) {
1122 GrContextOptions defaultOptions;
1123 return MakeMetal(backendContext, defaultOptions);
1124 }
1125
MakeMetal(const GrMtlBackendContext & backendContext,const GrContextOptions & options)1126 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(const GrMtlBackendContext& backendContext,
1127 const GrContextOptions& options) {
1128 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1129
1130 direct->fGpu = GrMtlTrampoline::MakeGpu(backendContext, options, direct.get());
1131 if (!direct->init()) {
1132 return nullptr;
1133 }
1134
1135 return direct;
1136 }
1137
1138 // deprecated
MakeMetal(void * device,void * queue)1139 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue) {
1140 GrContextOptions defaultOptions;
1141 return MakeMetal(device, queue, defaultOptions);
1142 }
1143
1144 // deprecated
1145 // remove include/gpu/mtl/GrMtlBackendContext.h, above, when removed
MakeMetal(void * device,void * queue,const GrContextOptions & options)1146 sk_sp<GrDirectContext> GrDirectContext::MakeMetal(void* device, void* queue,
1147 const GrContextOptions& options) {
1148 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kMetal, options));
1149 GrMtlBackendContext backendContext = {};
1150 backendContext.fDevice.reset(device);
1151 backendContext.fQueue.reset(queue);
1152
1153 return GrDirectContext::MakeMetal(backendContext, options);
1154 }
1155 #endif
1156
1157 #ifdef SK_DIRECT3D
1158 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1159 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1160 GrContextOptions defaultOptions;
1161 return MakeDirect3D(backendContext, defaultOptions);
1162 }
1163
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1164 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1165 const GrContextOptions& options) {
1166 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDirect3D, options));
1167
1168 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1169 if (!direct->init()) {
1170 return nullptr;
1171 }
1172
1173 return direct;
1174 }
1175 #endif
1176
1177 #ifdef SK_DAWN
1178 /*************************************************************************************************/
MakeDawn(const wgpu::Device & device)1179 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device) {
1180 GrContextOptions defaultOptions;
1181 return MakeDawn(device, defaultOptions);
1182 }
1183
MakeDawn(const wgpu::Device & device,const GrContextOptions & options)1184 sk_sp<GrDirectContext> GrDirectContext::MakeDawn(const wgpu::Device& device,
1185 const GrContextOptions& options) {
1186 sk_sp<GrDirectContext> direct(new GrDirectContext(GrBackendApi::kDawn, options));
1187
1188 direct->fGpu = GrDawnGpu::Make(device, options, direct.get());
1189 if (!direct->init()) {
1190 return nullptr;
1191 }
1192
1193 return direct;
1194 }
1195
1196 #endif
1197