1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7 #include "include/gpu/GrDirectContext.h"
8
9 #include "include/core/SkImageInfo.h"
10 #include "include/core/SkPixmap.h"
11 #include "include/core/SkSize.h"
12 #include "include/core/SkSurface.h"
13 #include "include/core/SkTextureCompressionType.h"
14 #include "include/core/SkTraceMemoryDump.h"
15 #include "include/gpu/GpuTypes.h"
16 #include "include/gpu/GrBackendSemaphore.h"
17 #include "include/gpu/GrBackendSurface.h"
18 #include "include/gpu/GrContextThreadSafeProxy.h"
19 #include "include/private/base/SingleOwner.h"
20 #include "include/private/base/SkTArray.h"
21 #include "include/private/base/SkTemplates.h"
22 #include "include/private/gpu/ganesh/GrTypesPriv.h"
23 #include "src/base/SkAutoMalloc.h"
24 #include "src/core/SkCompressedDataUtils.h"
25 #include "src/core/SkMipmap.h"
26 #include "src/core/SkTaskGroup.h"
27 #include "src/core/SkTraceEvent.h"
28 #include "src/gpu/DataUtils.h"
29 #include "src/gpu/GpuTypesPriv.h"
30 #include "src/gpu/RefCntedCallback.h"
31 #include "src/gpu/Swizzle.h"
32 #include "src/gpu/ganesh/Device.h"
33 #include "src/gpu/ganesh/GrBackendUtils.h"
34 #include "src/gpu/ganesh/GrCaps.h"
35 #include "src/gpu/ganesh/GrClientMappedBufferManager.h"
36 #include "src/gpu/ganesh/GrColorInfo.h"
37 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
38 #include "src/gpu/ganesh/GrDirectContextPriv.h"
39 #include "src/gpu/ganesh/GrDrawOpAtlas.h"
40 #include "src/gpu/ganesh/GrDrawingManager.h"
41 #include "src/gpu/ganesh/GrGpu.h"
42 #include "src/gpu/ganesh/GrPixmap.h"
43 #include "src/gpu/ganesh/GrProxyProvider.h"
44 #include "src/gpu/ganesh/GrRenderTargetProxy.h"
45 #include "src/gpu/ganesh/GrResourceCache.h"
46 #include "src/gpu/ganesh/GrResourceProvider.h"
47 #include "src/gpu/ganesh/GrSemaphore.h" // IWYU pragma: keep
48 #include "src/gpu/ganesh/GrShaderCaps.h"
49 #include "src/gpu/ganesh/GrSurfaceProxy.h"
50 #include "src/gpu/ganesh/GrSurfaceProxyView.h"
51 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h" // IWYU pragma: keep
52 #include "src/gpu/ganesh/SurfaceContext.h"
53 #include "src/gpu/ganesh/image/SkImage_GaneshBase.h"
54 #include "src/gpu/ganesh/mock/GrMockGpu.h"
55 #include "src/gpu/ganesh/ops/SmallPathAtlasMgr.h"
56 #include "src/gpu/ganesh/surface/SkSurface_Ganesh.h"
57 #include "src/gpu/ganesh/text/GrAtlasManager.h"
58 #include "src/image/SkImage_Base.h"
59 #include "src/image/SkSurface_Base.h"
60 #include "src/text/gpu/StrikeCache.h"
61 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
62
63 #include <array>
64 #include <atomic>
65 #include <forward_list>
66 #include <memory>
67 #include <utility>
68
69 #ifdef SK_DIRECT3D
70 #include "src/gpu/ganesh/d3d/GrD3DGpu.h"
71 #endif
72
using namespace skia_private;

// Asserts that this context is only used by its single registered owner
// (thread); compiled out when single-owner checking is disabled.
#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())

// Shorthand for the glyph strike cache shared with the GPU text subsystem.
using StrikeCache = sktext::gpu::StrikeCache;
78
Next()79 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
80 static std::atomic<uint32_t> nextID{1};
81 uint32_t id;
82 do {
83 id = nextID.fetch_add(1, std::memory_order_relaxed);
84 } while (id == SK_InvalidUniqueID);
85 return DirectContextID(id);
86 }
87
// Ctor: forwards the shared thread-safe proxy to GrRecordingContext (the
// 'false' marks this as a direct context rather than a DDL recorder), stashes
// the client's context-delete callback, and assigns a process-unique ID.
// Real backend setup happens later in init().
GrDirectContext::GrDirectContext(GrBackendApi backend,
                                 const GrContextOptions& options,
                                 sk_sp<GrContextThreadSafeProxy> proxy)
        : GrRecordingContext(std::move(proxy), false)
        , fDeleteCallbackHelper(new DeleteCallbackHelper(options.fContextDeleteContext,
                                                         options.fContextDeleteProc))
        , fDirectContextID(DirectContextID::Next()) {}
95
// Dtor: drains pending work, waits for the GPU to go idle, then tears down
// the owned subsystems in strict dependency order.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
117
threadSafeProxy()118 sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
119 return GrRecordingContext::threadSafeProxy();
120 }
121
resetGLTextureBindings()122 void GrDirectContext::resetGLTextureBindings() {
123 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
124 return;
125 }
126 fGpu->resetTextureBindings();
127 }
128
// Marks GPU backend state dirty so it is re-sent before the next draw.
// 'state' is presumably a bitfield of backend-state flags forwarded verbatim
// to GrGpu::markContextDirty — confirm against GrGpu's declaration.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
133
// Abandons the context: the backend 3D API is treated as unusable, so owned
// resources are dropped WITHOUT being returned to the API. Idempotent, but
// disallowed from inside a client ReleaseProc (would re-enter teardown).
void GrDirectContext::abandonContext() {
    if (GrRecordingContext::abandoned()) {
        return;
    }

    if (fInsideReleaseProcCnt) {
        SkDEBUGFAIL("Calling GrDirectContext::abandonContext() while inside a ReleaseProc is not "
                    "allowed");
        return;
    }

    GrRecordingContext::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
168
abandoned()169 bool GrDirectContext::abandoned() {
170 if (GrRecordingContext::abandoned()) {
171 return true;
172 }
173
174 if (fGpu && fGpu->isDeviceLost()) {
175 this->abandonContext();
176 return true;
177 }
178 return false;
179 }
180
isDeviceLost()181 bool GrDirectContext::isDeviceLost() {
182 if (fGpu && fGpu->isDeviceLost()) {
183 if (!GrRecordingContext::abandoned()) {
184 this->abandonContext();
185 }
186 return true;
187 }
188 return false;
189 }
190
oomed()191 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
192
// Like abandonContext(), but first returns all resources to the backend 3D
// API (releaseAll instead of abandonAll) so nothing is leaked. Idempotent.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (GrRecordingContext::abandoned()) {
        return;
    }

    GrRecordingContext::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
219
// Frees all GPU resources this context can safely drop right now: flushes
// pending work first, then empties the atlases, glyph caches, drawing-manager
// holds, and finally the unlocked entries in the resource cache.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources(GrPurgeResourceOptions::kAllResources);
}
243
// Completes construction once the backend GrGpu exists: publishes caps into
// the thread-safe proxy, runs base-class init, then builds the caches,
// providers, and atlas managers this context owns. Returns false on failure.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    // The proxy needs the backend's caps/pipeline builder before base init runs.
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!GrRecordingContext::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobRedrawCoordinator());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<StrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if defined(GR_TEST_UTILS)
    // Tests may pin the cache budget via context options; -1 means no override.
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->fFloatIs32Bits ||
          this->caps()->shaderCaps()->fIntegerSupport)) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing,
                                                     this->options().fSupportBilerpFromGlyphAtlas);
    // The atlas manager participates in every flush.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
303
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const304 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
305 ASSERT_SINGLE_OWNER
306
307 if (resourceCount) {
308 *resourceCount = fResourceCache->getBudgetedResourceCount();
309 }
310 if (resourceBytes) {
311 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
312 }
313 }
314
getResourceCachePurgeableBytes() const315 size_t GrDirectContext::getResourceCachePurgeableBytes() const {
316 ASSERT_SINGLE_OWNER
317 return fResourceCache->getPurgeableBytes();
318 }
319
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const320 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
321 ASSERT_SINGLE_OWNER
322 if (maxResources) {
323 *maxResources = -1;
324 }
325 if (maxResourceBytes) {
326 *maxResourceBytes = this->getResourceCacheLimit();
327 }
328 }
329
getResourceCacheLimit() const330 size_t GrDirectContext::getResourceCacheLimit() const {
331 ASSERT_SINGLE_OWNER
332 return fResourceCache->getMaxResourceBytes();
333 }
334
// Legacy setter: the resource-count limit ('unused') is ignored; only the
// byte budget is honored. Prefer setResourceCacheLimit().
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
339
// Sets the resource cache's byte budget; the cache decides when/what to evict.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
344
// Purges unlocked cached resources per 'opts', then lets the cache re-balance,
// drops stale text blobs, and gives the backend a chance to release objects.
void GrDirectContext::purgeUnlockedResources(GrPurgeResourceOptions opts) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(opts);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
361
// Periodic housekeeping: completes finished async work, recycles mapped
// buffers, and purges resources that have sat unused for at least 'msNotUsed'.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             GrPurgeResourceOptions opts) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used before this time point is eligible for purging.
    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, opts);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();
}
383
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)384 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
385 ASSERT_SINGLE_OWNER
386
387 if (this->abandoned()) {
388 return;
389 }
390
391 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
392 }
393
394 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)395 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
396 bool deleteSemaphoresAfterWait) {
397 if (!fGpu || !fGpu->caps()->backendSemaphoreSupport()) {
398 return false;
399 }
400 GrWrapOwnership ownership =
401 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
402 for (int i = 0; i < numSemaphores; ++i) {
403 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
404 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
405 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
406 // to begin with. Therefore, it is fine to not wait on it.
407 if (sema) {
408 fGpu->waitSemaphore(sema.get());
409 }
410 }
411 return true;
412 }
413
414 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
// Lazily creates the small-path atlas manager and (re)initializes its atlas.
// Returns null if the atlas cannot be initialized with the current caps.
skgpu::ganesh::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::ganesh::SmallPathAtlasMgr>();

        // Register once so the atlas participates in every flush.
        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    // initAtlas is called every time; the manager may have been reset() since
    // the last use (e.g. on abandon/freeGpuResources).
    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }

    return fSmallPathAtlasMgr.get();
}
428 #endif
429
430 ////////////////////////////////////////////////////////////////////////////////
431
// Flushes all pending work. If the context is abandoned the client's
// callbacks are still invoked (with submitted=false) so their bookkeeping
// stays balanced.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    // An empty proxy span means "flush everything".
    return this->drawingManager()->flushSurfaces(
            {}, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, nullptr);
}
447
submit(GrSyncCpu sync)448 bool GrDirectContext::submit(GrSyncCpu sync) {
449 ASSERT_SINGLE_OWNER
450 if (this->abandoned()) {
451 return false;
452 }
453
454 if (!fGpu) {
455 return false;
456 }
457
458 return fGpu->submitToGpu(sync);
459 }
460
flush(const sk_sp<const SkImage> & image,const GrFlushInfo & flushInfo)461 GrSemaphoresSubmitted GrDirectContext::flush(const sk_sp<const SkImage>& image,
462 const GrFlushInfo& flushInfo) {
463 if (!image) {
464 return GrSemaphoresSubmitted::kNo;
465 }
466 auto ib = as_IB(image);
467 if (!ib->isGaneshBacked()) {
468 return GrSemaphoresSubmitted::kNo;
469 }
470 auto igb = static_cast<const SkImage_GaneshBase*>(image.get());
471 return igb->flush(this, flushInfo);
472 }
473
flush(const sk_sp<const SkImage> & image)474 void GrDirectContext::flush(const sk_sp<const SkImage>& image) {
475 this->flush(image, {});
476 }
477
flushAndSubmit(const sk_sp<const SkImage> & image)478 void GrDirectContext::flushAndSubmit(const sk_sp<const SkImage>& image) {
479 this->flush(image, {});
480 this->submit();
481 }
482
// Flushes pending work targeting 'surface', with 'access' describing how the
// backend surface will be used next. Non-Ganesh surfaces report kNo.
GrSemaphoresSubmitted GrDirectContext::flush(SkSurface* surface,
                                             SkSurfaces::BackendSurfaceAccess access,
                                             const GrFlushInfo& info) {
    if (!surface) {
        return GrSemaphoresSubmitted::kNo;
    }
    auto sb = asSB(surface);
    if (!sb->isGaneshBacked()) {
        return GrSemaphoresSubmitted::kNo;
    }

    // The surface must have been created against this very context.
    auto gs = static_cast<SkSurface_Ganesh*>(surface);
    SkASSERT(this->priv().matches(gs->getDevice()->recordingContext()->asDirectContext()));
    GrRenderTargetProxy* rtp = gs->getDevice()->targetProxy();

    return this->priv().flushSurface(rtp, access, info, nullptr);
}
500
// Flushes pending work targeting 'surface' and, if 'newState' is non-null,
// asks the backend to transition the surface into that state afterwards.
GrSemaphoresSubmitted GrDirectContext::flush(SkSurface* surface,
                                             const GrFlushInfo& info,
                                             const skgpu::MutableTextureState* newState) {
    if (!surface) {
        return GrSemaphoresSubmitted::kNo;
    }
    auto sb = asSB(surface);
    if (!sb->isGaneshBacked()) {
        return GrSemaphoresSubmitted::kNo;
    }

    // The surface must have been created against this very context.
    auto gs = static_cast<SkSurface_Ganesh*>(surface);
    SkASSERT(this->priv().matches(gs->getDevice()->recordingContext()->asDirectContext()));
    GrRenderTargetProxy* rtp = gs->getDevice()->targetProxy();

    return this->priv().flushSurface(
            rtp, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, newState);
}
519
flushAndSubmit(SkSurface * surface,GrSyncCpu sync)520 void GrDirectContext::flushAndSubmit(SkSurface* surface, GrSyncCpu sync) {
521 this->flush(surface, SkSurfaces::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
522 this->submit(sync);
523 }
524
flush(SkSurface * surface)525 void GrDirectContext::flush(SkSurface* surface) {
526 this->flush(surface, GrFlushInfo(), nullptr);
527 }
528
529 ////////////////////////////////////////////////////////////////////////////////
530
checkAsyncWorkCompletion()531 void GrDirectContext::checkAsyncWorkCompletion() {
532 if (fGpu) {
533 fGpu->checkFinishProcs();
534 }
535 }
536
syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned)537 void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
538 if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
539 fGpu->finishOutstandingGpuWork();
540 this->checkAsyncWorkCompletion();
541 }
542 }
543
544 ////////////////////////////////////////////////////////////////////////////////
545
storeVkPipelineCacheData()546 void GrDirectContext::storeVkPipelineCacheData() {
547 if (fGpu) {
548 fGpu->storeVkPipelineCacheData();
549 }
550 }
551
552 ////////////////////////////////////////////////////////////////////////////////
553
supportsDistanceFieldText() const554 bool GrDirectContext::supportsDistanceFieldText() const {
555 return this->caps()->shaderCaps()->supportsDistanceFieldText();
556 }
557
558 //////////////////////////////////////////////////////////////////////////////
559
// Emits this context's GPU memory usage into the client-provided dump:
// the full resource cache plus a single numeric entry for the text blob cache.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobRedrawCoordinator()->usedBytes());
}
566
// Creates an uninitialized backend texture with an explicit backend format.
// Returns an invalid GrBackendTexture if the context has been abandoned or
// the backend cannot create it.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       const GrBackendFormat& backendFormat,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipmapped, isProtected, label);
}
582
createBackendTexture(const SkPixmap & srcData,GrSurfaceOrigin textureOrigin,GrRenderable renderable,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext,std::string_view label)583 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap& srcData,
584 GrSurfaceOrigin textureOrigin,
585 GrRenderable renderable,
586 GrProtected isProtected,
587 GrGpuFinishedProc finishedProc,
588 GrGpuFinishedContext finishedContext,
589 std::string_view label) {
590 return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
591 finishedProc, finishedContext, label);
592 }
593
createBackendTexture(const SkPixmap & srcData,GrRenderable renderable,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext,std::string_view label)594 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap& srcData,
595 GrRenderable renderable,
596 GrProtected isProtected,
597 GrGpuFinishedProc finishedProc,
598 GrGpuFinishedContext finishedContext,
599 std::string_view label) {
600 return this->createBackendTexture(&srcData,
601 1,
602 renderable,
603 isProtected,
604 finishedProc,
605 finishedContext,
606 label);
607 }
608
createBackendTexture(const SkPixmap srcData[],int numLevels,GrRenderable renderable,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext,std::string_view label)609 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
610 int numLevels,
611 GrRenderable renderable,
612 GrProtected isProtected,
613 GrGpuFinishedProc finishedProc,
614 GrGpuFinishedContext finishedContext,
615 std::string_view label) {
616 return this->createBackendTexture(srcData,
617 numLevels,
618 kTopLeft_GrSurfaceOrigin,
619 renderable,
620 isProtected,
621 finishedProc,
622 finishedContext,
623 label);
624 }
625
createBackendTexture(int width,int height,SkColorType skColorType,skgpu::Mipmapped mipmapped,GrRenderable renderable,GrProtected isProtected,std::string_view label)626 GrBackendTexture GrDirectContext::createBackendTexture(int width,
627 int height,
628 SkColorType skColorType,
629 skgpu::Mipmapped mipmapped,
630 GrRenderable renderable,
631 GrProtected isProtected,
632 std::string_view label) {
633 if (this->abandoned()) {
634 return GrBackendTexture();
635 }
636
637 const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
638
639 return this->createBackendTexture(
640 width, height, format, mipmapped, renderable, isProtected, label);
641 }
642
// Creates a backend texture and clears every level to 'color'. On any failure
// the partially-created texture is deleted and an invalid texture is returned,
// so the caller never leaks a backend object.
static GrBackendTexture create_and_clear_backend_texture(
        GrDirectContext* dContext,
        SkISize dimensions,
        const GrBackendFormat& backendFormat,
        skgpu::Mipmapped mipmapped,
        GrRenderable renderable,
        GrProtected isProtected,
        sk_sp<skgpu::RefCntedCallback> finishedCallback,
        std::array<float, 4> color,
        std::string_view label) {
    GrGpu* gpu = dContext->priv().getGpu();
    GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
                                                       mipmapped, isProtected, label);
    if (!beTex.isValid()) {
        return {};
    }

    if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
                                                        std::move(finishedCallback),
                                                        color)) {
        // Clear failed: release the backend object rather than leaking it.
        dContext->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
668
// Uploads 'numLevels' pixmaps into an existing backend texture by temporarily
// wrapping it in a borrowed proxy and writing through a SurfaceContext, then
// flushing so the staged data reaches the texture. Returns false if the pixel
// color type is incompatible with the texture's format or any step fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrow (don't adopt) the client's texture; the finished callback travels
    // with the proxy so the client learns when the upload has completed.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    skgpu::Swizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::ganesh::SurfaceContext surfaceContext(
            context, std::move(view), src[0].info().colorInfo());
    // Repackage the levels as GrCPixmaps for writePixels (15 inline slots is
    // enough for any realistic mip chain before heap fallback).
    AutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    // Flush the upload now so the caller may use the backend texture directly.
    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces(
            {&p, 1}, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, nullptr);
    return true;
}
709
// Creates a backend texture with an explicit format, cleared to 'color'.
// Note: the callback wrapper is constructed before the abandoned-check so the
// client's finished proc is managed uniformly on every path.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array(),
                                            label);
}
737
// Creates a backend texture for 'skColorType', cleared to 'color'. The clear
// color is pre-swizzled with the format's write swizzle so the texture reads
// back as the requested color. Returns invalid if the color type has no
// usable backend format.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array(),
                                            label);
}
772
// Creates a backend texture initialized from the given pixmap level chain.
// More than one provided level implies a mipmapped texture. On upload failure
// the freshly created texture is deleted so nothing leaks.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    // The base level's color type determines the texture's format.
    SkColorType colorType = srcData[0].colorType();

    skgpu::Mipmapped mipmapped = skgpu::Mipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipmapped = skgpu::Mipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipmapped,
                                                        renderable,
                                                        isProtected,
                                                        label);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        // Upload failed: tear down the texture we just created.
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
822
updateBackendTexture(const GrBackendTexture & texture,const SkPixmap srcData[],int numLevels,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)823 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& texture,
824 const SkPixmap srcData[],
825 int numLevels,
826 GrGpuFinishedProc finishedProc,
827 GrGpuFinishedContext finishedContext) {
828 return this->updateBackendTexture(texture,
829 srcData,
830 numLevels,
831 kTopLeft_GrSurfaceOrigin,
832 finishedProc,
833 finishedContext);
834 }
835
updateBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)836 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
837 const SkColor4f& color,
838 GrGpuFinishedProc finishedProc,
839 GrGpuFinishedContext finishedContext) {
840 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
841
842 if (this->abandoned()) {
843 return false;
844 }
845
846 return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
847 }
848
// Clears an existing backend texture to 'color', interpreting its contents as
// 'skColorType'. The color is pre-swizzled for the texture's format. Fails if
// the color type and backend format are incompatible.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    skgpu::Swizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
874
updateBackendTexture(const GrBackendTexture & backendTexture,const SkPixmap srcData[],int numLevels,GrSurfaceOrigin textureOrigin,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)875 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
876 const SkPixmap srcData[],
877 int numLevels,
878 GrSurfaceOrigin textureOrigin,
879 GrGpuFinishedProc finishedProc,
880 GrGpuFinishedContext finishedContext) {
881 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
882
883 if (this->abandoned()) {
884 return false;
885 }
886
887 if (!srcData || numLevels <= 0) {
888 return false;
889 }
890
891 // If the texture has MIP levels then we require that the full set is overwritten.
892 int numExpectedLevels = 1;
893 if (backendTexture.hasMipmaps()) {
894 numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
895 backendTexture.height()) + 1;
896 }
897 if (numLevels != numExpectedLevels) {
898 return false;
899 }
900 return update_texture_with_pixmaps(this,
901 srcData,
902 numLevels,
903 backendTexture,
904 textureOrigin,
905 std::move(finishedCallback));
906 }
907
908 //////////////////////////////////////////////////////////////////////////////
909
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,skgpu::Mipmapped mipmapped,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,const void * data,size_t size)910 static GrBackendTexture create_and_update_compressed_backend_texture(
911 GrDirectContext* dContext,
912 SkISize dimensions,
913 const GrBackendFormat& backendFormat,
914 skgpu::Mipmapped mipmapped,
915 GrProtected isProtected,
916 sk_sp<skgpu::RefCntedCallback> finishedCallback,
917 const void* data,
918 size_t size) {
919 GrGpu* gpu = dContext->priv().getGpu();
920
921 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
922 mipmapped, isProtected);
923 if (!beTex.isValid()) {
924 return {};
925 }
926
927 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
928 beTex, std::move(finishedCallback), data, size)) {
929 dContext->deleteBackendTexture(beTex);
930 return {};
931 }
932 return beTex;
933 }
934
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const SkColor4f & color,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)935 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
936 int width,
937 int height,
938 const GrBackendFormat& backendFormat,
939 const SkColor4f& color,
940 skgpu::Mipmapped mipmapped,
941 GrProtected isProtected,
942 GrGpuFinishedProc finishedProc,
943 GrGpuFinishedContext finishedContext) {
944 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
945 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
946
947 if (this->abandoned()) {
948 return {};
949 }
950
951 SkTextureCompressionType compression = GrBackendFormatToCompressionType(backendFormat);
952 if (compression == SkTextureCompressionType::kNone) {
953 return {};
954 }
955
956 size_t size = SkCompressedDataSize(
957 compression, {width, height}, nullptr, mipmapped == skgpu::Mipmapped::kYes);
958 auto storage = std::make_unique<char[]>(size);
959 skgpu::FillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
960 return create_and_update_compressed_backend_texture(this,
961 {width, height},
962 backendFormat,
963 mipmapped,
964 isProtected,
965 std::move(finishedCallback),
966 storage.get(),
967 size);
968 }
969
createCompressedBackendTexture(int width,int height,SkTextureCompressionType compression,const SkColor4f & color,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)970 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
971 int width,
972 int height,
973 SkTextureCompressionType compression,
974 const SkColor4f& color,
975 skgpu::Mipmapped mipmapped,
976 GrProtected isProtected,
977 GrGpuFinishedProc finishedProc,
978 GrGpuFinishedContext finishedContext) {
979 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
980 GrBackendFormat format = this->compressedBackendFormat(compression);
981 return this->createCompressedBackendTexture(width, height, format, color,
982 mipmapped, isProtected, finishedProc,
983 finishedContext);
984 }
985
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const void * compressedData,size_t dataSize,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)986 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
987 int width,
988 int height,
989 const GrBackendFormat& backendFormat,
990 const void* compressedData,
991 size_t dataSize,
992 skgpu::Mipmapped mipmapped,
993 GrProtected isProtected,
994 GrGpuFinishedProc finishedProc,
995 GrGpuFinishedContext finishedContext) {
996 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
997 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
998
999 if (this->abandoned()) {
1000 return {};
1001 }
1002
1003 return create_and_update_compressed_backend_texture(this,
1004 {width, height},
1005 backendFormat,
1006 mipmapped,
1007 isProtected,
1008 std::move(finishedCallback),
1009 compressedData,
1010 dataSize);
1011 }
1012
createCompressedBackendTexture(int width,int height,SkTextureCompressionType compression,const void * data,size_t dataSize,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1013 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1014 int width,
1015 int height,
1016 SkTextureCompressionType compression,
1017 const void* data,
1018 size_t dataSize,
1019 skgpu::Mipmapped mipmapped,
1020 GrProtected isProtected,
1021 GrGpuFinishedProc finishedProc,
1022 GrGpuFinishedContext finishedContext) {
1023 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1024 GrBackendFormat format = this->compressedBackendFormat(compression);
1025 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipmapped,
1026 isProtected, finishedProc, finishedContext);
1027 }
1028
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1029 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1030 const SkColor4f& color,
1031 GrGpuFinishedProc finishedProc,
1032 GrGpuFinishedContext finishedContext) {
1033 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1034
1035 if (this->abandoned()) {
1036 return false;
1037 }
1038
1039 SkTextureCompressionType compression =
1040 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1041 if (compression == SkTextureCompressionType::kNone) {
1042 return {};
1043 }
1044 size_t size = SkCompressedDataSize(compression,
1045 backendTexture.dimensions(),
1046 nullptr,
1047 backendTexture.hasMipmaps());
1048 SkAutoMalloc storage(size);
1049 skgpu::FillInCompressedData(compression,
1050 backendTexture.dimensions(),
1051 backendTexture.mipmapped(),
1052 static_cast<char*>(storage.get()),
1053 color);
1054 return fGpu->updateCompressedBackendTexture(backendTexture,
1055 std::move(finishedCallback),
1056 storage.get(),
1057 size);
1058 }
1059
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1060 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1061 const void* compressedData,
1062 size_t dataSize,
1063 GrGpuFinishedProc finishedProc,
1064 GrGpuFinishedContext finishedContext) {
1065 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1066
1067 if (this->abandoned()) {
1068 return false;
1069 }
1070
1071 if (!compressedData) {
1072 return false;
1073 }
1074
1075 return fGpu->updateCompressedBackendTexture(backendTexture,
1076 std::move(finishedCallback),
1077 compressedData,
1078 dataSize);
1079 }
1080
1081 //////////////////////////////////////////////////////////////////////////////
1082
setBackendTextureState(const GrBackendTexture & backendTexture,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1083 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1084 const skgpu::MutableTextureState& state,
1085 skgpu::MutableTextureState* previousState,
1086 GrGpuFinishedProc finishedProc,
1087 GrGpuFinishedContext finishedContext) {
1088 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1089
1090 if (this->abandoned()) {
1091 return false;
1092 }
1093
1094 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1095 }
1096
1097
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1098 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1099 const skgpu::MutableTextureState& state,
1100 skgpu::MutableTextureState* previousState,
1101 GrGpuFinishedProc finishedProc,
1102 GrGpuFinishedContext finishedContext) {
1103 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1104
1105 if (this->abandoned()) {
1106 return false;
1107 }
1108
1109 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1110 std::move(callback));
1111 }
1112
deleteBackendTexture(const GrBackendTexture & backendTex)1113 void GrDirectContext::deleteBackendTexture(const GrBackendTexture& backendTex) {
1114 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1115 // For the Vulkan backend we still must destroy the backend texture when the context is
1116 // abandoned.
1117 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1118 return;
1119 }
1120
1121 fGpu->deleteBackendTexture(backendTex);
1122 }
1123
1124 //////////////////////////////////////////////////////////////////////////////
1125
// Forwards a (key, data) pair to the backend GPU's shader precompilation and
// returns its success status. NOTE(review): unlike the other entry points in
// this file there is no abandoned() check here — presumably callers only
// invoke this on a live context; confirm before relying on it post-abandon.
bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
    return fGpu->precompileShader(key, data);
}
1129
1130 #ifdef SK_ENABLE_DUMP_GPU
1131 #include "include/core/SkString.h"
1132 #include "src/utils/SkJSONWriter.h"
dump() const1133 SkString GrDirectContext::dump() const {
1134 SkDynamicMemoryWStream stream;
1135 SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
1136 writer.beginObject();
1137
1138 writer.appendCString("backend", GrBackendApiToStr(this->backend()));
1139
1140 writer.appendName("caps");
1141 this->caps()->dumpJSON(&writer);
1142
1143 writer.appendName("gpu");
1144 this->fGpu->dumpJSON(&writer);
1145
1146 writer.appendName("context");
1147 this->dumpJSON(&writer);
1148
1149 // Flush JSON to the memory stream
1150 writer.endObject();
1151 writer.flush();
1152
1153 // Null terminate the JSON data in the memory stream
1154 stream.write8(0);
1155
1156 // Allocate a string big enough to hold all the data, then copy out of the stream
1157 SkString result(stream.bytesWritten());
1158 stream.copyToAndReset(result.data());
1159 return result;
1160 }
1161 #endif
1162
1163 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1164 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1165 GrContextOptions defaultOptions;
1166 return MakeMock(mockOptions, defaultOptions);
1167 }
1168
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1169 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1170 const GrContextOptions& options) {
1171 sk_sp<GrDirectContext> direct(
1172 new GrDirectContext(GrBackendApi::kMock,
1173 options,
1174 GrContextThreadSafeProxyPriv::Make(GrBackendApi::kMock, options)));
1175
1176 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1177 if (!direct->init()) {
1178 return nullptr;
1179 }
1180
1181 return direct;
1182 }
1183
1184 #ifdef SK_DIRECT3D
1185 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1186 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1187 GrContextOptions defaultOptions;
1188 return MakeDirect3D(backendContext, defaultOptions);
1189 }
1190
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1191 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1192 const GrContextOptions& options) {
1193 sk_sp<GrDirectContext> direct(new GrDirectContext(
1194 GrBackendApi::kDirect3D,
1195 options,
1196 GrContextThreadSafeProxyPriv::Make(GrBackendApi::kDirect3D, options)));
1197
1198 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1199 if (!direct->init()) {
1200 return nullptr;
1201 }
1202
1203 return direct;
1204 }
1205 #endif
1206