1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7 #include "include/gpu/ganesh/GrDirectContext.h"
8
9 #include "include/core/SkImageInfo.h"
10 #include "include/core/SkPixmap.h"
11 #include "include/core/SkSize.h"
12 #include "include/core/SkSurface.h"
13 #include "include/core/SkTextureCompressionType.h"
14 #include "include/core/SkTraceMemoryDump.h"
15 #include "include/gpu/GpuTypes.h"
16 #include "include/gpu/ganesh/GrBackendSemaphore.h"
17 #include "include/gpu/ganesh/GrBackendSurface.h"
18 #include "include/gpu/ganesh/GrContextThreadSafeProxy.h"
19 #include "include/private/base/SingleOwner.h"
20 #include "include/private/base/SkTArray.h"
21 #include "include/private/base/SkTemplates.h"
22 #include "include/private/gpu/ganesh/GrTypesPriv.h"
23 #include "src/base/SkAutoMalloc.h"
24 #include "src/core/SkCompressedDataUtils.h"
25 #include "src/core/SkMipmap.h"
26 #include "src/core/SkTaskGroup.h"
27 #include "src/core/SkTraceEvent.h"
28 #include "src/gpu/DataUtils.h"
29 #include "src/gpu/GpuTypesPriv.h"
30 #include "src/gpu/RefCntedCallback.h"
31 #include "src/gpu/Swizzle.h"
32 #include "src/gpu/ganesh/Device.h"
33 #include "src/gpu/ganesh/GrBackendUtils.h"
34 #include "src/gpu/ganesh/GrCaps.h"
35 #include "src/gpu/ganesh/GrClientMappedBufferManager.h"
36 #include "src/gpu/ganesh/GrColorInfo.h"
37 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
38 #include "src/gpu/ganesh/GrDirectContextPriv.h"
39 #include "src/gpu/ganesh/GrDrawOpAtlas.h"
40 #include "src/gpu/ganesh/GrDrawingManager.h"
41 #include "src/gpu/ganesh/GrGpu.h"
42 #include "src/gpu/ganesh/GrPixmap.h"
43 #include "src/gpu/ganesh/GrProxyProvider.h"
44 #include "src/gpu/ganesh/GrRenderTargetProxy.h"
45 #include "src/gpu/ganesh/GrResourceCache.h"
46 #include "src/gpu/ganesh/GrResourceProvider.h"
47 #include "src/gpu/ganesh/GrSemaphore.h" // IWYU pragma: keep
48 #include "src/gpu/ganesh/GrShaderCaps.h"
49 #include "src/gpu/ganesh/GrSurfaceProxy.h"
50 #include "src/gpu/ganesh/GrSurfaceProxyView.h"
51 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h" // IWYU pragma: keep
52 #include "src/gpu/ganesh/SurfaceContext.h"
53 #include "src/gpu/ganesh/image/SkImage_GaneshBase.h"
54 #include "src/gpu/ganesh/mock/GrMockGpu.h"
55 #include "src/gpu/ganesh/ops/SmallPathAtlasMgr.h"
56 #include "src/gpu/ganesh/surface/SkSurface_Ganesh.h"
57 #include "src/gpu/ganesh/text/GrAtlasManager.h"
58 #include "src/image/SkImage_Base.h"
59 #include "src/image/SkSurface_Base.h"
60 #include "src/text/gpu/StrikeCache.h"
61 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
62
63 #include <array>
64 #include <atomic>
65 #include <forward_list>
66 #include <memory>
67 #include <utility>
68
69 #ifdef SK_DIRECT3D
70 #include "src/gpu/ganesh/d3d/GrD3DGpu.h"
71 #endif
72
73 using namespace skia_private;
74
75 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())
76
77 using StrikeCache = sktext::gpu::StrikeCache;
78
Next()79 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
80 static std::atomic<uint32_t> nextID{1};
81 uint32_t id;
82 do {
83 id = nextID.fetch_add(1, std::memory_order_relaxed);
84 } while (id == SK_InvalidUniqueID);
85 return DirectContextID(id);
86 }
87
// Constructs a direct context for the given backend. The thread-safe proxy is handed to
// the GrRecordingContext base; the client's optional delete callback is captured in a
// helper so it can fire on destruction. Note: |backend| is not used directly here —
// presumably the backend identity travels with the proxy; confirm against the header.
GrDirectContext::GrDirectContext(GrBackendApi backend,
                                 const GrContextOptions& options,
                                 sk_sp<GrContextThreadSafeProxy> proxy)
        : GrRecordingContext(std::move(proxy), false)
        , fDeleteCallbackHelper(new DeleteCallbackHelper(options.fContextDeleteContext,
                                                         options.fContextDeleteProc))
        , fDirectContextID(DirectContextID::Next()) {}
95
// Destructor: flushes pending work, waits for the GPU to finish, then tears down the
// drawing manager, resource cache, and mapped-buffer manager in a strict order.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
117
// Returns the proxy object that may be shared across threads to create recording contexts.
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return GrRecordingContext::threadSafeProxy();
}

// Forgets any GL texture-unit bindings Ganesh has cached. Only meaningful on the OpenGL
// backend; a no-op for other backends or an abandoned context.
void GrDirectContext::resetGLTextureBindings() {
    if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
        return;
    }
    fGpu->resetTextureBindings();
}

// Marks the given backend state bits dirty so Ganesh re-sends them before the next draw.
// NOTE(review): no abandoned() guard here, so fGpu is assumed non-null — callers must not
// invoke this on a context that failed init.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
133
// Abandons the context: all GPU resources are orphaned (not returned to the 3D API) and
// the context becomes unusable. The teardown order below matters: the recording-context
// state is abandoned first, GPU work is synced, caches are emptied, and only then is the
// GrGpu disconnected so destructors don't touch the API afterwards.
void GrDirectContext::abandonContext() {
    if (GrRecordingContext::abandoned()) {
        return;
    }

    // Re-entering abandonContext from a resource ReleaseProc would corrupt teardown.
    if (fInsideReleaseProcCnt) {
        SkDEBUGFAIL("Calling GrDirectContext::abandonContext() while inside a ReleaseProc is not "
                    "allowed");
        return;
    }

    GrRecordingContext::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
168
abandoned()169 bool GrDirectContext::abandoned() {
170 if (GrRecordingContext::abandoned()) {
171 return true;
172 }
173
174 if (fGpu && fGpu->isDeviceLost()) {
175 this->abandonContext();
176 return true;
177 }
178 return false;
179 }
180
isDeviceLost()181 bool GrDirectContext::isDeviceLost() {
182 if (fGpu && fGpu->isDeviceLost()) {
183 if (!GrRecordingContext::abandoned()) {
184 this->abandonContext();
185 }
186 return true;
187 }
188 return false;
189 }
190
// Returns (and clears) whether the backend has flagged an out-of-memory condition.
bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
192
// Like abandonContext(), but resources ARE returned to the backend 3D API before the
// context becomes unusable. The ordering mirrors abandonContext(); releaseAll() must
// precede resetting the mapped-buffer manager (see comment below).
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (GrRecordingContext::abandoned()) {
        return;
    }

    GrRecordingContext::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
219
// Frees all unlocked GPU resources while keeping the context usable: flushes pending
// work, empties atlases and glyph caches, then purges everything unlocked in the cache.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources(GrPurgeResourceOptions::kAllResources);
}
243
// Second-phase initialization: wires the GrGpu's caps into the thread-safe proxy, runs
// the base-class init, then constructs the strike cache, resource cache/provider,
// mapped-buffer manager, optional task group, and the glyph atlas manager.
// Returns false if no GrGpu was created or base initialization fails.
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    // The proxy must learn the caps/pipeline builder before GrRecordingContext::init().
    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!GrRecordingContext::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobRedrawCoordinator());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<StrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if defined(GPU_TEST_UTILS)
    // Tests may pin the cache budget; -1 means "no override".
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->fFloatIs32Bits ||
          this->caps()->shaderCaps()->fIntegerSupport)) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing,
                                                     this->options().fSupportBilerpFromGlyphAtlas);
    // The atlas manager participates in pre-flush callbacks.
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
303
// Reports the current budgeted resource count and byte usage. Either out-param may be null.
void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    ASSERT_SINGLE_OWNER

    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

// Returns the number of bytes held by resources that could be purged right now.
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}

// Legacy limits query. A resource-count limit is not tracked, so -1 is reported for it;
// only the byte budget is meaningful.
void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
    ASSERT_SINGLE_OWNER
    if (maxResources) {
        *maxResources = -1;
    }
    if (maxResourceBytes) {
        *maxResourceBytes = this->getResourceCacheLimit();
    }
}

// Returns the cache's byte budget.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}

// Legacy setter: the count argument is ignored; only the byte budget is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}

// Sets the cache's byte budget.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
344
// Purges unlocked resources per |opts|, trims the cache back under budget, evicts stale
// text blobs, and lets the backend drop any unlocked backend objects it holds.
void GrDirectContext::purgeUnlockedResources(GrPurgeResourceOptions opts) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(opts);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
361
purgeUnlockedResourcesByTag(bool scratchResourceseOnly,const GrGpuResourceTag & tag)362 void GrDirectContext::purgeUnlockedResourcesByTag(bool scratchResourceseOnly, const GrGpuResourceTag& tag) {
363 ASSERT_SINGLE_OWNER
364 fResourceCache->purgeUnlockedResourcesByTag(scratchResourceseOnly, tag);
365 fResourceCache->purgeAsNeeded();
366
367 // The textBlod Cache doesn't actually hold any GPU resource but this is a convenient
368 // place to purge stale blobs
369 this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();
370 }
371
// Purges unlocked resources belonging to any of the exited processes in |exitedPidSet|
// (optionally only scratch resources), then trims the cache and stale text blobs.
void GrDirectContext::purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet) {
    ASSERT_SINGLE_OWNER
    fResourceCache->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();
}
381
// Periodic housekeeping: completes finished async work, returns mapped buffers to their
// owners, and purges resources (per |opts|) that have not been used within |msNotUsed|.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             GrPurgeResourceOptions opts) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used before this time point is eligible for purging.
    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, opts);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();
}
403
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)404 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
405 ASSERT_SINGLE_OWNER
406
407 if (this->abandoned()) {
408 return;
409 }
410
411 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
412 }
413
414 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)415 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
416 bool deleteSemaphoresAfterWait) {
417 if (!fGpu || !fGpu->caps()->backendSemaphoreSupport()) {
418 return false;
419 }
420 GrWrapOwnership ownership =
421 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
422 for (int i = 0; i < numSemaphores; ++i) {
423 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
424 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
425 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
426 // to begin with. Therefore, it is fine to not wait on it.
427 if (sema) {
428 fGpu->waitSemaphore(sema.get());
429 }
430 }
431 return true;
432 }
433
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
// Lazily creates the small-path atlas manager and (re)initializes its atlas.
// Returns nullptr if the atlas cannot be initialized with the current caps.
skgpu::ganesh::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
    if (!fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr = std::make_unique<skgpu::ganesh::SmallPathAtlasMgr>();

        // The atlas manager participates in pre-flush callbacks.
        this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
    }

    if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
        return nullptr;
    }

    return fSmallPathAtlasMgr.get();
}
#endif
449
// Registers a client callback to be invoked when a Vulkan error is reported.
// The callback is copied and retained; a later registration replaces the previous one.
void GrDirectContext::registerVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback)
{
    vulkanErrorCallback_ = vulkanErrorCallback;
}

// Invokes the registered Vulkan error callback, if any.
void GrDirectContext::processVulkanError()
{
    if (vulkanErrorCallback_) {
        vulkanErrorCallback_();
    }
}

////////////////////////////////////////////////////////////////////////////////

// Reports which GPU statistics the backend can supply.
skgpu::GpuStatsFlags GrDirectContext::supportedGpuStats() const {
    return this->caps()->supportedGpuStats();
}
467
// Flushes all pending work to the GPU command stream (does not submit). On an abandoned
// context the client's finished/submitted procs are still honored (submitted=false).
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    // Empty proxy list => flush everything the drawing manager has queued.
    return this->drawingManager()->flushSurfaces(
            {}, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, nullptr);
}
483
submit(const GrSubmitInfo & info)484 bool GrDirectContext::submit(const GrSubmitInfo& info) {
485 ASSERT_SINGLE_OWNER
486 if (this->abandoned()) {
487 return false;
488 }
489
490 if (!fGpu) {
491 return false;
492 }
493
494 return fGpu->submitToGpu(info);
495 }
496
flush(const sk_sp<const SkImage> & image,const GrFlushInfo & flushInfo)497 GrSemaphoresSubmitted GrDirectContext::flush(const sk_sp<const SkImage>& image,
498 const GrFlushInfo& flushInfo) {
499 if (!image) {
500 return GrSemaphoresSubmitted::kNo;
501 }
502 auto ib = as_IB(image);
503 if (!ib->isGaneshBacked()) {
504 return GrSemaphoresSubmitted::kNo;
505 }
506 auto igb = static_cast<const SkImage_GaneshBase*>(image.get());
507 return igb->flush(this, flushInfo);
508 }
509
// Convenience overload: flush |image| with default flush info.
void GrDirectContext::flush(const sk_sp<const SkImage>& image) {
    this->flush(image, {});
}

// Convenience: flush work for |image| and submit the command buffers.
void GrDirectContext::flushAndSubmit(const sk_sp<const SkImage>& image) {
    this->flush(image, {});
    this->submit();
}
518
flush(SkSurface * surface,SkSurfaces::BackendSurfaceAccess access,const GrFlushInfo & info)519 GrSemaphoresSubmitted GrDirectContext::flush(SkSurface* surface,
520 SkSurfaces::BackendSurfaceAccess access,
521 const GrFlushInfo& info) {
522 if (!surface) {
523 return GrSemaphoresSubmitted::kNo;
524 }
525 auto sb = asSB(surface);
526 if (!sb->isGaneshBacked()) {
527 return GrSemaphoresSubmitted::kNo;
528 }
529
530 auto gs = static_cast<SkSurface_Ganesh*>(surface);
531 SkASSERT(this->priv().matches(gs->getDevice()->recordingContext()->asDirectContext()));
532 GrRenderTargetProxy* rtp = gs->getDevice()->targetProxy();
533
534 return this->priv().flushSurface(rtp, access, info, nullptr);
535 }
536
// Flushes pending work targeting |surface| and, if |newState| is non-null, transitions
// the backend texture to that state as part of the flush. Null or non-Ganesh surfaces
// return kNo without doing work.
GrSemaphoresSubmitted GrDirectContext::flush(SkSurface* surface,
                                             const GrFlushInfo& info,
                                             const skgpu::MutableTextureState* newState) {
    if (!surface) {
        return GrSemaphoresSubmitted::kNo;
    }
    auto sb = asSB(surface);
    if (!sb->isGaneshBacked()) {
        return GrSemaphoresSubmitted::kNo;
    }

    auto gs = static_cast<SkSurface_Ganesh*>(surface);
    // The surface must belong to this direct context.
    SkASSERT(this->priv().matches(gs->getDevice()->recordingContext()->asDirectContext()));
    GrRenderTargetProxy* rtp = gs->getDevice()->targetProxy();

    return this->priv().flushSurface(
            rtp, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, newState);
}
555
// Convenience: flush |surface| and submit, optionally blocking the CPU until done.
void GrDirectContext::flushAndSubmit(SkSurface* surface, GrSyncCpu sync) {
    this->flush(surface, SkSurfaces::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
    this->submit(sync);
}

// Convenience overload: flush |surface| with default info and no state transition.
void GrDirectContext::flush(SkSurface* surface) {
    this->flush(surface, GrFlushInfo(), nullptr);
}
564
565 ////////////////////////////////////////////////////////////////////////////////
566
// Polls the backend for finished work and fires any completed finish-callbacks.
void GrDirectContext::checkAsyncWorkCompletion() {
    if (fGpu) {
        fGpu->checkFinishedCallbacks();
    }
}

// Blocks until all outstanding GPU work completes, then drains finish-callbacks.
// When abandoned, this only runs if the caller explicitly opts in.
void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
    if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
        fGpu->finishOutstandingGpuWork();
        this->checkAsyncWorkCompletion();
    }
}

////////////////////////////////////////////////////////////////////////////////

// Asks the Vulkan backend to persist its pipeline cache (no-op on other backends —
// presumably a no-op in GrGpu's default implementation; confirm in GrGpu).
void GrDirectContext::storeVkPipelineCacheData() {
    if (fGpu) {
        fGpu->storeVkPipelineCacheData();
    }
}

////////////////////////////////////////////////////////////////////////////////

// Reports whether the shader caps allow distance-field text rendering.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
593
594 //////////////////////////////////////////////////////////////////////////////
595
// OH extension: dumps a textual listing of all cached resources (VkImage recording
// builds only; otherwise a no-op).
void GrDirectContext::dumpAllResource(std::stringstream &dump) const {
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    if (fResourceCache) {
        fResourceCache->dumpAllResource(dump);
    }
#endif
}

// Emits resource-cache memory statistics plus the text-blob cache footprint into the dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobRedrawCoordinator()->usedBytes());
}

// Same as dumpMemoryStatistics, but restricted to resources carrying |tag|.
void GrDirectContext::dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, GrGpuResourceTag& tag) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump, tag);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobRedrawCoordinator()->usedBytes());
}
617
// Pushes |tag| as the current tag to be attached to newly created resources.
// (The `return` of a void call below is legal but stylistically unusual; kept as-is.)
void GrDirectContext::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        return fResourceCache->setCurrentGrResourceTag(tag);
    }
}

// Pops the most recently pushed resource tag.
void GrDirectContext::popGrResourceTag()
{
    if (fResourceCache) {
        return fResourceCache->popGrResourceTag();
    }
}

// Returns the tag currently applied to new resources, or a default tag if there is no cache.
GrGpuResourceTag GrDirectContext::getCurrentGrResourceTag() const {
    if (fResourceCache) {
        return fResourceCache->getCurrentGrResourceTag();
    }
    return {};
}
// Releases every cached resource carrying |tag| back to the backend API.
void GrDirectContext::releaseByTag(const GrGpuResourceTag& tag) {
    if (fResourceCache) {
        fResourceCache->releaseByTag(tag);
    }
}
// Returns the set of all tags currently present in the resource cache.
std::set<GrGpuResourceTag> GrDirectContext::getAllGrGpuResourceTags() const {
    if (fResourceCache) {
        return fResourceCache->getAllGrGpuResourceTags();
    }
    return {};
}
648
// OH ISSUE: get the memory information of the updated pid.
// Fills |out| with per-pid memory usage as tracked by the resource cache.
void GrDirectContext::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
{
    if (fResourceCache) {
        fResourceCache->getUpdatedMemoryMap(out);
    }
}

// OH ISSUE: init gpu memory limit.
// Installs an overflow callback and a byte ceiling in the resource cache.
void GrDirectContext::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fResourceCache) {
        fResourceCache->initGpuMemoryLimit(callback, size);
    }
}
664
// OH ISSUE: check whether the PID is abnormal.
// Delegates to the resource cache; false when no cache exists.
bool GrDirectContext::isPidAbnormal() const
{
    if (fResourceCache) {
        return fResourceCache->isPidAbnormal();
    }
    return false;
}

// Asks the backend's VMA allocator to defragment its memory (no-op without a GrGpu).
void GrDirectContext::vmaDefragment()
{
    if (fGpu) {
        fGpu->vmaDefragment();
    }
}
680
dumpVmaStats(SkString * out)681 void GrDirectContext::dumpVmaStats(SkString *out)
682 {
683 if (out == nullptr) {
684 return;
685 }
686 if (fGpu) {
687 fGpu->dumpVmaStats(out);
688 }
689 }
// Creates an uninitialized backend texture with an explicit backend format.
// Returns an invalid GrBackendTexture if the context is abandoned.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       const GrBackendFormat& backendFormat,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
                                      mipmapped, isProtected, label);
}

// Single-pixmap convenience: forwards to the array overload with one level.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap& srcData,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                      finishedProc, finishedContext, label);
}

// Single-pixmap convenience without an origin: forwards to the array overload, which
// supplies the default (top-left) origin.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap& srcData,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    return this->createBackendTexture(&srcData,
                                      1,
                                      renderable,
                                      isProtected,
                                      finishedProc,
                                      finishedContext,
                                      label);
}

// Mip-level array convenience: defaults the texture origin to top-left.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numLevels,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    return this->createBackendTexture(srcData,
                                      numLevels,
                                      kTopLeft_GrSurfaceOrigin,
                                      renderable,
                                      isProtected,
                                      finishedProc,
                                      finishedContext,
                                      label);
}

// Creates an uninitialized backend texture from an SkColorType by resolving the
// context's default backend format for that color type.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       SkColorType skColorType,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       std::string_view label) {
    if (this->abandoned()) {
        return GrBackendTexture();
    }

    const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);

    return this->createBackendTexture(
            width, height, format, mipmapped, renderable, isProtected, label);
}
765
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,skgpu::Mipmapped mipmapped,GrRenderable renderable,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,std::array<float,4> color,std::string_view label)766 static GrBackendTexture create_and_clear_backend_texture(
767 GrDirectContext* dContext,
768 SkISize dimensions,
769 const GrBackendFormat& backendFormat,
770 skgpu::Mipmapped mipmapped,
771 GrRenderable renderable,
772 GrProtected isProtected,
773 sk_sp<skgpu::RefCntedCallback> finishedCallback,
774 std::array<float, 4> color,
775 std::string_view label) {
776 GrGpu* gpu = dContext->priv().getGpu();
777 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
778 mipmapped, isProtected, label);
779 if (!beTex.isValid()) {
780 return {};
781 }
782
783 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
784 std::move(finishedCallback),
785 color)) {
786 dContext->deleteBackendTexture(beTex);
787 return {};
788 }
789 return beTex;
790 }
791
// Uploads |numLevels| mip levels from |src| into an existing backend texture by wrapping
// it in a borrowed proxy and writing through a SurfaceContext, then flushes that surface.
// Returns false if the pixmap color type is incompatible with the texture's format, the
// wrap fails, or the write fails. |finishedCallback| fires when the GPU reads complete.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrowed wrap: the client retains ownership of the backend texture.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    skgpu::Swizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::ganesh::SurfaceContext surfaceContext(
            context, std::move(view), src[0].info().colorInfo());
    // Repackage the SkPixmaps as GrCPixmaps for the write call.
    AutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    // Flush so the upload is recorded before the borrowed proxy goes away.
    context->priv().drawingManager()->flushSurfaces(
            {&p, 1}, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, nullptr);
    return true;
}
832
// Creates a backend texture with an explicit format, cleared to |color|. The finished
// callback is created before the abandoned-check so the client is always notified
// (RefCntedCallback fires on destruction even if we bail out early).
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array(),
                                            label);
}

// Creates a backend texture from an SkColorType, cleared to |color|. The clear color is
// pre-swizzled into the format's write order so the stored texels match |color|.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array(),
                                            label);
}
895
createBackendTexture(const SkPixmap srcData[],int numProvidedLevels,GrSurfaceOrigin textureOrigin,GrRenderable renderable,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext,std::string_view label)896 GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
897 int numProvidedLevels,
898 GrSurfaceOrigin textureOrigin,
899 GrRenderable renderable,
900 GrProtected isProtected,
901 GrGpuFinishedProc finishedProc,
902 GrGpuFinishedContext finishedContext,
903 std::string_view label) {
904 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
905
906 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
907
908 if (this->abandoned()) {
909 return {};
910 }
911
912 if (!srcData || numProvidedLevels <= 0) {
913 return {};
914 }
915
916 SkColorType colorType = srcData[0].colorType();
917
918 skgpu::Mipmapped mipmapped = skgpu::Mipmapped::kNo;
919 if (numProvidedLevels > 1) {
920 mipmapped = skgpu::Mipmapped::kYes;
921 }
922
923 GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
924 GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
925 srcData[0].height(),
926 backendFormat,
927 mipmapped,
928 renderable,
929 isProtected,
930 label);
931 if (!beTex.isValid()) {
932 return {};
933 }
934 if (!update_texture_with_pixmaps(this,
935 srcData,
936 numProvidedLevels,
937 beTex,
938 textureOrigin,
939 std::move(finishedCallback))) {
940 this->deleteBackendTexture(beTex);
941 return {};
942 }
943 return beTex;
944 }
945
updateBackendTexture(const GrBackendTexture & texture,const SkPixmap srcData[],int numLevels,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)946 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& texture,
947 const SkPixmap srcData[],
948 int numLevels,
949 GrGpuFinishedProc finishedProc,
950 GrGpuFinishedContext finishedContext) {
951 return this->updateBackendTexture(texture,
952 srcData,
953 numLevels,
954 kTopLeft_GrSurfaceOrigin,
955 finishedProc,
956 finishedContext);
957 }
958
updateBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)959 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
960 const SkColor4f& color,
961 GrGpuFinishedProc finishedProc,
962 GrGpuFinishedContext finishedContext) {
963 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
964
965 if (this->abandoned()) {
966 return false;
967 }
968
969 return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
970 }
971
updateBackendTexture(const GrBackendTexture & backendTexture,SkColorType skColorType,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)972 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
973 SkColorType skColorType,
974 const SkColor4f& color,
975 GrGpuFinishedProc finishedProc,
976 GrGpuFinishedContext finishedContext) {
977 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
978
979 if (this->abandoned()) {
980 return false;
981 }
982
983 GrBackendFormat format = backendTexture.getBackendFormat();
984 GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
985
986 if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
987 return false;
988 }
989
990 skgpu::Swizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
991 SkColor4f swizzledColor = swizzle.applyTo(color);
992
993 return fGpu->clearBackendTexture(backendTexture,
994 std::move(finishedCallback),
995 swizzledColor.array());
996 }
997
updateBackendTexture(const GrBackendTexture & backendTexture,const SkPixmap srcData[],int numLevels,GrSurfaceOrigin textureOrigin,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)998 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
999 const SkPixmap srcData[],
1000 int numLevels,
1001 GrSurfaceOrigin textureOrigin,
1002 GrGpuFinishedProc finishedProc,
1003 GrGpuFinishedContext finishedContext) {
1004 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1005
1006 if (this->abandoned()) {
1007 return false;
1008 }
1009
1010 if (!srcData || numLevels <= 0) {
1011 return false;
1012 }
1013
1014 // If the texture has MIP levels then we require that the full set is overwritten.
1015 int numExpectedLevels = 1;
1016 if (backendTexture.hasMipmaps()) {
1017 numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
1018 backendTexture.height()) + 1;
1019 }
1020 if (numLevels != numExpectedLevels) {
1021 return false;
1022 }
1023 return update_texture_with_pixmaps(this,
1024 srcData,
1025 numLevels,
1026 backendTexture,
1027 textureOrigin,
1028 std::move(finishedCallback));
1029 }
1030
1031 //////////////////////////////////////////////////////////////////////////////
1032
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,skgpu::Mipmapped mipmapped,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,const void * data,size_t size)1033 static GrBackendTexture create_and_update_compressed_backend_texture(
1034 GrDirectContext* dContext,
1035 SkISize dimensions,
1036 const GrBackendFormat& backendFormat,
1037 skgpu::Mipmapped mipmapped,
1038 GrProtected isProtected,
1039 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1040 const void* data,
1041 size_t size) {
1042 GrGpu* gpu = dContext->priv().getGpu();
1043
1044 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
1045 mipmapped, isProtected);
1046 if (!beTex.isValid()) {
1047 return {};
1048 }
1049
1050 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
1051 beTex, std::move(finishedCallback), data, size)) {
1052 dContext->deleteBackendTexture(beTex);
1053 return {};
1054 }
1055 return beTex;
1056 }
1057
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const SkColor4f & color,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1058 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1059 int width,
1060 int height,
1061 const GrBackendFormat& backendFormat,
1062 const SkColor4f& color,
1063 skgpu::Mipmapped mipmapped,
1064 GrProtected isProtected,
1065 GrGpuFinishedProc finishedProc,
1066 GrGpuFinishedContext finishedContext) {
1067 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1068 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1069
1070 if (this->abandoned()) {
1071 return {};
1072 }
1073
1074 SkTextureCompressionType compression = GrBackendFormatToCompressionType(backendFormat);
1075 if (compression == SkTextureCompressionType::kNone) {
1076 return {};
1077 }
1078
1079 size_t size = SkCompressedDataSize(
1080 compression, {width, height}, nullptr, mipmapped == skgpu::Mipmapped::kYes);
1081 auto storage = std::make_unique<char[]>(size);
1082 skgpu::FillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
1083 return create_and_update_compressed_backend_texture(this,
1084 {width, height},
1085 backendFormat,
1086 mipmapped,
1087 isProtected,
1088 std::move(finishedCallback),
1089 storage.get(),
1090 size);
1091 }
1092
createCompressedBackendTexture(int width,int height,SkTextureCompressionType compression,const SkColor4f & color,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1093 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1094 int width,
1095 int height,
1096 SkTextureCompressionType compression,
1097 const SkColor4f& color,
1098 skgpu::Mipmapped mipmapped,
1099 GrProtected isProtected,
1100 GrGpuFinishedProc finishedProc,
1101 GrGpuFinishedContext finishedContext) {
1102 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1103 GrBackendFormat format = this->compressedBackendFormat(compression);
1104 return this->createCompressedBackendTexture(width, height, format, color,
1105 mipmapped, isProtected, finishedProc,
1106 finishedContext);
1107 }
1108
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const void * compressedData,size_t dataSize,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1109 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1110 int width,
1111 int height,
1112 const GrBackendFormat& backendFormat,
1113 const void* compressedData,
1114 size_t dataSize,
1115 skgpu::Mipmapped mipmapped,
1116 GrProtected isProtected,
1117 GrGpuFinishedProc finishedProc,
1118 GrGpuFinishedContext finishedContext) {
1119 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1120 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1121
1122 if (this->abandoned()) {
1123 return {};
1124 }
1125
1126 return create_and_update_compressed_backend_texture(this,
1127 {width, height},
1128 backendFormat,
1129 mipmapped,
1130 isProtected,
1131 std::move(finishedCallback),
1132 compressedData,
1133 dataSize);
1134 }
1135
createCompressedBackendTexture(int width,int height,SkTextureCompressionType compression,const void * data,size_t dataSize,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1136 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1137 int width,
1138 int height,
1139 SkTextureCompressionType compression,
1140 const void* data,
1141 size_t dataSize,
1142 skgpu::Mipmapped mipmapped,
1143 GrProtected isProtected,
1144 GrGpuFinishedProc finishedProc,
1145 GrGpuFinishedContext finishedContext) {
1146 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1147 GrBackendFormat format = this->compressedBackendFormat(compression);
1148 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipmapped,
1149 isProtected, finishedProc, finishedContext);
1150 }
1151
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1152 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1153 const SkColor4f& color,
1154 GrGpuFinishedProc finishedProc,
1155 GrGpuFinishedContext finishedContext) {
1156 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1157
1158 if (this->abandoned()) {
1159 return false;
1160 }
1161
1162 SkTextureCompressionType compression =
1163 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1164 if (compression == SkTextureCompressionType::kNone) {
1165 return {};
1166 }
1167 size_t size = SkCompressedDataSize(compression,
1168 backendTexture.dimensions(),
1169 nullptr,
1170 backendTexture.hasMipmaps());
1171 SkAutoMalloc storage(size);
1172 skgpu::FillInCompressedData(compression,
1173 backendTexture.dimensions(),
1174 backendTexture.mipmapped(),
1175 static_cast<char*>(storage.get()),
1176 color);
1177 return fGpu->updateCompressedBackendTexture(backendTexture,
1178 std::move(finishedCallback),
1179 storage.get(),
1180 size);
1181 }
1182
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1183 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1184 const void* compressedData,
1185 size_t dataSize,
1186 GrGpuFinishedProc finishedProc,
1187 GrGpuFinishedContext finishedContext) {
1188 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1189
1190 if (this->abandoned()) {
1191 return false;
1192 }
1193
1194 if (!compressedData) {
1195 return false;
1196 }
1197
1198 return fGpu->updateCompressedBackendTexture(backendTexture,
1199 std::move(finishedCallback),
1200 compressedData,
1201 dataSize);
1202 }
1203
1204 //////////////////////////////////////////////////////////////////////////////
1205
setBackendTextureState(const GrBackendTexture & backendTexture,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1206 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1207 const skgpu::MutableTextureState& state,
1208 skgpu::MutableTextureState* previousState,
1209 GrGpuFinishedProc finishedProc,
1210 GrGpuFinishedContext finishedContext) {
1211 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1212
1213 if (this->abandoned()) {
1214 return false;
1215 }
1216
1217 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1218 }
1219
1220
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1221 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1222 const skgpu::MutableTextureState& state,
1223 skgpu::MutableTextureState* previousState,
1224 GrGpuFinishedProc finishedProc,
1225 GrGpuFinishedContext finishedContext) {
1226 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1227
1228 if (this->abandoned()) {
1229 return false;
1230 }
1231
1232 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1233 std::move(callback));
1234 }
1235
deleteBackendTexture(const GrBackendTexture & backendTex)1236 void GrDirectContext::deleteBackendTexture(const GrBackendTexture& backendTex) {
1237 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1238 // For the Vulkan backend we still must destroy the backend texture when the context is
1239 // abandoned.
1240 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1241 return;
1242 }
1243
1244 fGpu->deleteBackendTexture(backendTex);
1245 }
1246
1247 //////////////////////////////////////////////////////////////////////////////
1248
precompileShader(const SkData & key,const SkData & data)1249 bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
1250 return fGpu->precompileShader(key, data);
1251 }
1252
1253 #if defined(SK_ENABLE_DUMP_GPU)
1254 #include "include/core/SkString.h"
1255 #include "src/gpu/ganesh/GrUtil.h"
1256 #include "src/utils/SkJSONWriter.h"
1257
// Serializes a pretty-printed JSON description of this context — backend name, caps,
// GPU state, and context state — into an SkString. Only compiled when
// SK_ENABLE_DUMP_GPU is defined (see the surrounding #if).
SkString GrDirectContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    writer.appendCString("backend", GrBackendApiToStr(this->backend()));

    // Each subsystem appends its own JSON sub-object under a named key.
    writer.appendName("caps");
    this->caps()->dumpJSON(&writer);

    writer.appendName("gpu");
    this->fGpu->dumpJSON(&writer);

    writer.appendName("context");
    this->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the stream.
    // NOTE(review): bytesWritten() includes the terminator written above, so the
    // SkString's size counts that trailing 0 byte as well — presumably intentional;
    // confirm if consumers rely on result.size().
    SkString result(stream.bytesWritten());
    stream.copyToAndReset(result.data());
    return result;
}
1286 #endif
1287
1288 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1289 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1290 GrContextOptions defaultOptions;
1291 return MakeMock(mockOptions, defaultOptions);
1292 }
1293
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1294 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1295 const GrContextOptions& options) {
1296 sk_sp<GrDirectContext> direct(
1297 new GrDirectContext(GrBackendApi::kMock,
1298 options,
1299 GrContextThreadSafeProxyPriv::Make(GrBackendApi::kMock, options)));
1300
1301 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1302 if (!direct->init()) {
1303 return nullptr;
1304 }
1305
1306 return direct;
1307 }
1308
1309 #ifdef SK_DIRECT3D
1310 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1311 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1312 GrContextOptions defaultOptions;
1313 return MakeDirect3D(backendContext, defaultOptions);
1314 }
1315
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1316 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1317 const GrContextOptions& options) {
1318 sk_sp<GrDirectContext> direct(new GrDirectContext(
1319 GrBackendApi::kDirect3D,
1320 options,
1321 GrContextThreadSafeProxyPriv::Make(GrBackendApi::kDirect3D, options)));
1322
1323 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1324 if (!direct->init()) {
1325 return nullptr;
1326 }
1327
1328 return direct;
1329 }
1330 #endif
1331