/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include <algorithm>
#include <memory>

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrDDLTask.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrRenderTaskCluster.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTTopoSort.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/GrWritePixelsRenderTask.h"
#include "src/gpu/text/GrSDFTControl.h"
#include "src/image/SkSurface_Gpu.h"

#if SK_GPU_V1
#include "src/gpu/ops/OpsTask.h"
#include "src/gpu/ops/SoftwarePathRenderer.h"
#endif

///////////////////////////////////////////////////////////////////////////////////////////////////
#if SK_GPU_V1
GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

#else

GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext, bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

#endif

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

#if SK_GPU_V1
    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
#endif
}

// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
    // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fFlushingRenderTaskIDs.reserve_back(fDAG.count());
        for (const auto& task : fDAG) {
            if (task) {
                task->gatherIDs(&fFlushingRenderTaskIDs);
            }
        }

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, SkMakeSpan(fFlushingRenderTaskIDs));
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(fContext);
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipmapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipmapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipmapsAreDirty());
                }
            });
#endif
#ifdef SK_ENABLE_STENCIL_CULLING_OHOS
            flushState.fDisableStencilCulling = fDisableStencilCulling;
            flushState.fHasStencilCullingOp = fHasStencilCullingOp;
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

    bool usingReorderedDAG = false;
    GrResourceAllocator resourceAllocator(dContext);
    if (fReduceOpsTaskSplitting) {
        usingReorderedDAG = this->reorderTasks(&resourceAllocator);
        if (!usingReorderedDAG) {
            resourceAllocator.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
    for (const auto& task : fDAG) {
        SkDEBUGCODE(task->dump(/* printDependencies */ true);)
    }
#endif

    if (!resourceAllocator.failedInstantiation()) {
        if (!usingReorderedDAG) {
            for (const auto& task : fDAG) {
                SkASSERT(task);
                task->gatherProxyIntervals(&resourceAllocator);
            }
            resourceAllocator.planAssignment();
        }
        resourceAllocator.assign();
    }
    bool flushed = !resourceAllocator.failedInstantiation() &&
                    this->executeRenderTasks(&flushState);
    this->removeRenderTasks();

    gpu->executeFlushInfo(proxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(),
                                   SkMakeSpan(fFlushingRenderTaskIDs));
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;
#ifdef SK_ENABLE_STENCIL_CULLING_OHOS
    fDisableStencilCulling = false;
    fHasStencilCullingOp = false;
#endif

    return true;
}

bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}

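// Prepares and executes every instantiated render task in the (possibly reordered) DAG,
// periodically submitting work to the GPU to relieve memory pressure. Returns true if any
// render task actually executed.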
bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.count());
    for (int i = 0; i < fDAG.count(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.count());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask->disown(this);
        onFlushRenderTask = nullptr;
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

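// Detaches every render task from this drawing manager (after a flush, or at destruction) and
// clears the per-proxy "last render task" bookkeeping.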
void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.reset();
    fLastRenderTasks.reset();
    for (const sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        onFlushRenderTask->disown(this);
    }
    fOnFlushRenderTasks.reset();
}

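// Topologically sorts fDAG so that every render task appears after the tasks it depends on.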
void GrDrawingManager::sortTasks() {
    if (!GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fDAG)) {
        SkDEBUGFAIL("Render task topo sort failed.");
        return;
    }

#if SK_GPU_V1 && defined(SK_DEBUG)
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // could have merged it means the opsTask was artificially split.
    if (!fDAG.empty()) {
        auto prevOpsTask = fDAG[0]->asOpsTask();
        for (int i = 1; i < fDAG.count(); ++i) {
            auto curOpsTask = fDAG[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(!prevOpsTask->canMerge(curOpsTask));
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

// Reorder the array to match the llist without reffing & unreffing sk_sp's.
// Both args must contain the same objects.
// This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, SkTArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->count());
}

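// Attempts to cluster render tasks and merge adjacent opsTasks in order to reduce opsTask
// splitting. Returns false (leaving fDAG in its sorted order) if clustering fails or if the
// reordered plan would not fit within the resource budget.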
bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    SkTInternalLList<GrRenderTask> llist;
    bool clustered = GrClusterRenderTasks(SkMakeSpan(fDAG), &llist);
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

    int newCount = 0;
    for (int i = 0; i < fDAG.count(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
#if SK_GPU_V1
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
#endif
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}

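// Closes every render task in the DAG so that none of them can receive further commands.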
void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(fContext);
        }
    }
}

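// Inserts 'task' immediately before the last task in the DAG so that whatever task is currently
// last (typically the active opsTask) stays at the back.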
GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (fDAG.empty()) {
        return fDAG.push_back(std::move(task)).get();
    }
    // Release 'fDAG.back()' and grab the raw pointer, in case the SkTArray grows
    // and reallocates during emplace_back.
    // TODO: Either use std::vector that can do this for us, or use SkSTArray to get the
    // perf win.
    fDAG.emplace_back(fDAG.back().release());
    return (fDAG[fDAG.count() - 2] = std::move(task)).get();
}

GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    return fDAG.push_back(std::move(task)).get();
}

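// Helper used after a flush: resolves any dirty MSAA and regenerates dirty mipmaps on an
// instantiated proxy so its backing texture is fully up to date for the client.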
static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu(false);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be better to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

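// Records which render task is currently the last one to target 'proxy', or clears the entry
// when 'task' is null.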
void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

skgpu::v1::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}

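// Transfers the recorded render tasks into 'ddl' at the end of DDL recording, along with the
// arenas and program data the tasks refer to.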
void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const SkDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest,
                                     SkIPoint offset) {
    SkDEBUGCODE(this->validate());

#if SK_GPU_V1
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
#endif

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl),
                                                                       offset));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
#if SK_GPU_V1
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.count(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
        }
    }

    // The active opsTask, if any, should always be at the back of the DAG.
    if (!fDAG.empty()) {
        if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
            SkASSERT(fActiveOpsTask == nullptr);
            SkASSERT(!fDAG.back()->isClosed());
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);
        } else {
            SkASSERT(fActiveOpsTask == fDAG.back().get());
        }
    } else {
        SkASSERT(fActiveOpsTask == nullptr);
    }
#endif // SK_GPU_V1
}
#endif // SK_DEBUG

void GrDrawingManager::closeActiveOpsTask() {
#if SK_GPU_V1
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
#endif
}

#if SK_GPU_V1
sk_sp<skgpu::v1::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                                       sk_sp<GrArenas> arenas,
                                                       bool flushTimeOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<skgpu::v1::OpsTask> opsTask(new skgpu::v1::OpsTask(this,
                                                             std::move(surfaceView),
                                                             fContext->priv().auditTrail(),
                                                             std::move(arenas)));

    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    if (flushTimeOpsTask) {
        fOnFlushRenderTasks.push_back(opsTask);
    } else {
        this->appendTask(opsTask);

        fActiveOpsTask = opsTask.get();
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}

void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
                                    GrRenderTask* previousAtlasTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);
        for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
            // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
            // This guarantees that the previous atlas is totally out of service before we render
            // the next one, meaning there is only ever one atlas active at a time and that they can
            // all share the same texture.
            atlasTask->addDependency(previousAtlasUser);
            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;
            }
        }
    }

    atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
}
#endif // SK_GPU_V1

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

#if SK_GPU_V1
    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else
#endif
    {
        // In this case we just close the previous RenderTask and append the waitTask to the
        // DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
        // there is a lastTask on the proxy we make waitTask depend on that task. This
        // dependency isn't strictly needed but it does keep the DAG from reordering the
        // waitTask earlier and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
                                                        SkIRect srcRect,
                                                        sk_sp<GrSurfaceProxy> dst,
                                                        SkIPoint dstPoint,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be the
    // target of the active ops task. We currently require the active ops task to be closed before
    // making a new task that targets the same proxy. However, if we first close the active ops
    // task, then fail to make a copy task, the next active ops task may target the same proxy. This
    // will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      src,
                                                      srcRect,
                                                      std::move(dst),
                                                      dstPoint,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, src.get(), GrMipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurface::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

#if SK_GPU_V1
/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
skgpu::v1::PathRenderer* GrDrawingManager::getPathRenderer(
        const PathRenderer::CanDrawPathArgs& args,
        bool allowSW,
        PathRendererChain::DrawType drawType,
        PathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

skgpu::v1::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(new skgpu::v1::SoftwarePathRenderer(
            fContext->priv().proxyProvider(), fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

skgpu::v1::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getAtlasPathRenderer();
}

skgpu::v1::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}

#endif // SK_GPU_V1

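// Flushes and submits pending work when the resource cache has requested a flush, then lets the
// cache purge anything that became purgeable.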
void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush({}, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}

#ifdef SK_ENABLE_STENCIL_CULLING_OHOS
void GrDrawingManager::disableStencilCulling() {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "disableStencilCulling", fContext);
    fDisableStencilCulling = true;
}
#endif