/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include <algorithm>
#include <memory>

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrDDLTask.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrRenderTaskCluster.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTTopoSort.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/GrWritePixelsRenderTask.h"
#include "src/gpu/text/GrSDFTControl.h"
#include "src/image/SkSurface_Gpu.h"

#if SK_GPU_V1
#include "src/gpu/ops/OpsTask.h"
#include "src/gpu/ops/SoftwarePathRenderer.h"
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#if SK_GPU_V1
GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

#else

GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext, bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

#endif

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

#if SK_GPU_V1
    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
#endif
}
// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non-abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
    // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fFlushingRenderTaskIDs.reserve_back(fDAG.count());
        for (const auto& task : fDAG) {
            if (task) {
                task->gatherIDs(&fFlushingRenderTaskIDs);
            }
        }

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, SkMakeSpan(fFlushingRenderTaskIDs));
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(fContext);
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipmapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipmapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipmapsAreDirty());
                }
            });
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

    bool usingReorderedDAG = false;
    GrResourceAllocator resourceAllocator(dContext);
    if (fReduceOpsTaskSplitting) {
        usingReorderedDAG = this->reorderTasks(&resourceAllocator);
        if (!usingReorderedDAG) {
            resourceAllocator.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
    for (const auto& task : fDAG) {
        SkDEBUGCODE(task->dump(/* printDependencies */ true);)
    }
#endif

    if (!resourceAllocator.failedInstantiation()) {
        if (!usingReorderedDAG) {
            for (const auto& task : fDAG) {
                SkASSERT(task);
                task->gatherProxyIntervals(&resourceAllocator);
            }
            resourceAllocator.planAssignment();
        }
        resourceAllocator.assign();
    }
    bool flushed = !resourceAllocator.failedInstantiation() &&
                    this->executeRenderTasks(&flushState);
    this->removeRenderTasks();

    gpu->executeFlushInfo(proxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(),
                                   SkMakeSpan(fFlushingRenderTaskIDs));
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return true;
}

bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}
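
// Illustrative sketch (not part of this file) of how a client typically reaches
// flush() and submitToGpu() through the public API. The entry points named below
// are assumptions about the caller rather than anything this file defines:
//
//     GrDirectContext* dContext = /* obtained by the client */;
//     dContext->flush(GrFlushInfo());       // drives GrDrawingManager::flush over the DAG
//     dContext->submit(/*syncCpu=*/false);  // forwards to GrDrawingManager::submitToGpu
//
// A DDL recording context has no GrDirectContext, which is why submitToGpu() above
// bails out when fContext->asDirectContext() is null.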

bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.count());
    for (int i = 0; i < fDAG.count(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.count());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
             continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask->disown(this);
        onFlushRenderTask = nullptr;
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.reset();
    fLastRenderTasks.reset();
    for (const sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        onFlushRenderTask->disown(this);
    }
    fOnFlushRenderTasks.reset();
}

void GrDrawingManager::sortTasks() {
    if (!GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fDAG)) {
        SkDEBUGFAIL("Render task topo sort failed.");
        return;
    }

#if SK_GPU_V1 && defined(SK_DEBUG)
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // could have merged it means the opsTask was artificially split.
    if (!fDAG.empty()) {
        auto prevOpsTask = fDAG[0]->asOpsTask();
        for (int i = 1; i < fDAG.count(); ++i) {
            auto curOpsTask = fDAG[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(!prevOpsTask->canMerge(curOpsTask));
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

// Reorder the array to match the llist without reffing & unreffing sk_sp's.
// Both args must contain the same objects.
// This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, SkTArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->count());
}

bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    SkTInternalLList<GrRenderTask> llist;
    bool clustered = GrClusterRenderTasks(SkMakeSpan(fDAG), &llist);
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

    int newCount = 0;
    for (int i = 0; i < fDAG.count(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
#if SK_GPU_V1
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
#endif
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}
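
// Worked example (illustrative only) of the merge/compact loop above: suppose the
// clustered order is [A, B, C, D] where B, C, and D are opsTasks and C and D can fold
// into B. At i == 1, B->mergeFrom({C, D}) returns 2, C and D are disowned, and 'i'
// skips past them; after compaction fDAG holds [A, B] and newCount == 2.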

void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(fContext);
        }
    }
}

GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (fDAG.empty()) {
        return fDAG.push_back(std::move(task)).get();
    }
    // Release 'fDAG.back()' and grab the raw pointer, in case the SkTArray grows
    // and reallocates during emplace_back.
    // TODO: Either use std::vector that can do this for us, or use SkSTArray to get the
    // perf win.
    fDAG.emplace_back(fDAG.back().release());
    return (fDAG[fDAG.count() - 2] = std::move(task)).get();
}
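
// Illustrative example: with fDAG == [A, B, C] and C still open as fActiveOpsTask,
// insertTaskBeforeLast(X) yields [A, B, X, C]. The previous back task stays last, so
// validate()'s "active opsTask is the last entry in the DAG" invariant is preserved.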

GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    return fDAG.push_back(std::move(task)).get();
}

static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu(false);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

skgpu::v1::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}


void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const SkDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest,
                                     SkIPoint offset) {
    SkDEBUGCODE(this->validate());

#if SK_GPU_V1
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
#endif

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl),
                                                                       offset));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
#if SK_GPU_V1
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.count(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
        }
    }

    // The active opsTask, if any, should always be at the back of the DAG.
    if (!fDAG.empty()) {
        if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
            SkASSERT(fActiveOpsTask == nullptr);
            SkASSERT(!fDAG.back()->isClosed());
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);
        } else {
            SkASSERT(fActiveOpsTask == fDAG.back().get());
        }
    } else {
        SkASSERT(fActiveOpsTask == nullptr);
    }
#endif // SK_GPU_V1
}
#endif // SK_DEBUG

void GrDrawingManager::closeActiveOpsTask() {
#if SK_GPU_V1
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
#endif
}

#if SK_GPU_V1
sk_sp<skgpu::v1::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                                       sk_sp<GrArenas> arenas,
                                                       bool flushTimeOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<skgpu::v1::OpsTask> opsTask(new skgpu::v1::OpsTask(this,
                                                             std::move(surfaceView),
                                                             fContext->priv().auditTrail(),
                                                             std::move(arenas)));

    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    if (flushTimeOpsTask) {
        fOnFlushRenderTasks.push_back(opsTask);
    } else {
        this->appendTask(opsTask);

        fActiveOpsTask = opsTask.get();
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}

void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
                                    GrRenderTask* previousAtlasTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);
        for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
            // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
            // This guarantees that the previous atlas is totally out of service before we render
            // the next one, meaning there is only ever one atlas active at a time and that they can
            // all share the same texture.
            atlasTask->addDependency(previousAtlasUser);
            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;
            }
        }
    }

    atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
}
#endif // SK_GPU_V1

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

#if SK_GPU_V1
    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else
#endif
    {
        // In this case we just close the previous RenderTask and append the waitTask to the
        // DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
        // there is a lastTask on the proxy we make waitTask depend on that task. This
        // dependency isn't strictly needed but it does keep the DAG from reordering the
        // waitTask earlier and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
}
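
// Rough sketch of the client-side path that typically lands in newWaitRenderTask
// (the SkSurface entry point below is an assumption about the caller, not something
// this file defines):
//
//     GrBackendSemaphore semaphores[2] = /* supplied by the client */;
//     surface->wait(2, semaphores);   // SkSurface routes the semaphores down to here
//
// The wait task is closed immediately above because it never receives ops of its own.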

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
                                                        SkIRect srcRect,
                                                        sk_sp<GrSurfaceProxy> dst,
                                                        SkIPoint dstPoint,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be the
    // target of the active ops task. We currently require the active ops task to be closed before
    // making a new task that targets the same proxy. However, if we first close the active ops
    // task, then fail to make a copy task, the next active ops task may target the same proxy. This
    // will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      src,
                                                      srcRect,
                                                      std::move(dst),
                                                      dstPoint,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, src.get(), GrMipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurface::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

#if SK_GPU_V1
/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
skgpu::v1::PathRenderer* GrDrawingManager::getPathRenderer(
        const PathRenderer::CanDrawPathArgs& args,
        bool allowSW,
        PathRendererChain::DrawType drawType,
        PathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

skgpu::v1::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(new skgpu::v1::SoftwarePathRenderer(
            fContext->priv().proxyProvider(), fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

skgpu::v1::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getAtlasPathRenderer();
}

skgpu::v1::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}

#endif // SK_GPU_V1

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush({}, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}