/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrDrawingManager.h"

#include <algorithm>
#include <memory>

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/base/SkTInternalLList.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/gpu/ganesh/GrBufferTransferRenderTask.h"
#include "src/gpu/ganesh/GrBufferUpdateRenderTask.h"
#include "src/gpu/ganesh/GrClientMappedBufferManager.h"
#include "src/gpu/ganesh/GrCopyRenderTask.h"
#include "src/gpu/ganesh/GrDDLTask.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrMemoryPool.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrOnFlushResourceProvider.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"
#include "src/gpu/ganesh/GrRenderTargetProxy.h"
#include "src/gpu/ganesh/GrRenderTask.h"
#include "src/gpu/ganesh/GrRenderTaskCluster.h"
#include "src/gpu/ganesh/GrResourceAllocator.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrSurfaceProxyPriv.h"
#include "src/gpu/ganesh/GrTTopoSort.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrTextureProxy.h"
#include "src/gpu/ganesh/GrTextureProxyPriv.h"
#include "src/gpu/ganesh/GrTextureResolveRenderTask.h"
#include "src/gpu/ganesh/GrTracing.h"
#include "src/gpu/ganesh/GrTransferFromRenderTask.h"
#include "src/gpu/ganesh/GrWaitRenderTask.h"
#include "src/gpu/ganesh/GrWritePixelsRenderTask.h"
#include "src/gpu/ganesh/ops/OpsTask.h"
#include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"
#include "src/image/SkSurface_Gpu.h"
#include "src/text/gpu/SDFTControl.h"

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.size() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const skgpu::MutableTextureState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non-abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    bool preFlushSuccessful = true;
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider);
    }

    bool cachePurgeNeeded = false;

    if (preFlushSuccessful) {
        bool usingReorderedDAG = false;
        GrResourceAllocator resourceAllocator(dContext);
        if (fReduceOpsTaskSplitting) {
            usingReorderedDAG = this->reorderTasks(&resourceAllocator);
            if (!usingReorderedDAG) {
                resourceAllocator.reset();
            }
        }

#if 0
        // Enable this to print out verbose GrOp information
        SkDEBUGCODE(SkDebugf("RenderTasks (%d):\n", fDAG.count()));
        for (const auto& task : fDAG) {
            SkDEBUGCODE(task->dump(/* printDependencies */ true);)
        }
#endif

        if (!resourceAllocator.failedInstantiation()) {
            if (!usingReorderedDAG) {
                for (const auto& task : fDAG) {
                    SkASSERT(task);
                    task->gatherProxyIntervals(&resourceAllocator);
                }
                resourceAllocator.planAssignment();
            }
            resourceAllocator.assign();
        }

        cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
                           this->executeRenderTasks(&flushState);
    }
    this->removeRenderTasks();

    gpu->executeFlushInfo(proxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
        cachePurgeNeeded = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextFlushToken());
        cachePurgeNeeded = true;
    }
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
    }
    fFlushing = false;

    return true;
}

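// A minimal sketch of the flush/submit pairing this method expects from its
// callers, kept disabled like the debug block inside flush() above. It mirrors
// what flushIfNecessary() does at the bottom of this file; the free function
// and its parameter name are hypothetical.
#if 0
static void example_flush_then_submit(GrDrawingManager* drawingMgr) {
    // flush() only records and hands work off to the GrGpu; nothing reaches
    // the driver until submitToGpu(). Passing true would also block the CPU
    // until the submitted work completes.
    if (drawingMgr->flush({}, SkSurface::BackendSurfaceAccess::kNoAccess,
                          GrFlushInfo(), /*newState=*/nullptr)) {
        drawingMgr->submitToGpu(/*syncToCpu=*/false);
    }
}
#endif
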
bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}

bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.count());
    for (int i = 0; i < fDAG.count(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.count());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextFlushToken());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

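// Worked example for the cap above: with kMaxRenderTasksBeforeFlush == 100
// and, hypothetically, 250 instantiated tasks, the execute loop submits to
// the GPU after the 100th and 200th tasks and leaves the remaining 50 for
// the caller's final submit.
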
void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.clear();
    fReorderBlockerTaskIndices.clear();
    fLastRenderTasks.reset();
}

void GrDrawingManager::sortTasks() {
    // We separately sort the ranges around non-reorderable tasks.
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);

        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));
        SkASSERT(span.end() == fDAG.end() || fDAG[end]->blocksReordering());

#if defined(SK_GANESH) && defined(SK_DEBUG)
        // In order to partition the dag array like this it must be the case that each partition
        // only depends on nodes in the partition or earlier partitions.
        auto check = [&](const GrRenderTask* task, auto&& check) -> void {
            SkASSERT(GrRenderTask::TopoSortTraits::WasOutput(task) ||
                     std::find_if(span.begin(), span.end(), [task](const auto& n) {
                         return n.get() == task; }));
            for (int i = 0; i < task->fDependencies.size(); ++i) {
                check(task->fDependencies[i], check);
            }
        };
        for (const auto& node : span) {
            check(node.get(), check);
        }
#endif

        bool sorted = GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(span, start);
        if (!sorted) {
            SkDEBUGFAIL("Render task topo sort failed.");
        }

#ifdef SK_DEBUG
        if (sorted && !span.empty()) {
            // This block checks for any unnecessary splits in the opsTasks. If two sequential
            // opsTasks could have merged it means the opsTask was artificially split.
            auto prevOpsTask = span[0]->asOpsTask();
            for (size_t j = 1; j < span.size(); ++j) {
                auto curOpsTask = span[j]->asOpsTask();

                if (prevOpsTask && curOpsTask) {
                    SkASSERT(!prevOpsTask->canMerge(curOpsTask));
                }

                prevOpsTask = curOpsTask;
            }
        }
#endif
    }
}

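// Partitioning sketch for sortTasks() (hypothetical contents): with
// fDAG = [A, B, X, C, D] and fReorderBlockerTaskIndices = [2], where X blocks
// reordering, the loop above topo-sorts the spans [A, B] and [C, D]
// independently and never moves a task across a partition boundary.
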
// Reorder the array to match the llist without reffing & unreffing sk_sp's.
// Both args must contain the same objects.
// This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, SkTArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->size());
}

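// Behavior sketch (hypothetical values): given llist = [c, a, b] and
// array = [sk_sp(a), sk_sp(b), sk_sp(c)] holding the same three objects,
// reorder_array_by_llist leaves array = [sk_sp(c), sk_sp(a), sk_sp(b)].
// Because each slot is release()'d before reset(), no ref counts change.
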
bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    // We separately sort the ranges around non-reorderable tasks.
    bool clustered = false;
    SkTInternalLList<GrRenderTask> llist;
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);
        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));

        SkTInternalLList<GrRenderTask> subllist;
        if (GrClusterRenderTasks(span, &subllist)) {
            clustered = true;
        }

        if (i < fReorderBlockerTaskIndices.size()) {
            SkASSERT(fDAG[fReorderBlockerTaskIndices[i]]->blocksReordering());
            subllist.addToTail(fDAG[fReorderBlockerTaskIndices[i]].get());
        }
        llist.concat(std::move(subllist));
    }
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

    int newCount = 0;
    for (int i = 0; i < fDAG.size(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}

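// Compaction sketch for the merge loop above (hypothetical contents): if the
// reordered fDAG is [O1, O2, O3, T] and O1->mergeFrom() consumes O2 and O3
// (removeCount == 2), the loop disowns the merged tasks, skips over them, and
// resize_back() leaves fDAG = [O1, T].
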
void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(fContext);
        }
    }
}

GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (fDAG.empty()) {
        return fDAG.push_back(std::move(task)).get();
    }
    if (!fReorderBlockerTaskIndices.empty() && fReorderBlockerTaskIndices.back() == fDAG.size()) {
        fReorderBlockerTaskIndices.back()++;
    }
    fDAG.push_back(std::move(task));
    auto& penultimate = fDAG.fromBack(1);
    fDAG.back().swap(penultimate);
    return penultimate.get();
}

GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (task->blocksReordering()) {
        fReorderBlockerTaskIndices.push_back(fDAG.size());
    }
    return fDAG.push_back(std::move(task)).get();
}

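// Index bookkeeping sketch (hypothetical contents): after appendTask() calls
// on [A, X, B] where X blocks reordering, fDAG = [A, X, B] and
// fReorderBlockerTaskIndices = [1], i.e. each stored index is the blocker's
// position in fDAG. insertTaskBeforeLast() is meant to bump the last stored
// index when the new task displaces a trailing blocker one slot to the right.
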
static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu(false);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const skgpu::MutableTextureState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.size());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

skgpu::v1::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}

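// Usage sketch for the last-task map (hypothetical call sequence): after
// setLastRenderTask(proxy, taskA), getLastRenderTask(proxy) returns taskA;
// setLastRenderTask(proxy, nullptr) erases the entry so the proxy no longer
// reports a pending writer. getLastOpsTask() additionally narrows the result
// to an OpsTask, returning nullptr for other task types.
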
void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());
    fReorderBlockerTaskIndices.clear();

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const SkDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest,
                                     SkIPoint offset) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl),
                                                                       offset));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.size(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
        }
    }

    // The active opsTask, if any, should always be at the back of the DAG.
    if (!fDAG.empty()) {
        if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
            SkASSERT(fActiveOpsTask == nullptr);
            SkASSERT(!fDAG.back()->isClosed());
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);
        } else {
            SkASSERT(fActiveOpsTask == fDAG.back().get());
        }
    } else {
        SkASSERT(fActiveOpsTask == nullptr);
    }
}
#endif // SK_DEBUG

void GrDrawingManager::closeActiveOpsTask() {
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
}

sk_sp<skgpu::v1::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                                       sk_sp<GrArenas> arenas) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<skgpu::v1::OpsTask> opsTask(new skgpu::v1::OpsTask(this,
                                                             std::move(surfaceView),
                                                             fContext->priv().auditTrail(),
                                                             std::move(arenas)));

    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    this->appendTask(opsTask);

    fActiveOpsTask = opsTask.get();

    SkDEBUGCODE(this->validate());
    return opsTask;
}

void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
                                    GrRenderTask* previousAtlasTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);
        for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
            // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
            // This guarantees that the previous atlas is totally out of service before we render
            // the next one, meaning there is only ever one atlas active at a time and that they can
            // all share the same texture.
            atlasTask->addDependency(previousAtlasUser);
            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;
            }
        }
    }

    atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTaskBefore(
        const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                                   GrSurfaceProxy::ResolveFlags flags,
                                                   const GrCaps& caps) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (!proxy->requiresManualMSAAResolve()) {
        SkDEBUGCODE(this->validate());
        return;
    }

    GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
    if (!proxy->asRenderTargetProxy()->isMSAADirty() && (!lastTask || lastTask->isClosed())) {
        SkDEBUGCODE(this->validate());
        return;
    }

    this->closeActiveOpsTask();

    auto resolveTask = sk_make_sp<GrTextureResolveRenderTask>();
    // addProxy() also adds all the dependencies we need
    resolveTask->addProxy(this, std::move(proxy), flags, caps);

    auto task = this->appendTask(std::move(resolveTask));
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else {
        // In this case we just close the previous RenderTask and append the waitTask to the
        // DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
        // there is a lastTask on the proxy we make waitTask depend on that task. This
        // dependency isn't strictly needed but it does keep the DAG from reordering the
        // waitTask earlier and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                                             size_t srcOffset,
                                             sk_sp<GrGpuBuffer> dst,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(srcOffset + size <= src->size());
    SkASSERT(dstOffset + size <= dst->size());
    SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferTransferRenderTask::Make(std::move(src),
                                                                srcOffset,
                                                                std::move(dst),
                                                                dstOffset,
                                                                size);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newBufferUpdateTask(sk_sp<SkData> src,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(dstOffset + src->size() <= dst->size());
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(!dst->isMapped());

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferUpdateRenderTask::Make(std::move(src),
                                                              std::move(dst),
                                                              dstOffset);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                                        SkIRect dstRect,
                                                        sk_sp<GrSurfaceProxy> src,
                                                        SkIRect srcRect,
                                                        GrSamplerState::Filter filter,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be the
    // target of the active ops task. We currently require the active ops task to be closed before
    // making a new task that targets the same proxy. However, if we first close the active ops
    // task, then fail to make a copy task, the next active ops task may target the same proxy. This
    // will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      std::move(dst),
                                                      dstRect,
                                                      src,
                                                      srcRect,
                                                      filter,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, src.get(), GrMipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurface::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
skgpu::v1::PathRenderer* GrDrawingManager::getPathRenderer(
        const PathRenderer::CanDrawPathArgs& args,
        bool allowSW,
        PathRendererChain::DrawType drawType,
        PathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

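// Fallback sketch for the lookup above (disabled, illustrative only; the
// 'drawingMgr' and 'args' values are elided since they depend on caller
// state): the chain is consulted first, and the software renderer is only
// offered when allowSW is true and it reports it can draw the path.
#if 0
    PathRenderer::StencilSupport support;
    auto* pr = drawingMgr->getPathRenderer(args, /*allowSW=*/true,
                                           PathRendererChain::DrawType::kColor,
                                           &support);
    // pr may still be nullptr if even the software renderer declines.
#endif
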
skgpu::v1::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(new skgpu::v1::SoftwarePathRenderer(
            fContext->priv().proxyProvider(), fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

skgpu::v1::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getAtlasPathRenderer();
}

skgpu::v1::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush({}, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}