/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "src/gpu/ganesh/GrDrawingManager.h"

#include "include/core/SkData.h"
#include "include/core/SkRect.h"
#include "include/core/SkSize.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/gpu/ganesh/GrRecordingContext.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkTo.h"
#include "include/private/chromium/GrDeferredDisplayList.h"
#include "include/private/chromium/GrSurfaceCharacterization.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/base/SkTInternalLList.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GpuTypesPriv.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrBufferTransferRenderTask.h"
#include "src/gpu/ganesh/GrBufferUpdateRenderTask.h"
#include "src/gpu/ganesh/GrClientMappedBufferManager.h"
#include "src/gpu/ganesh/GrCopyRenderTask.h"
#include "src/gpu/ganesh/GrDDLTask.h"
#include "src/gpu/ganesh/GrDeferredDisplayListPriv.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrOnFlushResourceProvider.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"
#include "src/gpu/ganesh/GrRenderTargetProxy.h"
#include "src/gpu/ganesh/GrRenderTask.h"
#include "src/gpu/ganesh/GrRenderTaskCluster.h"
#include "src/gpu/ganesh/GrResourceAllocator.h"
#include "src/gpu/ganesh/GrResourceCache.h"
#include "src/gpu/ganesh/GrSurfaceProxyView.h"
#include "src/gpu/ganesh/GrTTopoSort.h"
#include "src/gpu/ganesh/GrTextureProxy.h"
#include "src/gpu/ganesh/GrTextureResolveManager.h"
#include "src/gpu/ganesh/GrTextureResolveRenderTask.h"
#include "src/gpu/ganesh/GrTracing.h"
#include "src/gpu/ganesh/GrTransferFromRenderTask.h"
#include "src/gpu/ganesh/GrWaitRenderTask.h"
#include "src/gpu/ganesh/GrWritePixelsRenderTask.h"
#include "src/gpu/ganesh/ops/GrOp.h"
#include "src/gpu/ganesh/ops/OpsTask.h"
#include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"

#include <algorithm>
#include <memory>
#include <optional>
#include <utility>

using namespace skia_private;
///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
                                   const PathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(rContext)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.size() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

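// High-level flow of a flush: close any still-open tasks, sort (and optionally reorder and
// cluster) the DAG, give the onFlush callback objects a chance to add work, assign backing
// resources via GrResourceAllocator, execute the render tasks into the GrOpFlushState, and
// finally pass the GrFlushInfo on to the GrGpu.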
// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurfaces::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const skgpu::MutableTextureState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurfaces::BackendSurfaceAccess::kNoAccess && !newState) {
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here means new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    std::optional<GrTimerQuery> timerQuery;
    if (info.fFinishedWithStatsProc && (info.fGpuStatsFlags & skgpu::GpuStatsFlags::kElapsedTime)) {
        timerQuery = gpu->startTimerQuery();
    }
    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    bool preFlushSuccessful = true;
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider);
    }

    bool cachePurgeNeeded = false;

    if (preFlushSuccessful) {
        bool usingReorderedDAG = false;
        GrResourceAllocator resourceAllocator(dContext);
        if (fReduceOpsTaskSplitting) {
            usingReorderedDAG = this->reorderTasks(&resourceAllocator);
            if (!usingReorderedDAG) {
                resourceAllocator.reset();
            }
        }

#if 0
        // Enable this to print out verbose GrOp information
        SkDEBUGCODE(SkDebugf("RenderTasks (%d):\n", fDAG.count()));
        for (const auto& task : fDAG) {
            SkDEBUGCODE(task->dump(/* printDependencies */ true);)
        }
#endif

        if (!resourceAllocator.failedInstantiation()) {
            if (!usingReorderedDAG) {
                for (const auto& task : fDAG) {
                    SkASSERT(task);
                    task->gatherProxyIntervals(&resourceAllocator);
                }
                resourceAllocator.planAssignment();
            }
            resourceAllocator.assign();
        }

        cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
                           this->executeRenderTasks(&flushState);
    }
    this->removeRenderTasks();

    gpu->executeFlushInfo(proxies, access, info, std::move(timerQuery), newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
        cachePurgeNeeded = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextFlushToken());
        cachePurgeNeeded = true;
    }
    if (cachePurgeNeeded) {
        resourceCache->purgeAsNeeded();
    }
    fFlushing = false;

    return true;
}

bool GrDrawingManager::submitToGpu() {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu();
}

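// Prepares and then executes every instantiated render task in DAG order. Work is submitted to
// the GPU in batches: once kMaxRenderTasksBeforeFlush tasks (or kMaxRenderPassesBeforeFlush
// render passes) have accumulated, the pending work is submitted to bound per-submit resource
// usage.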
bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.size());
    for (int i = 0; i < fDAG.size(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.size());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Unlike kMaxRenderTasksBeforeFlush, this is a global limit.
    static constexpr int kMaxRenderPassesBeforeFlush = 100;

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush ||
            flushState->gpu()->getCurrentSubmitRenderPassCount() >= kMaxRenderPassesBeforeFlush) {
            flushState->gpu()->submitToGpu();
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextFlushToken());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.clear();
    fReorderBlockerTaskIndices.clear();
    fLastRenderTasks.reset();
}

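// sortTasks() topologically sorts each run of reorderable tasks independently. For example (names
// are illustrative), if fDAG is [A, B, W, C, D] and W blocks reordering (so
// fReorderBlockerTaskIndices == {2}), then {A, B} and {C, D} are each topo-sorted in place while
// W stays fixed between them.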
void GrDrawingManager::sortTasks() {
    // We separately sort the ranges around non-reorderable tasks.
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);

        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));
        SkASSERT(span.end() == fDAG.end() || fDAG[end]->blocksReordering());

#if defined(SK_DEBUG)
        // In order to partition the dag array like this it must be the case that each partition
        // only depends on nodes in the partition or earlier partitions.
        auto check = [&](const GrRenderTask* task, auto&& check) -> void {
            SkASSERT(GrRenderTask::TopoSortTraits::WasOutput(task) ||
                     std::find_if(span.begin(), span.end(), [task](const auto& n) {
                         return n.get() == task; }));
            for (int i = 0; i < task->fDependencies.size(); ++i) {
                check(task->fDependencies[i], check);
            }
        };
        for (const auto& node : span) {
            check(node.get(), check);
        }
#endif

        bool sorted = GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(span, start);
        if (!sorted) {
            SkDEBUGFAIL("Render task topo sort failed.");
        }

#ifdef SK_DEBUG
        if (sorted && !span.empty()) {
            // This block checks for any unnecessary splits in the opsTasks. If two sequential
            // opsTasks could have merged it means the opsTask was artificially split.
            auto prevOpsTask = span[0]->asOpsTask();
            for (size_t j = 1; j < span.size(); ++j) {
                auto curOpsTask = span[j]->asOpsTask();

                if (prevOpsTask && curOpsTask) {
                    SkASSERT(!prevOpsTask->canMerge(curOpsTask));
                }

                prevOpsTask = curOpsTask;
            }
        }
#endif
    }
}

// Reorder the array to match the llist without reffing & unreffing sk_sp's.
// Both args must contain the same objects.
// This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, TArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->size());
}

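// reorderTasks() tries to cluster tasks that target the same surface so that opsTasks which were
// artificially split can later be merged. It clusters each reorderable span into a linked list,
// plans the resource assignment for the reordered order, bails out (returning false) if
// clustering changed nothing or the reordered DAG would exceed the memory budget, and finally
// merges adjacent opsTasks in place.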
bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    // We separately sort the ranges around non-reorderable tasks.
    bool clustered = false;
    SkTInternalLList<GrRenderTask> llist;
    for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
        end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
        SkSpan span(fDAG.begin() + start, end - start);
        SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
            return t->blocksReordering();
        }));

        SkTInternalLList<GrRenderTask> subllist;
        if (GrClusterRenderTasks(span, &subllist)) {
            clustered = true;
        }

        if (i < fReorderBlockerTaskIndices.size()) {
            SkASSERT(fDAG[fReorderBlockerTaskIndices[i]]->blocksReordering());
            subllist.addToTail(fDAG[fReorderBlockerTaskIndices[i]].get());
        }
        llist.concat(std::move(subllist));
    }
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

    int newCount = 0;
    for (int i = 0; i < fDAG.size(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}

void GrDrawingManager::closeAllTasks() {
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(fContext);
        }
    }
}

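// Inserts 'task' immediately before the last task in the DAG. This is used for tasks (e.g. atlas
// tasks and semaphore waits) that must run before the currently active opsTask without closing it.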
GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (fDAG.empty()) {
        return fDAG.push_back(std::move(task)).get();
    }
    if (!fReorderBlockerTaskIndices.empty() && fReorderBlockerTaskIndices.back() == fDAG.size()) {
        fReorderBlockerTaskIndices.back()++;
    }
    fDAG.push_back(std::move(task));
    auto& penultimate = fDAG.fromBack(1);
    fDAG.back().swap(penultimate);
    return penultimate.get();
}

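// Appends 'task' to the end of the DAG, recording its index in fReorderBlockerTaskIndices if it
// blocks reordering so that sortTasks()/reorderTasks() can partition around it.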
GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    if (task->blocksReordering()) {
        fReorderBlockerTaskIndices.push_back(fDAG.size());
    }
    return fDAG.push_back(std::move(task)).get();
}

static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu();
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(SkSpan<GrSurfaceProxy*> proxies,
                                                      SkSurfaces::BackendSurfaceAccess access,
                                                      const GrFlushInfo& info,
                                                      const skgpu::MutableTextureState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->backendSemaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if defined(GPU_TEST_UTILS)
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.size());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

skgpu::ganesh::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}

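// Called at the end of DDL recording: the recorded render tasks (along with the recording
// context's arenas and program data) are transferred into the GrDeferredDisplayList, leaving this
// drawing manager empty. createDDLTask() later replays those tasks against a real target proxy.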
void GrDrawingManager::moveRenderTasksToDDL(GrDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());
    fReorderBlockerTaskIndices.clear();

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const GrDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && skgpu::Mipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl)));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.size(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
            SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
        }
    }

    // The active opsTask, if any, should always be at the back of the DAG.
    if (!fDAG.empty()) {
        if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
            SkASSERT(fActiveOpsTask == nullptr);
            SkASSERT(!fDAG.back()->isClosed());
        } else if (fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == nullptr);
        } else {
            SkASSERT(fActiveOpsTask == fDAG.back().get());
        }
    } else {
        SkASSERT(fActiveOpsTask == nullptr);
    }
}
#endif // SK_DEBUG

void GrDrawingManager::closeActiveOpsTask() {
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(fContext);
        fActiveOpsTask = nullptr;
    }
}

sk_sp<skgpu::ganesh::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                                           sk_sp<GrArenas> arenas) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<skgpu::ganesh::OpsTask> opsTask(new skgpu::ganesh::OpsTask(
            this, std::move(surfaceView), fContext->priv().auditTrail(), std::move(arenas)));

    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    this->appendTask(opsTask);

    fActiveOpsTask = opsTask.get();

    SkDEBUGCODE(this->validate());
    return opsTask;
}

void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
                                    GrRenderTask* previousAtlasTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (previousAtlasTask) {
        previousAtlasTask->makeClosed(fContext);
        for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
            // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
            // This guarantees that the previous atlas is totally out of service before we render
            // the next one, meaning there is only ever one atlas active at a time and that they can
            // all share the same texture.
            atlasTask->addDependency(previousAtlasUser);
            previousAtlasUser->makeClosed(fContext);
            if (previousAtlasUser == fActiveOpsTask) {
                fActiveOpsTask = nullptr;
            }
        }
    }

    atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
    this->insertTaskBeforeLast(std::move(atlasTask));

    SkDEBUGCODE(this->validate());
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTaskBefore(
        const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                                   GrSurfaceProxy::ResolveFlags flags,
                                                   const GrCaps& caps) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (!proxy->requiresManualMSAAResolve()) {
        SkDEBUGCODE(this->validate());
        return;
    }

    GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
    if (!proxy->asRenderTargetProxy()->isMSAADirty() && (!lastTask || lastTask->isClosed())) {
        SkDEBUGCODE(this->validate());
        return;
    }

    this->closeActiveOpsTask();

    auto resolveTask = sk_make_sp<GrTextureResolveRenderTask>();
    // Adding the proxy also adds all the dependencies we need.
    resolveTask->addProxy(this, std::move(proxy), flags, caps);

    auto task = this->appendTask(std::move(resolveTask));
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else {
        // In this case we just close the previous RenderTask and start and append the waitTask
        // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
        // there is a lastTask on the proxy we make waitTask depend on that task. This
        // dependency isn't strictly needed but it does keep the DAG from reordering the
        // waitTask earlier and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(fContext);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base
    // layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(
            this, srcProxy.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                                             size_t srcOffset,
                                             sk_sp<GrGpuBuffer> dst,
                                             size_t dstOffset,
                                             size_t size) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(srcOffset + size <= src->size());
    SkASSERT(dstOffset + size <= dst->size());
    SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferTransferRenderTask::Make(std::move(src),
                                                                srcOffset,
                                                                std::move(dst),
                                                                dstOffset,
                                                                size);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newBufferUpdateTask(sk_sp<SkData> src,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(dstOffset + src->size() <= dst->size());
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(!dst->isMapped());

    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrBufferUpdateRenderTask::Make(std::move(src),
                                                              std::move(dst),
                                                              dstOffset);
    SkASSERT(task);

    this->appendTask(task);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                                        SkIRect dstRect,
                                                        const sk_sp<GrSurfaceProxy>& src,
                                                        SkIRect srcRect,
                                                        GrSamplerState::Filter filter,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be the
    // target of the active ops task. We currently require the active ops task to be closed before
    // making a new task that targets the same proxy. However, if we first close the active ops
    // task, then fail to make a copy task, the next active ops task may target the same proxy. This
    // will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      std::move(dst),
                                                      dstRect,
                                                      src,
                                                      srcRect,
                                                      filter,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base layer
    // to another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(
            this, src.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurfaces::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(fContext);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
skgpu::ganesh::PathRenderer* GrDrawingManager::getPathRenderer(
        const PathRenderer::CanDrawPathArgs& args,
        bool allowSW,
        PathRendererChain::DrawType drawType,
        PathRenderer::StencilSupport* stencilSupport) {
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

skgpu::ganesh::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(new skgpu::ganesh::SoftwarePathRenderer(
                fContext->priv().proxyProvider(),
                fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

skgpu::ganesh::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getAtlasPathRenderer();
}

skgpu::ganesh::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
                                                                 fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}