1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 #include "src/gpu/ganesh/GrDrawingManager.h"
8 
9 #include "include/core/SkData.h"
10 #include "include/core/SkRect.h"
11 #include "include/core/SkSize.h"
12 #include "include/core/SkSurface.h"
13 #include "include/gpu/GpuTypes.h"
14 #include "include/gpu/ganesh/GrDirectContext.h"
15 #include "include/gpu/ganesh/GrRecordingContext.h"
16 #include "include/gpu/ganesh/GrTypes.h"
17 #include "include/private/base/SkAssert.h"
18 #include "include/private/base/SkTo.h"
19 #include "include/private/chromium/GrDeferredDisplayList.h"
20 #include "include/private/chromium/GrSurfaceCharacterization.h"
21 #include "include/private/gpu/ganesh/GrTypesPriv.h"
22 #include "src/base/SkTInternalLList.h"
23 #include "src/core/SkTraceEvent.h"
24 #include "src/gpu/GpuTypesPriv.h"
25 #include "src/gpu/ganesh/GrAuditTrail.h"
26 #include "src/gpu/ganesh/GrBufferTransferRenderTask.h"
27 #include "src/gpu/ganesh/GrBufferUpdateRenderTask.h"
28 #include "src/gpu/ganesh/GrClientMappedBufferManager.h"
29 #include "src/gpu/ganesh/GrCopyRenderTask.h"
30 #include "src/gpu/ganesh/GrDDLTask.h"
31 #include "src/gpu/ganesh/GrDeferredDisplayListPriv.h"
32 #include "src/gpu/ganesh/GrDirectContextPriv.h"
33 #include "src/gpu/ganesh/GrGpu.h"
34 #include "src/gpu/ganesh/GrGpuBuffer.h"
35 #include "src/gpu/ganesh/GrNativeRect.h"
36 #include "src/gpu/ganesh/GrOnFlushResourceProvider.h"
37 #include "src/gpu/ganesh/GrOpFlushState.h"
38 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
39 #include "src/gpu/ganesh/GrRenderTargetProxy.h"
40 #include "src/gpu/ganesh/GrRenderTask.h"
41 #include "src/gpu/ganesh/GrRenderTaskCluster.h"
42 #include "src/gpu/ganesh/GrResourceAllocator.h"
43 #include "src/gpu/ganesh/GrResourceCache.h"
44 #include "src/gpu/ganesh/GrSurfaceProxyView.h"
45 #include "src/gpu/ganesh/GrTTopoSort.h"
46 #include "src/gpu/ganesh/GrTextureProxy.h"
47 #include "src/gpu/ganesh/GrTextureResolveManager.h"
48 #include "src/gpu/ganesh/GrTextureResolveRenderTask.h"
49 #include "src/gpu/ganesh/GrTracing.h"
50 #include "src/gpu/ganesh/GrTransferFromRenderTask.h"
51 #include "src/gpu/ganesh/GrWaitRenderTask.h"
52 #include "src/gpu/ganesh/GrWritePixelsRenderTask.h"
53 #include "src/gpu/ganesh/ops/GrOp.h"
54 #include "src/gpu/ganesh/ops/OpsTask.h"
55 #include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"
56 
57 #include <algorithm>
58 #include <memory>
59 #include <optional>
60 #include <utility>
61 
62 using namespace skia_private;
63 
64 ///////////////////////////////////////////////////////////////////////////////////////////////////
65 GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
66                                    const PathRendererChain::Options& optionsForPathRendererChain,
67                                    bool reduceOpsTaskSplitting)
68         : fContext(rContext)
69         , fOptionsForPathRendererChain(optionsForPathRendererChain)
70         , fPathRendererChain(nullptr)
71         , fSoftwarePathRenderer(nullptr)
72         , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
73 }
74 
75 GrDrawingManager::~GrDrawingManager() {
76     this->closeAllTasks();
77     this->removeRenderTasks();
78 }
79 
80 bool GrDrawingManager::wasAbandoned() const {
81     return fContext->abandoned();
82 }
83 
84 void GrDrawingManager::freeGpuResources() {
85     for (int i = fOnFlushCBObjects.size() - 1; i >= 0; --i) {
86         if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
87             // it's safe to just do this because we're iterating in reverse
88             fOnFlushCBObjects.removeShuffle(i);
89         }
90     }
91 
92     // a path renderer may be holding onto resources
93     fPathRendererChain = nullptr;
94     fSoftwarePathRenderer = nullptr;
95 }
96 
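// Typically reached via GrDirectContext::flush()/submit(): closes and sorts the recorded
// render tasks, runs resource allocation, executes the tasks, and notifies the registered
// on-flush callback objects.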
97 // MDB TODO: make use of the 'proxies' parameter.
98 bool GrDrawingManager::flush(SkSpan<GrSurfaceProxy*> proxies,
99                              SkSurfaces::BackendSurfaceAccess access,
100                              const GrFlushInfo& info,
101                              const skgpu::MutableTextureState* newState) {
102     GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);
103 
104     if (fFlushing || this->wasAbandoned()) {
105         if (info.fSubmittedProc) {
106             info.fSubmittedProc(info.fSubmittedContext, false);
107         }
108         if (info.fFinishedProc) {
109             info.fFinishedProc(info.fFinishedContext);
110         }
111         return false;
112     }
113 
114     SkDEBUGCODE(this->validate());
115 
116     // As of now we only short-circuit if we got an explicit list of surfaces to flush.
117     if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
118         access == SkSurfaces::BackendSurfaceAccess::kNoAccess && !newState) {
119         bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
120             bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
121                 return task && task->isUsed(proxy);
122             });
123             return !used;
124         });
125         if (allUnused) {
126             if (info.fSubmittedProc) {
127                 info.fSubmittedProc(info.fSubmittedContext, true);
128             }
129             return false;
130         }
131     }
132 
133     auto dContext = fContext->asDirectContext();
134     SkASSERT(dContext);
135     dContext->priv().clientMappedBufferManager()->process();
136 
137     GrGpu* gpu = dContext->priv().getGpu();
138     // We have a non-abandoned and direct GrContext. It must have a GrGpu.
139     SkASSERT(gpu);
140 
141     fFlushing = true;
142 
143     auto resourceProvider = dContext->priv().resourceProvider();
144     auto resourceCache = dContext->priv().getResourceCache();
145 
146     // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
147     // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
148     // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
149     // if the SkGpuDevice(s) write to them again.
150     this->closeAllTasks();
151     fActiveOpsTask = nullptr;
152 
153     this->sortTasks();
154 
155     if (!fCpuBufferCache) {
156         // We cache more buffers when the backend is using client side arrays. Otherwise, we
157         // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
158         // buffer object. Each pool only requires one staging buffer at a time.
159         int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
160         fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
161     }
162 
163     GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);
164 
165     std::optional<GrTimerQuery> timerQuery;
166     if (info.fFinishedWithStatsProc && (info.fGpuStatsFlags & skgpu::GpuStatsFlags::kElapsedTime)) {
167         timerQuery = gpu->startTimerQuery();
168     }
169     GrOnFlushResourceProvider onFlushProvider(this);
170 
171     // Prepare any onFlush op lists (e.g. atlases).
172     bool preFlushSuccessful = true;
173     for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
174         preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider);
175     }
176 
177     bool cachePurgeNeeded = false;
178 
179     if (preFlushSuccessful) {
180         bool usingReorderedDAG = false;
181         GrResourceAllocator resourceAllocator(dContext);
182         if (fReduceOpsTaskSplitting) {
183             usingReorderedDAG = this->reorderTasks(&resourceAllocator);
184             if (!usingReorderedDAG) {
185                 resourceAllocator.reset();
186             }
187         }
188 
189 #if 0
190         // Enable this to print out verbose GrOp information
191         SkDEBUGCODE(SkDebugf("RenderTasks (%d):\n", fDAG.count()));
192         for (const auto& task : fDAG) {
193             SkDEBUGCODE(task->dump(/* printDependencies */ true);)
194         }
195 #endif
196 
197         if (!resourceAllocator.failedInstantiation()) {
198             if (!usingReorderedDAG) {
199                 for (const auto& task : fDAG) {
200                     SkASSERT(task);
201                     task->gatherProxyIntervals(&resourceAllocator);
202                 }
203                 resourceAllocator.planAssignment();
204             }
205             resourceAllocator.assign();
206         }
207 
208         cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
209                            this->executeRenderTasks(&flushState);
210     }
211     this->removeRenderTasks();
212 
213     gpu->executeFlushInfo(proxies, access, info, std::move(timerQuery), newState);
214 
215     // Give the cache a chance to purge resources that become purgeable due to flushing.
216     if (cachePurgeNeeded) {
217         resourceCache->purgeAsNeeded();
218         cachePurgeNeeded = false;
219     }
220     for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
221         onFlushCBObject->postFlush(fTokenTracker.nextFlushToken());
222         cachePurgeNeeded = true;
223     }
224     if (cachePurgeNeeded) {
225         resourceCache->purgeAsNeeded();
226     }
227     fFlushing = false;
228 #ifdef SKIA_OHOS
229     fNumDrawOp = 0;
230 #endif
231 
232     return true;
233 }
234 
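// Hands any flushed work to the GPU backend for submission. Returns false while a flush is in
// progress, after the context has been abandoned, or when there is no direct context (i.e.,
// while recording a DDL).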
235 bool GrDrawingManager::submitToGpu() {
236     if (fFlushing || this->wasAbandoned()) {
237         return false;
238     }
239 
240     auto direct = fContext->asDirectContext();
241     if (!direct) {
242         return false; // Can't submit while DDL recording
243     }
244     GrGpu* gpu = direct->priv().getGpu();
245     return gpu->submitToGpu();
246 }
247 
248 bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
249 #if GR_FLUSH_TIME_OP_SPEW
250     SkDebugf("Flushing %d opsTasks\n", fDAG.size());
251     for (int i = 0; i < fDAG.size(); ++i) {
252         if (fDAG[i]) {
253             SkString label;
254             label.printf("task %d/%d", i, fDAG.size());
255             fDAG[i]->dump(label, {}, true, true);
256         }
257     }
258 #endif
259 
260     bool anyRenderTasksExecuted = false;
261 
262     for (const auto& renderTask : fDAG) {
263         if (!renderTask || !renderTask->isInstantiated()) {
264              continue;
265         }
266 
267         SkASSERT(renderTask->deferredProxiesAreInstantiated());
268 
269         renderTask->prepare(flushState);
270     }
271 
272     // Upload all data to the GPU
273     flushState->preExecuteDraws();
274 
275     // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
276     // for each command buffer associated with the oplists. If this gets too large we can cause the
277     // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
278     // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
279     // memory pressure.
280     static constexpr int kMaxRenderTasksBeforeFlush = 100;
281     int numRenderTasksExecuted = 0;
282 
283     // Execute the normal op lists.
284 #ifdef SKIA_OHOS
285     int numOpsTaskExecuted = 0;
286     int numOpsExecuted = 0;
287 #endif
288     for (const auto& renderTask : fDAG) {
289         SkASSERT(renderTask);
290         if (!renderTask->isInstantiated()) {
291             continue;
292         }
293 
294         if (renderTask->execute(flushState)) {
295             anyRenderTasksExecuted = true;
296 #ifdef SKIA_OHOS
297             if (UNLIKELY(SkOHOSDebugLevelTraceUtil::getEnableDebugTrace()) && renderTask->asOpsTask()) {
298                 numOpsTaskExecuted++;
299                 numOpsExecuted += renderTask->asOpsTask()->getNumOpChainsExecuted();
300             }
301 #endif
302         }
303         if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
304             flushState->gpu()->submitToGpu();
305             numRenderTasksExecuted = 0;
306         }
307     }
308 
309 #ifdef SKIA_OHOS
310     HITRACE_OHOS_NAME_FMT_LEVEL(DebugTraceLevel::NORMAL, "Add: %d drawOps, Flush: %d opsTasks, %d ops",
311         fNumDrawOp, numOpsTaskExecuted, numOpsExecuted);
312 #endif
313 
314     SkASSERT(!flushState->opsRenderPass());
315     SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextFlushToken());
316 
317     // We reset the flush state before the RenderTasks so that the last resources to be freed are
318     // those that are written to in the RenderTasks. This helps to make sure the most recently used
319     // resources are the last to be purged by the resource cache.
320     flushState->reset();
321 
322     return anyRenderTasksExecuted;
323 }
324 
325 void GrDrawingManager::removeRenderTasks() {
326     for (const auto& task : fDAG) {
327         SkASSERT(task);
328         if (!task->unique() || task->requiresExplicitCleanup()) {
329             // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
330             // DDLs, however, will always require an explicit notification for when they
331             // can clean up resources.
332             task->endFlush(this);
333         }
334         task->disown(this);
335     }
336     fDAG.clear();
337     fReorderBlockerTaskIndices.clear();
338     fLastRenderTasks.reset();
339 }
340 
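// Topologically sorts the DAG so that every task appears after its dependencies. Each span of
// tasks between reorder-blocking tasks is sorted independently.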
341 void GrDrawingManager::sortTasks() {
342     // We separately sort the ranges around non-reorderable tasks.
343     for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
344         end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
345         SkSpan span(fDAG.begin() + start, end - start);
346 
347         SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
348             return t->blocksReordering();
349         }));
350         SkASSERT(span.end() == fDAG.end() || fDAG[end]->blocksReordering());
351 
352 #if defined(SK_DEBUG)
353         // In order to partition the dag array like this it must be the case that each partition
354         // only depends on nodes in the partition or earlier partitions.
355         auto check = [&](const GrRenderTask* task, auto&& check) -> void {
356             SkASSERT(GrRenderTask::TopoSortTraits::WasOutput(task) ||
357                      std::find_if(span.begin(), span.end(), [task](const auto& n) {
358                          return n.get() == task; }));
359             for (int i = 0; i < task->fDependencies.size(); ++i) {
360                 check(task->fDependencies[i], check);
361             }
362         };
363         for (const auto& node : span) {
364             check(node.get(), check);
365         }
366 #endif
367 
368         bool sorted = GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(span, start);
369         if (!sorted) {
370             SkDEBUGFAIL("Render task topo sort failed.");
371         }
372 
373 #ifdef SK_DEBUG
374         if (sorted && !span.empty()) {
375             // This block checks for any unnecessary splits in the opsTasks. If two sequential
376             // opsTasks could have merged it means the opsTask was artificially split.
377             auto prevOpsTask = span[0]->asOpsTask();
378             for (size_t j = 1; j < span.size(); ++j) {
379                 auto curOpsTask = span[j]->asOpsTask();
380 
381                 if (prevOpsTask && curOpsTask) {
382                     SkASSERT(!prevOpsTask->canMerge(curOpsTask));
383                 }
384 
385                 prevOpsTask = curOpsTask;
386             }
387         }
388 #endif
389     }
390 }
391 
392 // Reorder the array to match the llist without reffing & unreffing sk_sp's.
393 // Both args must contain the same objects.
394 // This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
395 template <typename T>
396 static void reorder_array_by_llist(const SkTInternalLList<T>& llist, TArray<sk_sp<T>>* array) {
397     int i = 0;
398     for (T* t : llist) {
399         // Release the pointer that used to live here so it doesn't get unreffed.
400         [[maybe_unused]] T* old = array->at(i).release();
401         array->at(i++).reset(t);
402     }
403     SkASSERT(i == array->size());
404 }
405 
406 bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
407     SkASSERT(fReduceOpsTaskSplitting);
408     // We separately sort the ranges around non-reorderable tasks.
409     bool clustered = false;
410     SkTInternalLList<GrRenderTask> llist;
411     for (size_t i = 0, start = 0, end; start < SkToSizeT(fDAG.size()); ++i, start = end + 1) {
412         end = i == fReorderBlockerTaskIndices.size() ? fDAG.size() : fReorderBlockerTaskIndices[i];
413         SkSpan span(fDAG.begin() + start, end - start);
414         SkASSERT(std::none_of(span.begin(), span.end(), [](const auto& t) {
415             return t->blocksReordering();
416         }));
417 
418         SkTInternalLList<GrRenderTask> subllist;
419         if (GrClusterRenderTasks(span, &subllist)) {
420             clustered = true;
421         }
422 
423         if (i < fReorderBlockerTaskIndices.size()) {
424             SkASSERT(fDAG[fReorderBlockerTaskIndices[i]]->blocksReordering());
425             subllist.addToTail(fDAG[fReorderBlockerTaskIndices[i]].get());
426         }
427         llist.concat(std::move(subllist));
428     }
429     if (!clustered) {
430         return false;
431     }
432 
433     for (GrRenderTask* task : llist) {
434         task->gatherProxyIntervals(resourceAllocator);
435     }
436     if (!resourceAllocator->planAssignment()) {
437         return false;
438     }
439     if (!resourceAllocator->makeBudgetHeadroom()) {
440         auto dContext = fContext->asDirectContext();
441         SkASSERT(dContext);
442         dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
443         return false;
444     }
445     reorder_array_by_llist(llist, &fDAG);
446 
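    // Merge runs of adjacent opsTasks produced by clustering. Tasks that are merged away are
    // disowned, and the DAG is compacted in place.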
447     int newCount = 0;
448     for (int i = 0; i < fDAG.size(); i++) {
449         sk_sp<GrRenderTask>& task = fDAG[i];
450         if (auto opsTask = task->asOpsTask()) {
451             size_t remaining = fDAG.size() - i - 1;
452             SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
453             int removeCount = opsTask->mergeFrom(nextTasks);
454             for (const auto& removed : nextTasks.first(removeCount)) {
455                 removed->disown(this);
456             }
457             i += removeCount;
458         }
459         fDAG[newCount++] = std::move(task);
460     }
461     fDAG.resize_back(newCount);
462     return true;
463 }
464 
465 void GrDrawingManager::closeAllTasks() {
466     for (auto& task : fDAG) {
467         if (task) {
468             task->makeClosed(fContext);
469         }
470     }
471 }
472 
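// Appends 'task' and then swaps it into place so it runs immediately before the DAG's current
// final task (used, e.g., for the atlas and texture-resolve tasks added in this file). Returns
// the inserted task.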
473 GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
474     if (!task) {
475         return nullptr;
476     }
477     if (fDAG.empty()) {
478         return fDAG.push_back(std::move(task)).get();
479     }
480     if (!fReorderBlockerTaskIndices.empty() && fReorderBlockerTaskIndices.back() == fDAG.size()) {
481         fReorderBlockerTaskIndices.back()++;
482     }
483     fDAG.push_back(std::move(task));
484     auto& penultimate = fDAG.fromBack(1);
485     fDAG.back().swap(penultimate);
486     return penultimate.get();
487 }
488 
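// Adds 'task' to the back of the DAG. If the task blocks reordering, its index is recorded so
// that sorting and reordering treat it as a partition boundary.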
489 GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
490     if (!task) {
491         return nullptr;
492     }
493     if (task->blocksReordering()) {
494         fReorderBlockerTaskIndices.push_back(fDAG.size());
495     }
496     return fDAG.push_back(std::move(task)).get();
497 }
498 
499 static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
500     if (!proxy->isInstantiated()) {
501         return;
502     }
503 
504     // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
505     // because clients expect the flushed surface's backing texture to be fully resolved
506     // upon return.
507     if (proxy->requiresManualMSAAResolve()) {
508         auto* rtProxy = proxy->asRenderTargetProxy();
509         SkASSERT(rtProxy);
510         if (rtProxy->isMSAADirty()) {
511             SkASSERT(rtProxy->peekRenderTarget());
512             gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
513             gpu->submitToGpu();
514             rtProxy->markMSAAResolved();
515         }
516     }
517     // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
518     // case their backend textures are being stolen.
519     // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
520     // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
521     if (auto* textureProxy = proxy->asTextureProxy()) {
522         if (textureProxy->mipmapsAreDirty()) {
523             SkASSERT(textureProxy->peekTexture());
524             gpu->regenerateMipMapLevels(textureProxy->peekTexture());
525             textureProxy->markMipmapsClean();
526         }
527     }
528 }
529 
530 GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(SkSpan<GrSurfaceProxy*> proxies,
531                                                       SkSurfaces::BackendSurfaceAccess access,
532                                                       const GrFlushInfo& info,
533                                                       const skgpu::MutableTextureState* newState) {
534     if (this->wasAbandoned()) {
535         if (info.fSubmittedProc) {
536             info.fSubmittedProc(info.fSubmittedContext, false);
537         }
538         if (info.fFinishedProc) {
539             info.fFinishedProc(info.fFinishedContext);
540         }
541         return GrSemaphoresSubmitted::kNo;
542     }
543     SkDEBUGCODE(this->validate());
544 
545     auto direct = fContext->asDirectContext();
546     SkASSERT(direct);
547     GrGpu* gpu = direct->priv().getGpu();
548     // We have a non-abandoned and direct GrContext. It must have a GrGpu.
549     SkASSERT(gpu);
550 
551     // TODO: It is important to upgrade the drawingmanager to just flushing the
552     // portion of the DAG required by 'proxies' in order to restore some of the
553     // semantics of this method.
554     bool didFlush = this->flush(proxies, access, info, newState);
555     for (GrSurfaceProxy* proxy : proxies) {
556         resolve_and_mipmap(gpu, proxy);
557     }
558 
559     SkDEBUGCODE(this->validate());
560 
561     if (!didFlush || (!direct->priv().caps()->backendSemaphoreSupport() && info.fNumSemaphores)) {
562         return GrSemaphoresSubmitted::kNo;
563     }
564     return GrSemaphoresSubmitted::kYes;
565 }
566 
567 void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
568     fOnFlushCBObjects.push_back(onFlushCBObject);
569 }
570 
571 #if defined(GPU_TEST_UTILS)
572 void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
573     int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
574             fOnFlushCBObjects.begin();
575     SkASSERT(n < fOnFlushCBObjects.size());
576     fOnFlushCBObjects.removeShuffle(n);
577 }
578 #endif
579 
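// Records 'task' as the last render task targeting 'proxy', keyed by the proxy's unique ID.
// Passing a null task removes any existing entry.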
580 void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
581 #ifdef SK_DEBUG
582     if (auto prior = this->getLastRenderTask(proxy)) {
583         SkASSERT(prior->isClosed() || prior == task);
584     }
585 #endif
586     uint32_t key = proxy->uniqueID().asUInt();
587     if (task) {
588         fLastRenderTasks.set(key, task);
589     } else if (fLastRenderTasks.find(key)) {
590         fLastRenderTasks.remove(key);
591     }
592 }
593 
594 GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
595     auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
596     return entry ? *entry : nullptr;
597 }
598 
599 skgpu::ganesh::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
600     GrRenderTask* task = this->getLastRenderTask(proxy);
601     return task ? task->asOpsTask() : nullptr;
602 }
603 
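// Moves the recorded render tasks into 'ddl': all tasks are closed and sorted, the DAG is
// swapped into the DDL, and the recording context's arenas and program data are detached
// into it.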
604 void GrDrawingManager::moveRenderTasksToDDL(GrDeferredDisplayList* ddl) {
605     SkDEBUGCODE(this->validate());
606 
607     // no renderTask should receive a new command after this
608     this->closeAllTasks();
609     fActiveOpsTask = nullptr;
610 
611     this->sortTasks();
612 
613     fDAG.swap(ddl->fRenderTasks);
614     SkASSERT(fDAG.empty());
615     fReorderBlockerTaskIndices.clear();
616 
617     for (auto& renderTask : ddl->fRenderTasks) {
618         renderTask->disown(this);
619         renderTask->prePrepare(fContext);
620     }
621 
622     ddl->fArenas = std::move(fContext->priv().detachArenas());
623 
624     fContext->priv().detachProgramData(&ddl->fProgramData);
625 
626     SkDEBUGCODE(this->validate());
627 }
628 
629 void GrDrawingManager::createDDLTask(sk_sp<const GrDeferredDisplayList> ddl,
630                                      sk_sp<GrRenderTargetProxy> newDest) {
631     SkDEBUGCODE(this->validate());
632 
633     if (fActiveOpsTask) {
634         // This is a temporary fix for the partial-MDB world. In that world we're not
635         // reordering so ops that (in the single opsTask world) would've just glommed onto the
636         // end of the single opsTask but referred to a far earlier RT need to appear in their
637         // own opsTask.
638         fActiveOpsTask->makeClosed(fContext);
639         fActiveOpsTask = nullptr;
640     }
641 
642     // Propagate the DDL proxy's state information to the replay target.
643     if (ddl->priv().targetProxy()->isMSAADirty()) {
644         auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
645                 ddl->characterization().origin(),
646                 ddl->priv().targetProxy()->backingStoreDimensions().height(),
647                 ddl->priv().targetProxy()->msaaDirtyRect());
648         newDest->markMSAADirty(nativeRect);
649     }
650     GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
651     if (newTextureProxy && skgpu::Mipmapped::kYes == newTextureProxy->mipmapped()) {
652         newTextureProxy->markMipmapsDirty();
653     }
654 
655     // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
656     // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
657     ddl->fLazyProxyData->fReplayDest = newDest.get();
658 
659     // Add a task to handle drawing and lifetime management of the DDL.
660     SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
661                                                                        std::move(newDest),
662                                                                        std::move(ddl)));
663     SkASSERT(ddlTask->isClosed());
664 
665     SkDEBUGCODE(this->validate());
666 }
667 
668 #ifdef SK_DEBUG
669 void GrDrawingManager::validate() const {
670     if (fActiveOpsTask) {
671         SkASSERT(!fDAG.empty());
672         SkASSERT(!fActiveOpsTask->isClosed());
673         SkASSERT(fActiveOpsTask == fDAG.back().get());
674     }
675 
676     for (int i = 0; i < fDAG.size(); ++i) {
677         if (fActiveOpsTask != fDAG[i].get()) {
678             // The resolveTask associated with the activeTask remains open for as long as the
679             // activeTask does.
680             bool isActiveResolveTask =
681                 fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
682             bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
683             SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
684         }
685     }
686 
687     // The active opsTask, if any, should always be at the back of the DAG.
688     if (!fDAG.empty()) {
689         if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
690             SkASSERT(fActiveOpsTask == nullptr);
691             SkASSERT(!fDAG.back()->isClosed());
692         } else if (fDAG.back()->isClosed()) {
693             SkASSERT(fActiveOpsTask == nullptr);
694         } else {
695             SkASSERT(fActiveOpsTask == fDAG.back().get());
696         }
697     } else {
698         SkASSERT(fActiveOpsTask == nullptr);
699     }
700 }
701 #endif // SK_DEBUG
702 
703 void GrDrawingManager::closeActiveOpsTask() {
704     if (fActiveOpsTask) {
705         // This is a temporary fix for the partial-MDB world. In that world we're not
706         // reordering so ops that (in the single opsTask world) would've just glommed onto the
707         // end of the single opsTask but referred to a far earlier RT need to appear in their
708         // own opsTask.
709         fActiveOpsTask->makeClosed(fContext);
710         fActiveOpsTask = nullptr;
711     }
712 }
713 
714 sk_sp<skgpu::ganesh::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
715                                                            sk_sp<GrArenas> arenas) {
716     SkDEBUGCODE(this->validate());
717     SkASSERT(fContext);
718 
719     this->closeActiveOpsTask();
720 
721     sk_sp<skgpu::ganesh::OpsTask> opsTask(new skgpu::ganesh::OpsTask(
722             this, std::move(surfaceView), fContext->priv().auditTrail(), std::move(arenas)));
723 
724     SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());
725 
726     this->appendTask(opsTask);
727 
728     fActiveOpsTask = opsTask.get();
729 
730     SkDEBUGCODE(this->validate());
731     return opsTask;
732 }
733 
734 void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
735                                     GrRenderTask* previousAtlasTask) {
736     SkDEBUGCODE(this->validate());
737     SkASSERT(fContext);
738 
739     if (previousAtlasTask) {
740         previousAtlasTask->makeClosed(fContext);
741         for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
742             // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
743             // This guarantees that the previous atlas is totally out of service before we render
744             // the next one, meaning there is only ever one atlas active at a time and that they can
745             // all share the same texture.
746             atlasTask->addDependency(previousAtlasUser);
747             previousAtlasUser->makeClosed(fContext);
748             if (previousAtlasUser == fActiveOpsTask) {
749                 fActiveOpsTask = nullptr;
750             }
751         }
752     }
753 
754     atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
755     this->insertTaskBeforeLast(std::move(atlasTask));
756 
757     SkDEBUGCODE(this->validate());
758 }
759 
760 GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTaskBefore(
761         const GrCaps& caps) {
762     // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
763     // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
764     // state. This is because those opsTasks can still receive new ops and because if they refer to
765     // the mipmapped version of 'proxy', they will then come to depend on the render task being
766     // created here.
767     //
768     // Add the new textureResolveTask before the fActiveOpsTask (if not in
769     // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
770     // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
771     GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
772     return static_cast<GrTextureResolveRenderTask*>(task);
773 }
774 
775 void GrDrawingManager::newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
776                                                    GrSurfaceProxy::ResolveFlags flags,
777                                                    const GrCaps& caps) {
778     SkDEBUGCODE(this->validate());
779     SkASSERT(fContext);
780 
781     if (!proxy->requiresManualMSAAResolve()) {
782         SkDEBUGCODE(this->validate());
783         return;
784     }
785 
786     GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
787     if (!proxy->asRenderTargetProxy()->isMSAADirty() && (!lastTask || lastTask->isClosed())) {
788         SkDEBUGCODE(this->validate());
789         return;
790     }
791 
792     this->closeActiveOpsTask();
793 
794     auto resolveTask = sk_make_sp<GrTextureResolveRenderTask>();
795     // Add proxy also adds all the needed dependencies we need
796     resolveTask->addProxy(this, std::move(proxy), flags, caps);
797 
798     auto task = this->appendTask(std::move(resolveTask));
799     task->makeClosed(fContext);
800 
801     // We have closed the previous active oplist but since a new oplist isn't being added there
802     // shouldn't be an active one.
803     SkASSERT(!fActiveOpsTask);
804     SkDEBUGCODE(this->validate());
805 }
806 
807 void GrDrawingManager::newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
808                                          std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
809                                          int numSemaphores) {
810     SkDEBUGCODE(this->validate());
811     SkASSERT(fContext);
812 
813     sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
814                                                                     std::move(semaphores),
815                                                                     numSemaphores);
816 
817     if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
818         SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
819         this->insertTaskBeforeLast(waitTask);
820         // In this case we keep the current renderTask open but just insert the new waitTask
821         // before it in the list. The waitTask will never need to trigger any resolves or mip
822         // map generation which is the main advantage of going through the proxy version.
823         // Additionally we would've had to temporarily set the wait task as the lastRenderTask
824         // on the proxy, add the dependency, and then reset the lastRenderTask to
825         // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
826         // dependencies so that we don't unnecessarily reorder the waitTask before them.
827         // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
828         // semaphore even though they don't need to be for correctness.
829 
830         // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
831         // get a circular self dependency of waitTask on waitTask.
832         waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
833         fActiveOpsTask->addDependency(waitTask.get());
834     } else {
835         // In this case we just close the previous RenderTask and start and append the waitTask
836         // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
837         // there is a lastTask on the proxy we make waitTask depend on that task. This
838         // dependency isn't strictly needed but it does keep the DAG from reordering the
839         // waitTask earlier and blocking more tasks.
840         if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
841             waitTask->addDependency(lastTask);
842         }
843         this->setLastRenderTask(proxy.get(), waitTask.get());
844         this->closeActiveOpsTask();
845         this->appendTask(waitTask);
846     }
847     waitTask->makeClosed(fContext);
848 
849     SkDEBUGCODE(this->validate());
850 }
851 
852 void GrDrawingManager::newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy,
853                                                  const SkIRect& srcRect,
854                                                  GrColorType surfaceColorType,
855                                                  GrColorType dstColorType,
856                                                  sk_sp<GrGpuBuffer> dstBuffer,
857                                                  size_t dstOffset) {
858     SkDEBUGCODE(this->validate());
859     SkASSERT(fContext);
860     this->closeActiveOpsTask();
861 
862     GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
863             srcProxy, srcRect, surfaceColorType, dstColorType,
864             std::move(dstBuffer), dstOffset));
865 
866     const GrCaps& caps = *fContext->priv().caps();
867 
868     // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base
869     // layer. We don't need to make sure the whole mip map chain is valid.
870     task->addDependency(
871             this, srcProxy.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
872     task->makeClosed(fContext);
873 
874     // We have closed the previous active oplist but since a new oplist isn't being added there
875     // shouldn't be an active one.
876     SkASSERT(!fActiveOpsTask);
877     SkDEBUGCODE(this->validate());
878 }
879 
880 void GrDrawingManager::newBufferTransferTask(sk_sp<GrGpuBuffer> src,
881                                              size_t srcOffset,
882                                              sk_sp<GrGpuBuffer> dst,
883                                              size_t dstOffset,
884                                              size_t size) {
885     SkASSERT(src);
886     SkASSERT(dst);
887     SkASSERT(srcOffset + size <= src->size());
888     SkASSERT(dstOffset + size <= dst->size());
889     SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
890     SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
891 
892     SkDEBUGCODE(this->validate());
893     SkASSERT(fContext);
894 
895     this->closeActiveOpsTask();
896 
897     sk_sp<GrRenderTask> task = GrBufferTransferRenderTask::Make(std::move(src),
898                                                                 srcOffset,
899                                                                 std::move(dst),
900                                                                 dstOffset,
901                                                                 size);
902     SkASSERT(task);
903 
904     this->appendTask(task);
905     task->makeClosed(fContext);
906 
907     // We have closed the previous active oplist but since a new oplist isn't being added there
908     // shouldn't be an active one.
909     SkASSERT(!fActiveOpsTask);
910     SkDEBUGCODE(this->validate());
911 }
912 
913 void GrDrawingManager::newBufferUpdateTask(sk_sp<SkData> src,
914                                            sk_sp<GrGpuBuffer> dst,
915                                            size_t dstOffset) {
916     SkASSERT(src);
917     SkASSERT(dst);
918     SkASSERT(dstOffset + src->size() <= dst->size());
919     SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);
920     SkASSERT(!dst->isMapped());
921 
922     SkDEBUGCODE(this->validate());
923     SkASSERT(fContext);
924 
925     this->closeActiveOpsTask();
926 
927     sk_sp<GrRenderTask> task = GrBufferUpdateRenderTask::Make(std::move(src),
928                                                               std::move(dst),
929                                                               dstOffset);
930     SkASSERT(task);
931 
932     this->appendTask(task);
933     task->makeClosed(fContext);
934 
935     // We have closed the previous active oplist but since a new oplist isn't being added there
936     // shouldn't be an active one.
937     SkASSERT(!fActiveOpsTask);
938     SkDEBUGCODE(this->validate());
939 }
940 
941 sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
942                                                         SkIRect dstRect,
943                                                         const sk_sp<GrSurfaceProxy>& src,
944                                                         SkIRect srcRect,
945                                                         GrSamplerState::Filter filter,
946                                                         GrSurfaceOrigin origin) {
947     SkDEBUGCODE(this->validate());
948     SkASSERT(fContext);
949 
950     // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
951     // "active ops task" tracking. dst will be the target of our copy task but it might also be the
952     // target of the active ops task. We currently require the active ops task to be closed before
953     // making a new task that targets the same proxy. However, if we first close the active ops
954     // task, then fail to make a copy task, the next active ops task may target the same proxy. This
955     // will trip an assert related to unnecessary ops task splitting.
956     if (src->framebufferOnly()) {
957         return nullptr;
958     }
959 
960     this->closeActiveOpsTask();
961 
962     sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
963                                                       std::move(dst),
964                                                       dstRect,
965                                                       src,
966                                                       srcRect,
967                                                       filter,
968                                                       origin);
969     if (!task) {
970         return nullptr;
971     }
972 
973     this->appendTask(task);
974 
975     const GrCaps& caps = *fContext->priv().caps();
976     // We always say skgpu::Mipmapped::kNo here since we are always just copying from the base layer
977     // to another base layer. We don't need to make sure the whole mip map chain is valid.
978     task->addDependency(
979             this, src.get(), skgpu::Mipmapped::kNo, GrTextureResolveManager(this), caps);
980     task->makeClosed(fContext);
981 
982     // We have closed the previous active oplist but since a new oplist isn't being added there
983     // shouldn't be an active one.
984     SkASSERT(!fActiveOpsTask);
985     SkDEBUGCODE(this->validate());
986     return task;
987 }
988 
989 bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
990                                           SkIRect rect,
991                                           GrColorType srcColorType,
992                                           GrColorType dstColorType,
993                                           const GrMipLevel levels[],
994                                           int levelCount) {
995     SkDEBUGCODE(this->validate());
996     SkASSERT(fContext);
997 
998     this->closeActiveOpsTask();
999     const GrCaps& caps = *fContext->priv().caps();
1000 
1001     // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
1002     // complete flush here.
1003     if (!caps.preferVRAMUseOverFlushes()) {
1004         this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
1005                             SkSurfaces::BackendSurfaceAccess::kNoAccess,
1006                             GrFlushInfo{},
1007                             nullptr);
1008     }
1009 
1010     GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
1011                                                                   std::move(dst),
1012                                                                   rect,
1013                                                                   srcColorType,
1014                                                                   dstColorType,
1015                                                                   levels,
1016                                                                   levelCount));
1017     if (!task) {
1018         return false;
1019     }
1020 
1021     task->makeClosed(fContext);
1022 
1023     // We have closed the previous active oplist but since a new oplist isn't being added there
1024     // shouldn't be an active one.
1025     SkASSERT(!fActiveOpsTask);
1026     SkDEBUGCODE(this->validate());
1027     return true;
1028 }
1029 
1030 /*
1031  * This method finds a path renderer that can draw the specified path on
1032  * the provided target.
1033  * Due to its expense, the software path renderer has been split out so it
1034  * can be individually allowed/disallowed via the "allowSW" boolean.
1035  */
1036 skgpu::ganesh::PathRenderer* GrDrawingManager::getPathRenderer(
1037         const PathRenderer::CanDrawPathArgs& args,
1038         bool allowSW,
1039         PathRendererChain::DrawType drawType,
1040         PathRenderer::StencilSupport* stencilSupport) {
1041     if (!fPathRendererChain) {
1042         fPathRendererChain =
1043                 std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
1044     }
1045 
1046     auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
1047     if (!pr && allowSW) {
1048         auto swPR = this->getSoftwarePathRenderer();
1049         if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
1050             pr = swPR;
1051         }
1052     }
1053 
1054 #if GR_PATH_RENDERER_SPEW
1055     if (pr) {
1056         SkDebugf("getPathRenderer: %s\n", pr->name());
1057     }
1058 #endif
1059 
1060     return pr;
1061 }
1062 
1063 skgpu::ganesh::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
1064     if (!fSoftwarePathRenderer) {
1065         fSoftwarePathRenderer.reset(new skgpu::ganesh::SoftwarePathRenderer(
1066                 fContext->priv().proxyProvider(),
1067                 fOptionsForPathRendererChain.fAllowPathMaskCaching));
1068     }
1069     return fSoftwarePathRenderer.get();
1070 }
1071 
1072 skgpu::ganesh::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
1073     if (!fPathRendererChain) {
1074         fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
1075                                                                  fOptionsForPathRendererChain);
1076     }
1077     return fPathRendererChain->getAtlasPathRenderer();
1078 }
1079 
1080 skgpu::ganesh::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
1081     if (!fPathRendererChain) {
1082         fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
1083                                                                  fOptionsForPathRendererChain);
1084     }
1085     return fPathRendererChain->getTessellationPathRenderer();
1086 }
1087 
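// If the resource cache has requested a flush, flushes and submits all pending work and lets
// the cache purge. Only meaningful with a direct context.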
1088 void GrDrawingManager::flushIfNecessary() {
1089     auto direct = fContext->asDirectContext();
1090     if (!direct) {
1091         return;
1092     }
1093 
1094     auto resourceCache = direct->priv().getResourceCache();
1095     if (resourceCache && resourceCache->requestsFlush()) {
1096         if (this->flush({}, SkSurfaces::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
1097             this->submitToGpu();
1098         }
1099         resourceCache->purgeAsNeeded();
1100     }
1101 }
1102