1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/GrDrawingManager.h"
9 
10 #include <algorithm>
11 #include <memory>
12 
13 #include "include/core/SkDeferredDisplayList.h"
14 #include "include/gpu/GrBackendSemaphore.h"
15 #include "include/gpu/GrDirectContext.h"
16 #include "include/gpu/GrRecordingContext.h"
17 #include "src/core/SkDeferredDisplayListPriv.h"
18 #include "src/core/SkTInternalLList.h"
19 #include "src/gpu/GrClientMappedBufferManager.h"
20 #include "src/gpu/GrCopyRenderTask.h"
21 #include "src/gpu/GrDDLTask.h"
22 #include "src/gpu/GrDirectContextPriv.h"
23 #include "src/gpu/GrGpu.h"
24 #include "src/gpu/GrMemoryPool.h"
25 #include "src/gpu/GrOnFlushResourceProvider.h"
26 #include "src/gpu/GrOpFlushState.h"
27 #include "src/gpu/GrRecordingContextPriv.h"
28 #include "src/gpu/GrRenderTargetProxy.h"
29 #include "src/gpu/GrRenderTask.h"
30 #include "src/gpu/GrRenderTaskCluster.h"
31 #include "src/gpu/GrResourceAllocator.h"
32 #include "src/gpu/GrResourceProvider.h"
33 #include "src/gpu/GrSurfaceProxyPriv.h"
34 #include "src/gpu/GrTTopoSort.h"
35 #include "src/gpu/GrTexture.h"
36 #include "src/gpu/GrTextureProxy.h"
37 #include "src/gpu/GrTextureProxyPriv.h"
38 #include "src/gpu/GrTextureResolveRenderTask.h"
39 #include "src/gpu/GrTracing.h"
40 #include "src/gpu/GrTransferFromRenderTask.h"
41 #include "src/gpu/GrWaitRenderTask.h"
42 #include "src/gpu/GrWritePixelsRenderTask.h"
43 #include "src/gpu/text/GrSDFTControl.h"
44 #include "src/image/SkSurface_Gpu.h"
45 
46 #if SK_GPU_V1
47 #include "src/gpu/ops/OpsTask.h"
48 #include "src/gpu/ops/SoftwarePathRenderer.h"
49 #endif
50 
51 ///////////////////////////////////////////////////////////////////////////////////////////////////
52 #if SK_GPU_V1
53 GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext,
54                                    const PathRendererChain::Options& optionsForPathRendererChain,
55                                    bool reduceOpsTaskSplitting)
56         : fContext(rContext)
57         , fOptionsForPathRendererChain(optionsForPathRendererChain)
58         , fPathRendererChain(nullptr)
59         , fSoftwarePathRenderer(nullptr)
60         , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
61 }
62 
63 #else
64 
65 GrDrawingManager::GrDrawingManager(GrRecordingContext* rContext, bool reduceOpsTaskSplitting)
66         : fContext(rContext)
67         , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
68 }
69 
70 #endif
71 
72 GrDrawingManager::~GrDrawingManager() {
73     this->closeAllTasks();
74     this->removeRenderTasks();
75 }
76 
77 bool GrDrawingManager::wasAbandoned() const {
78     return fContext->abandoned();
79 }
80 
81 void GrDrawingManager::freeGpuResources() {
82     for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
83         if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
84             // it's safe to just do this because we're iterating in reverse
85             fOnFlushCBObjects.removeShuffle(i);
86         }
87     }
88 
89 #if SK_GPU_V1
90     // a path renderer may be holding onto resources
91     fPathRendererChain = nullptr;
92     fSoftwarePathRenderer = nullptr;
93 #endif
94 }
95 
96 // MDB TODO: make use of the 'proxies' parameter.
97 bool GrDrawingManager::flush(
98         SkSpan<GrSurfaceProxy*> proxies,
99         SkSurface::BackendSurfaceAccess access,
100         const GrFlushInfo& info,
101         const GrBackendSurfaceMutableState* newState) {
102     GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);
103 
104     if (fFlushing || this->wasAbandoned()) {
105         if (info.fSubmittedProc) {
106             info.fSubmittedProc(info.fSubmittedContext, false);
107         }
108         if (info.fFinishedProc) {
109             info.fFinishedProc(info.fFinishedContext);
110         }
111         return false;
112     }
113 
114     SkDEBUGCODE(this->validate());
115 
116     // As of now we only short-circuit if we got an explicit list of surfaces to flush.
117     if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
118         access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
119         bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
120             bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
121                 return task && task->isUsed(proxy);
122             });
123             return !used;
124         });
125         if (allUnused) {
126             if (info.fSubmittedProc) {
127                 info.fSubmittedProc(info.fSubmittedContext, true);
128             }
129             return false;
130         }
131     }
132 
133     auto dContext = fContext->asDirectContext();
134     SkASSERT(dContext);
135     dContext->priv().clientMappedBufferManager()->process();
136 
137     GrGpu* gpu = dContext->priv().getGpu();
138     // We have a non-abandoned, direct GrContext. It must have a GrGpu.
139     SkASSERT(gpu);
140 
141     fFlushing = true;
142 
143     auto resourceProvider = dContext->priv().resourceProvider();
144     auto resourceCache = dContext->priv().getResourceCache();
145 
146     // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
147     // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
148     // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
149     // if the SkGpuDevice(s) write to them again.
150     this->closeAllTasks();
151     fActiveOpsTask = nullptr;
152 
153     this->sortTasks();
154 
155     if (!fCpuBufferCache) {
156         // We cache more buffers when the backend is using client side arrays. Otherwise, we
157         // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
158         // buffer object. Each pool only requires one staging buffer at a time.
159         int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
160         fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
161     }
162 
163     GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);
164 
165     GrOnFlushResourceProvider onFlushProvider(this);
166 
167     // Prepare any onFlush op lists (e.g. atlases).
168     if (!fOnFlushCBObjects.empty()) {
169         fFlushingRenderTaskIDs.reserve_back(fDAG.count());
170         for (const auto& task : fDAG) {
171             if (task) {
172                 task->gatherIDs(&fFlushingRenderTaskIDs);
173             }
174         }
175 
176         for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
177             onFlushCBObject->preFlush(&onFlushProvider, SkMakeSpan(fFlushingRenderTaskIDs));
178         }
179         for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
180             onFlushRenderTask->makeClosed(fContext);
181 #ifdef SK_DEBUG
182             // OnFlush callbacks are invoked during flush, and are therefore expected to handle
183             // resource allocation & usage on their own. (No deferred or lazy proxies!)
184             onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
185                     [](GrSurfaceProxy* p, GrMipmapped mipMapped) {
186                 SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
187                 SkASSERT(!p->isLazy());
188                 if (p->requiresManualMSAAResolve()) {
189                     // The onFlush callback is responsible for ensuring MSAA gets resolved.
190                     SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
191                 }
192                 if (GrMipmapped::kYes == mipMapped) {
193                     // The onFlush callback is responsible for regenerating mips if needed.
194                     SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipmapsAreDirty());
195                 }
196             });
197 #endif
198 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
199             flushState.fDisableStencilCulling = fDisableStencilCulling;
200             flushState.fHasStencilCullingOp = fHasStencilCullingOp;
201 #endif
202             onFlushRenderTask->prepare(&flushState);
203         }
204     }
205 
206     bool usingReorderedDAG = false;
207     GrResourceAllocator resourceAllocator(dContext);
208     if (fReduceOpsTaskSplitting) {
209         usingReorderedDAG = this->reorderTasks(&resourceAllocator);
210         if (!usingReorderedDAG) {
211             resourceAllocator.reset();
212         }
213     }
214 
215 #if 0
216     // Enable this to print out verbose GrOp information
217     SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
218     for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
219         SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
220     }
221     SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
222     for (const auto& task : fDAG) {
223         SkDEBUGCODE(task->dump(/* printDependencies */ true);)
224     }
225 #endif
226 
227     if (!resourceAllocator.failedInstantiation()) {
228         if (!usingReorderedDAG) {
229             for (const auto& task : fDAG) {
230                 SkASSERT(task);
231                 task->gatherProxyIntervals(&resourceAllocator);
232             }
233             resourceAllocator.planAssignment();
234         }
235         resourceAllocator.assign();
236     }
237     bool flushed = !resourceAllocator.failedInstantiation() &&
238                     this->executeRenderTasks(&flushState);
239     this->removeRenderTasks();
240 
241     gpu->executeFlushInfo(proxies, access, info, newState);
242 
243     // Give the cache a chance to purge resources that become purgeable due to flushing.
244     if (flushed) {
245         resourceCache->purgeAsNeeded();
246         flushed = false;
247     }
248     for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
249         onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(),
250                                    SkMakeSpan(fFlushingRenderTaskIDs));
251         flushed = true;
252     }
253     if (flushed) {
254         resourceCache->purgeAsNeeded();
255     }
256     fFlushingRenderTaskIDs.reset();
257     fFlushing = false;
258 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
259     fDisableStencilCulling = false;
260     fHasStencilCullingOp = false;
261 #endif
262 #ifdef SKIA_OHOS
263     fNumDrawOp = 0;
264 #endif
265 
266     return true;
267 }
268 
269 bool GrDrawingManager::submitToGpu(bool syncToCpu) {
270     if (fFlushing || this->wasAbandoned()) {
271         return false;
272     }
273 
274     auto direct = fContext->asDirectContext();
275     if (!direct) {
276         return false; // Can't submit while DDL recording
277     }
278     GrGpu* gpu = direct->priv().getGpu();
279     return gpu->submitToGpu(syncToCpu);
280 }
281 
282 bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
283 #if GR_FLUSH_TIME_OP_SPEW
284     SkDebugf("Flushing %d opsTasks\n", fDAG.count());
285     for (int i = 0; i < fDAG.count(); ++i) {
286         if (fDAG[i]) {
287             SkString label;
288             label.printf("task %d/%d", i, fDAG.count());
289             fDAG[i]->dump(label, {}, true, true);
290         }
291     }
292 #endif
293 
294     bool anyRenderTasksExecuted = false;
295 
296     for (const auto& renderTask : fDAG) {
297         if (!renderTask || !renderTask->isInstantiated()) {
298              continue;
299         }
300 
301         SkASSERT(renderTask->deferredProxiesAreInstantiated());
302 
303         renderTask->prepare(flushState);
304     }
305 
306     // Upload all data to the GPU
307     flushState->preExecuteDraws();
308 
309     // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
310     // for each command buffer associated with the oplists. If this gets too large we can cause the
311     // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
312     // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
313     // memory pressure.
314     static constexpr int kMaxRenderTasksBeforeFlush = 100;
315     int numRenderTasksExecuted = 0;
316 
317     // Execute the onFlush renderTasks first, if any.
318     for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
319         if (!onFlushRenderTask->execute(flushState)) {
320             SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
321         }
322         SkASSERT(onFlushRenderTask->unique());
323         onFlushRenderTask->disown(this);
324         onFlushRenderTask = nullptr;
325         if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
326             flushState->gpu()->submitToGpu(false);
327             numRenderTasksExecuted = 0;
328         }
329     }
330     fOnFlushRenderTasks.reset();
331 
332     // Execute the normal op lists.
333 #ifdef SKIA_OHOS
334     int numOpsTaskExecuted = 0;
335     int numOpsExecuted = 0;
336 #endif
337     for (const auto& renderTask : fDAG) {
338         SkASSERT(renderTask);
339         if (!renderTask->isInstantiated()) {
340             continue;
341         }
342 
343         if (renderTask->execute(flushState)) {
344             anyRenderTasksExecuted = true;
345 #ifdef SKIA_OHOS
346             if (UNLIKELY(SkOHOSDebugLevelTraceUtil::getEnableDebugTrace()) && renderTask->asOpsTask()) {
347                 numOpsTaskExecuted++;
348                 numOpsExecuted += renderTask->asOpsTask()->getNumOpChainsExecuted();
349             }
350 #endif
351         }
352         if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
353             flushState->gpu()->submitToGpu(false);
354             numRenderTasksExecuted = 0;
355         }
356     }
357 
358 #ifdef SKIA_OHOS
359     HITRACE_OHOS_NAME_FMT_LEVEL(DebugTraceLevel::NORMAL, "Add: %d drawOps, Flush: %d opsTasks, %d ops",
360         fNumDrawOp, numOpsTaskExecuted, numOpsExecuted);
361 #endif
362 
363     SkASSERT(!flushState->opsRenderPass());
364     SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());
365 
366     // We reset the flush state before the RenderTasks so that the last resources to be freed are
367     // those that are written to in the RenderTasks. This helps to make sure the most recently used
368     // resources are the last to be purged by the resource cache.
369     flushState->reset();
370 
371     return anyRenderTasksExecuted;
372 }
373 
374 void GrDrawingManager::removeRenderTasks() {
375     for (const auto& task : fDAG) {
376         SkASSERT(task);
377         if (!task->unique() || task->requiresExplicitCleanup()) {
378             // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
379             // DDLs, however, will always require an explicit notification for when they
380             // can clean up resources.
381             task->endFlush(this);
382         }
383         task->disown(this);
384     }
385     fDAG.reset();
386     fLastRenderTasks.reset();
387     for (const sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
388         onFlushRenderTask->disown(this);
389     }
390     fOnFlushRenderTasks.reset();
391 }
392 
393 void GrDrawingManager::sortTasks() {
394     if (!GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fDAG)) {
395         SkDEBUGFAIL("Render task topo sort failed.");
396         return;
397     }
398 
399 #if SK_GPU_V1 && defined(SK_DEBUG)
400     // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
401     // could have merged it means the opsTask was artificially split.
402     if (!fDAG.empty()) {
403         auto prevOpsTask = fDAG[0]->asOpsTask();
404         for (int i = 1; i < fDAG.count(); ++i) {
405             auto curOpsTask = fDAG[i]->asOpsTask();
406 
407             if (prevOpsTask && curOpsTask) {
408                 SkASSERT(!prevOpsTask->canMerge(curOpsTask));
409             }
410 
411             prevOpsTask = curOpsTask;
412         }
413     }
414 #endif
415 }
416 
417 // Reorder the array to match the llist without reffing & unreffing sk_sp's.
418 // Both args must contain the same objects.
419 // This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
420 template <typename T>
421 static void reorder_array_by_llist(const SkTInternalLList<T>& llist, SkTArray<sk_sp<T>>* array) {
422     int i = 0;
423     for (T* t : llist) {
424         // Release the pointer that used to live here so it doesn't get unreffed.
425         [[maybe_unused]] T* old = array->at(i).release();
426         array->at(i++).reset(t);
427     }
428     SkASSERT(i == array->count());
429 }
430 
431 bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
432     SkASSERT(fReduceOpsTaskSplitting);
433     SkTInternalLList<GrRenderTask> llist;
434     bool clustered = GrClusterRenderTasks(SkMakeSpan(fDAG), &llist);
435     if (!clustered) {
436         return false;
437     }
438 
439     for (GrRenderTask* task : llist) {
440         task->gatherProxyIntervals(resourceAllocator);
441     }
442     if (!resourceAllocator->planAssignment()) {
443         return false;
444     }
445     if (!resourceAllocator->makeBudgetHeadroom()) {
446         auto dContext = fContext->asDirectContext();
447         SkASSERT(dContext);
448         dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
449         return false;
450     }
451     reorder_array_by_llist(llist, &fDAG);
452 
453     int newCount = 0;
454     for (int i = 0; i < fDAG.count(); i++) {
455         sk_sp<GrRenderTask>& task = fDAG[i];
456 #if SK_GPU_V1
457         if (auto opsTask = task->asOpsTask()) {
458             size_t remaining = fDAG.size() - i - 1;
459             SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
460             int removeCount = opsTask->mergeFrom(nextTasks);
461             for (const auto& removed : nextTasks.first(removeCount)) {
462                 removed->disown(this);
463             }
464             i += removeCount;
465         }
466 #endif
467         fDAG[newCount++] = std::move(task);
468     }
469     fDAG.resize_back(newCount);
470     return true;
471 }
472 
473 void GrDrawingManager::closeAllTasks() {
474     for (auto& task : fDAG) {
475         if (task) {
476             task->makeClosed(fContext);
477         }
478     }
479 }
480 
481 GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
482     if (!task) {
483         return nullptr;
484     }
485     if (fDAG.empty()) {
486         return fDAG.push_back(std::move(task)).get();
487     }
488     // Release 'fDAG.back()' and grab the raw pointer, in case the SkTArray grows
489     // and reallocates during emplace_back.
490     // TODO: Either use std::vector that can do this for us, or use SkSTArray to get the
491     // perf win.
492     fDAG.emplace_back(fDAG.back().release());
493     return (fDAG[fDAG.count() - 2] = std::move(task)).get();
494 }
495 
496 GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
497     if (!task) {
498         return nullptr;
499     }
500     return fDAG.push_back(std::move(task)).get();
501 }
502 
503 static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
504     if (!proxy->isInstantiated()) {
505         return;
506     }
507 
508     // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
509     // because clients expect the flushed surface's backing texture to be fully resolved
510     // upon return.
511     if (proxy->requiresManualMSAAResolve()) {
512         auto* rtProxy = proxy->asRenderTargetProxy();
513         SkASSERT(rtProxy);
514         if (rtProxy->isMSAADirty()) {
515             SkASSERT(rtProxy->peekRenderTarget());
516             gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
517             gpu->submitToGpu(false);
518             rtProxy->markMSAAResolved();
519         }
520     }
521     // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
522     // case their backend textures are being stolen.
523     // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
524     // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
525     if (auto* textureProxy = proxy->asTextureProxy()) {
526         if (textureProxy->mipmapsAreDirty()) {
527             SkASSERT(textureProxy->peekTexture());
528             gpu->regenerateMipMapLevels(textureProxy->peekTexture());
529             textureProxy->markMipmapsClean();
530         }
531     }
532 }
533 
534 GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
535         SkSpan<GrSurfaceProxy*> proxies,
536         SkSurface::BackendSurfaceAccess access,
537         const GrFlushInfo& info,
538         const GrBackendSurfaceMutableState* newState) {
539     if (this->wasAbandoned()) {
540         if (info.fSubmittedProc) {
541             info.fSubmittedProc(info.fSubmittedContext, false);
542         }
543         if (info.fFinishedProc) {
544             info.fFinishedProc(info.fFinishedContext);
545         }
546         return GrSemaphoresSubmitted::kNo;
547     }
548     SkDEBUGCODE(this->validate());
549 
550     auto direct = fContext->asDirectContext();
551     SkASSERT(direct);
552     GrGpu* gpu = direct->priv().getGpu();
553     // We have a non-abandoned, direct GrContext. It must have a GrGpu.
554     SkASSERT(gpu);
555 
556     // TODO: It is important to upgrade the drawingmanager to just flushing the
557     // portion of the DAG required by 'proxies' in order to restore some of the
558     // semantics of this method.
559     bool didFlush = this->flush(proxies, access, info, newState);
560     for (GrSurfaceProxy* proxy : proxies) {
561         resolve_and_mipmap(gpu, proxy);
562     }
563 
564     SkDEBUGCODE(this->validate());
565 
566     if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
567         return GrSemaphoresSubmitted::kNo;
568     }
569     return GrSemaphoresSubmitted::kYes;
570 }
571 
572 void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
573     fOnFlushCBObjects.push_back(onFlushCBObject);
574 }
575 
576 #if GR_TEST_UTILS
577 void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
578     int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
579             fOnFlushCBObjects.begin();
580     SkASSERT(n < fOnFlushCBObjects.count());
581     fOnFlushCBObjects.removeShuffle(n);
582 }
583 #endif
584 
585 void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
586 #ifdef SK_DEBUG
587     if (auto prior = this->getLastRenderTask(proxy)) {
588         SkASSERT(prior->isClosed() || prior == task);
589     }
590 #endif
591     uint32_t key = proxy->uniqueID().asUInt();
592     if (task) {
593         fLastRenderTasks.set(key, task);
594     } else if (fLastRenderTasks.find(key)) {
595         fLastRenderTasks.remove(key);
596     }
597 }
598 
599 GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
600     auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
601     return entry ? *entry : nullptr;
602 }
603 
604 skgpu::v1::OpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
605     GrRenderTask* task = this->getLastRenderTask(proxy);
606     return task ? task->asOpsTask() : nullptr;
607 }
608 
609 
610 void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
611     SkDEBUGCODE(this->validate());
612 
613     // no renderTask should receive a new command after this
614     this->closeAllTasks();
615     fActiveOpsTask = nullptr;
616 
617     this->sortTasks();
618 
619     fDAG.swap(ddl->fRenderTasks);
620     SkASSERT(fDAG.empty());
621 
622     for (auto& renderTask : ddl->fRenderTasks) {
623         renderTask->disown(this);
624         renderTask->prePrepare(fContext);
625     }
626 
627     ddl->fArenas = std::move(fContext->priv().detachArenas());
628 
629     fContext->priv().detachProgramData(&ddl->fProgramData);
630 
631     SkDEBUGCODE(this->validate());
632 }
633 
634 void GrDrawingManager::createDDLTask(sk_sp<const SkDeferredDisplayList> ddl,
635                                      sk_sp<GrRenderTargetProxy> newDest,
636                                      SkIPoint offset) {
637     SkDEBUGCODE(this->validate());
638 
639 #if SK_GPU_V1
640     if (fActiveOpsTask) {
641         // This is a temporary fix for the partial-MDB world. In that world we're not
642         // reordering so ops that (in the single opsTask world) would've just glommed onto the
643         // end of the single opsTask but referred to a far earlier RT need to appear in their
644         // own opsTask.
645         fActiveOpsTask->makeClosed(fContext);
646         fActiveOpsTask = nullptr;
647     }
648 #endif
649 
650     // Propagate the DDL proxy's state information to the replay target.
651     if (ddl->priv().targetProxy()->isMSAADirty()) {
652         auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
653                 ddl->characterization().origin(),
654                 ddl->priv().targetProxy()->backingStoreDimensions().height(),
655                 ddl->priv().targetProxy()->msaaDirtyRect());
656         newDest->markMSAADirty(nativeRect);
657     }
658     GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
659     if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
660         newTextureProxy->markMipmapsDirty();
661     }
662 
663     // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
664     // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
665     ddl->fLazyProxyData->fReplayDest = newDest.get();
666 
667     // Add a task to handle drawing and lifetime management of the DDL.
668     SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
669                                                                        std::move(newDest),
670                                                                        std::move(ddl),
671                                                                        offset));
672     SkASSERT(ddlTask->isClosed());
673 
674     SkDEBUGCODE(this->validate());
675 }
676 
677 #ifdef SK_DEBUG
678 void GrDrawingManager::validate() const {
679 #if SK_GPU_V1
680     if (fActiveOpsTask) {
681         SkASSERT(!fDAG.empty());
682         SkASSERT(!fActiveOpsTask->isClosed());
683         SkASSERT(fActiveOpsTask == fDAG.back().get());
684     }
685 
686     for (int i = 0; i < fDAG.count(); ++i) {
687         if (fActiveOpsTask != fDAG[i].get()) {
688             // The resolveTask associated with the activeTask remains open for as long as the
689             // activeTask does.
690             bool isActiveResolveTask =
691                 fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
692             bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
693             SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
694         }
695     }
696 
697     // The active opsTask, if any, should always be at the back of the DAG.
698     if (!fDAG.empty()) {
699         if (fDAG.back()->isSetFlag(GrRenderTask::kAtlas_Flag)) {
700             SkASSERT(fActiveOpsTask == nullptr);
701             SkASSERT(!fDAG.back()->isClosed());
702         } else if (fDAG.back()->isClosed()) {
703             SkASSERT(fActiveOpsTask == nullptr);
704         } else {
705             SkASSERT(fActiveOpsTask == fDAG.back().get());
706         }
707     } else {
708         SkASSERT(fActiveOpsTask == nullptr);
709     }
710 #endif // SK_GPU_V1
711 }
712 #endif // SK_DEBUG
713 
714 void GrDrawingManager::closeActiveOpsTask() {
715 #if SK_GPU_V1
716     if (fActiveOpsTask) {
717         // This is a temporary fix for the partial-MDB world. In that world we're not
718         // reordering so ops that (in the single opsTask world) would've just glommed onto the
719         // end of the single opsTask but referred to a far earlier RT need to appear in their
720         // own opsTask.
721         fActiveOpsTask->makeClosed(fContext);
722         fActiveOpsTask = nullptr;
723     }
724 #endif
725 }
726 
727 #if SK_GPU_V1
728 sk_sp<skgpu::v1::OpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
729                                                        sk_sp<GrArenas> arenas,
730                                                        bool flushTimeOpsTask) {
731     SkDEBUGCODE(this->validate());
732     SkASSERT(fContext);
733 
734     this->closeActiveOpsTask();
735 
736     sk_sp<skgpu::v1::OpsTask> opsTask(new skgpu::v1::OpsTask(this,
737                                                              std::move(surfaceView),
738                                                              fContext->priv().auditTrail(),
739                                                              std::move(arenas)));
740 
741     SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());
742 
743     if (flushTimeOpsTask) {
744         fOnFlushRenderTasks.push_back(opsTask);
745     } else {
746         this->appendTask(opsTask);
747 
748         fActiveOpsTask = opsTask.get();
749     }
750 
751     SkDEBUGCODE(this->validate());
752     return opsTask;
753 }
754 
755 void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
756                                     GrRenderTask* previousAtlasTask) {
757     SkDEBUGCODE(this->validate());
758     SkASSERT(fContext);
759 
760     if (previousAtlasTask) {
761         previousAtlasTask->makeClosed(fContext);
762         for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
763             // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
764             // This guarantees that the previous atlas is totally out of service before we render
765             // the next one, meaning there is only ever one atlas active at a time and that they can
766             // all share the same texture.
767             atlasTask->addDependency(previousAtlasUser);
768             previousAtlasUser->makeClosed(fContext);
769             if (previousAtlasUser == fActiveOpsTask) {
770                 fActiveOpsTask = nullptr;
771             }
772         }
773     }
774 
775     atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
776     this->insertTaskBeforeLast(std::move(atlasTask));
777 
778     SkDEBUGCODE(this->validate());
779 }
780 #endif // SK_GPU_V1
781 
782 GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
783     // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
784     // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
785     // state. This is because those opsTasks can still receive new ops and because if they refer to
786     // the mipmapped version of 'proxy', they will then come to depend on the render task being
787     // created here.
788     //
789     // Add the new textureResolveTask before the fActiveOpsTask (if not in
790     // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
791     // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
792     GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
793     return static_cast<GrTextureResolveRenderTask*>(task);
794 }
795 
796 void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
797                                          std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
798                                          int numSemaphores) {
799     SkDEBUGCODE(this->validate());
800     SkASSERT(fContext);
801 
802     sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
803                                                                     std::move(semaphores),
804                                                                     numSemaphores);
805 
806 #if SK_GPU_V1
807     if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
808         SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
809         this->insertTaskBeforeLast(waitTask);
810         // In this case we keep the current renderTask open but just insert the new waitTask
811         // before it in the list. The waitTask will never need to trigger any resolves or mip
812         // map generation which is the main advantage of going through the proxy version.
813         // Additionally we would've had to temporarily set the wait task as the lastRenderTask
814         // on the proxy, add the dependency, and then reset the lastRenderTask to
815         // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
816         // dependencies so that we don't unnecessarily reorder the waitTask before them.
817         // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
818         // semaphore even though they don't need to be for correctness.
819 
820         // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
821         // get a circular self dependency of waitTask on waitTask.
822         waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
823         fActiveOpsTask->addDependency(waitTask.get());
824     } else
825 #endif
826     {
827         // In this case we just close the previous RenderTask and start and append the waitTask
828         // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
829         // there is a lastTask on the proxy we make waitTask depend on that task. This
830         // dependency isn't strictly needed but it does keep the DAG from reordering the
831         // waitTask earlier and blocking more tasks.
832         if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
833             waitTask->addDependency(lastTask);
834         }
835         this->setLastRenderTask(proxy.get(), waitTask.get());
836         this->closeActiveOpsTask();
837         this->appendTask(waitTask);
838     }
839     waitTask->makeClosed(fContext);
840 
841     SkDEBUGCODE(this->validate());
842 }
843 
844 void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
845                                                  const SkIRect& srcRect,
846                                                  GrColorType surfaceColorType,
847                                                  GrColorType dstColorType,
848                                                  sk_sp<GrGpuBuffer> dstBuffer,
849                                                  size_t dstOffset) {
850     SkDEBUGCODE(this->validate());
851     SkASSERT(fContext);
852     this->closeActiveOpsTask();
853 
854     GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
855             srcProxy, srcRect, surfaceColorType, dstColorType,
856             std::move(dstBuffer), dstOffset));
857 
858     const GrCaps& caps = *fContext->priv().caps();
859 
860     // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
861     // don't need to make sure the whole mip map chain is valid.
862     task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
863                         GrTextureResolveManager(this), caps);
864     task->makeClosed(fContext);
865 
866     // We have closed the previous active oplist but since a new oplist isn't being added there
867     // shouldn't be an active one.
868     SkASSERT(!fActiveOpsTask);
869     SkDEBUGCODE(this->validate());
870 }
871 
872 sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
873                                                         SkIRect srcRect,
874                                                         sk_sp<GrSurfaceProxy> dst,
875                                                         SkIPoint dstPoint,
876                                                         GrSurfaceOrigin origin) {
877     SkDEBUGCODE(this->validate());
878     SkASSERT(fContext);
879 
880     // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
881     // "active ops task" tracking. dst will be the target of our copy task but it might also be the
882     // target of the active ops task. We currently require the active ops task to be closed before
883     // making a new task that targets the same proxy. However, if we first close the active ops
884     // task, then fail to make a copy task, the next active ops task may target the same proxy. This
885     // will trip an assert related to unnecessary ops task splitting.
886     if (src->framebufferOnly()) {
887         return nullptr;
888     }
889 
890     this->closeActiveOpsTask();
891 
892     sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
893                                                       src,
894                                                       srcRect,
895                                                       std::move(dst),
896                                                       dstPoint,
897                                                       origin);
898     if (!task) {
899         return nullptr;
900     }
901 
902     this->appendTask(task);
903 
904     const GrCaps& caps = *fContext->priv().caps();
905     // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
906     // another base layer. We don't need to make sure the whole mip map chain is valid.
907     task->addDependency(this, src.get(), GrMipmapped::kNo, GrTextureResolveManager(this), caps);
908     task->makeClosed(fContext);
909 
910     // We have closed the previous active oplist but since a new oplist isn't being added there
911     // shouldn't be an active one.
912     SkASSERT(!fActiveOpsTask);
913     SkDEBUGCODE(this->validate());
914     return task;
915 }
916 
917 bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
918                                           SkIRect rect,
919                                           GrColorType srcColorType,
920                                           GrColorType dstColorType,
921                                           const GrMipLevel levels[],
922                                           int levelCount) {
923     SkDEBUGCODE(this->validate());
924     SkASSERT(fContext);
925 
926     this->closeActiveOpsTask();
927     const GrCaps& caps = *fContext->priv().caps();
928 
929     // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
930     // complete flush here.
931     if (!caps.preferVRAMUseOverFlushes()) {
932         this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
933                             SkSurface::BackendSurfaceAccess::kNoAccess,
934                             GrFlushInfo{},
935                             nullptr);
936     }
937 
938     GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
939                                                                   std::move(dst),
940                                                                   rect,
941                                                                   srcColorType,
942                                                                   dstColorType,
943                                                                   levels,
944                                                                   levelCount));
945     if (!task) {
946         return false;
947     }
948 
949     task->makeClosed(fContext);
950 
951     // We have closed the previous active oplist but since a new oplist isn't being added there
952     // shouldn't be an active one.
953     SkASSERT(!fActiveOpsTask);
954     SkDEBUGCODE(this->validate());
955     return true;
956 }
957 
958 #if SK_GPU_V1
959 /*
960  * This method finds a path renderer that can draw the specified path on
961  * the provided target.
962  * Due to its expense, the software path renderer has been split out so it
963  * can be individually allowed/disallowed via the "allowSW" boolean.
964  */
965 skgpu::v1::PathRenderer* GrDrawingManager::getPathRenderer(
966         const PathRenderer::CanDrawPathArgs& args,
967         bool allowSW,
968         PathRendererChain::DrawType drawType,
969         PathRenderer::StencilSupport* stencilSupport) {
970 
971     if (!fPathRendererChain) {
972         fPathRendererChain =
973                 std::make_unique<PathRendererChain>(fContext, fOptionsForPathRendererChain);
974     }
975 
976     auto pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
977     if (!pr && allowSW) {
978         auto swPR = this->getSoftwarePathRenderer();
979         if (PathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
980             pr = swPR;
981         }
982     }
983 
984 #if GR_PATH_RENDERER_SPEW
985     if (pr) {
986         SkDebugf("getPathRenderer: %s\n", pr->name());
987     }
988 #endif
989 
990     return pr;
991 }
992 
993 skgpu::v1::PathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
994     if (!fSoftwarePathRenderer) {
995         fSoftwarePathRenderer.reset(new skgpu::v1::SoftwarePathRenderer(
996             fContext->priv().proxyProvider(), fOptionsForPathRendererChain.fAllowPathMaskCaching));
997     }
998     return fSoftwarePathRenderer.get();
999 }
1000 
1001 skgpu::v1::AtlasPathRenderer* GrDrawingManager::getAtlasPathRenderer() {
1002     if (!fPathRendererChain) {
1003         fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
1004                                                                  fOptionsForPathRendererChain);
1005     }
1006     return fPathRendererChain->getAtlasPathRenderer();
1007 }
1008 
1009 skgpu::v1::PathRenderer* GrDrawingManager::getTessellationPathRenderer() {
1010     if (!fPathRendererChain) {
1011         fPathRendererChain = std::make_unique<PathRendererChain>(fContext,
1012                                                                  fOptionsForPathRendererChain);
1013     }
1014     return fPathRendererChain->getTessellationPathRenderer();
1015 }
1016 
1017 #endif // SK_GPU_V1
1018 
1019 void GrDrawingManager::flushIfNecessary() {
1020     auto direct = fContext->asDirectContext();
1021     if (!direct) {
1022         return;
1023     }
1024 
1025     auto resourceCache = direct->priv().getResourceCache();
1026     if (resourceCache && resourceCache->requestsFlush()) {
1027         if (this->flush({}, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
1028             this->submitToGpu(false);
1029         }
1030         resourceCache->purgeAsNeeded();
1031     }
1032 }
1033 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1034 void GrDrawingManager::disableStencilCulling() {
1035     GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "disableStencilCulling", fContext);
1036     fDisableStencilCulling = true;
1037 }
1038 #endif