/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include <algorithm>
#include <memory>

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrDDLTask.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrRenderTaskCluster.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTTopoSort.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/GrWritePixelsRenderTask.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrSDFTControl.h"
#include "src/image/SkSurface_Gpu.h"

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   bool reduceOpsTaskSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) { }

GrDrawingManager::~GrDrawingManager() {
    this->closeAllTasks();
    this->removeRenderTasks();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxies' parameter.
bool GrDrawingManager::flush(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    // As of now we only short-circuit if we got an explicit list of surfaces to flush.
    if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
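        // If none of the requested proxies are referenced by any task in the DAG, there is
        // nothing to flush on their behalf; report a trivially-successful submit and bail.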
        bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
            bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
                return task && task->isUsed(proxy);
            });
            return !used;
        });
        if (allUnused) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto dContext = fContext->asDirectContext();
    SkASSERT(dContext);
    dContext->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = dContext->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = dContext->priv().resourceProvider();
    auto resourceCache = dContext->priv().getResourceCache();

    // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
    // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

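    // The flush state carries the per-flush machinery (the resource provider, the token tracker,
    // and the CPU staging-buffer cache) that every task prepared and executed below draws from.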
    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fFlushingRenderTaskIDs.reserve_back(fDAG.count());
        for (const auto& task : fDAG) {
            if (task) {
                task->gatherIDs(&fFlushingRenderTaskIDs);
            }
        }

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, SkMakeSpan(fFlushingRenderTaskIDs));
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(*fContext->priv().caps());
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipmapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipmapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipmapsAreDirty());
                }
            });
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

    bool usingReorderedDAG = false;
    GrResourceAllocator resourceAllocator(dContext);
    if (fReduceOpsTaskSplitting) {
        usingReorderedDAG = this->reorderTasks(&resourceAllocator);
        if (!usingReorderedDAG) {
            resourceAllocator.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
    for (const auto& task : fDAG) {
        SkDEBUGCODE(task->dump(/* printDependencies */ true);)
    }
#endif

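    // Assign backing surfaces to proxies. When the DAG was reordered above, the allocator has
    // already planned its assignment; otherwise gather proxy intervals from the sorted DAG first.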
    if (!resourceAllocator.failedInstantiation()) {
        if (!usingReorderedDAG) {
            for (const auto& task : fDAG) {
                SkASSERT(task);
                task->gatherProxyIntervals(&resourceAllocator);
            }
            resourceAllocator.planAssignment();
        }
        resourceAllocator.assign();
    }
    bool flushed = !resourceAllocator.failedInstantiation() &&
                   this->executeRenderTasks(&flushState);
    this->removeRenderTasks();

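    // Hand the client's flush info (semaphores, finished/submitted procs, surface access and
    // state requests) down to the backend, regardless of whether any render tasks executed.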
    gpu->executeFlushInfo(proxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(),
                                   SkMakeSpan(fFlushingRenderTaskIDs));
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return true;
}

bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}

bool GrDrawingManager::executeRenderTasks(GrOpFlushState* flushState) {
#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing %d opsTasks\n", fDAG.count());
    for (int i = 0; i < fDAG.count(); ++i) {
        if (fDAG[i]) {
            SkString label;
            label.printf("task %d/%d", i, fDAG.count());
            fDAG[i]->dump(label, {}, true, true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

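    // Prepare pass: give every instantiated task a chance to set up its GPU data (e.g. record
    // uploads) before any task is executed.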
    for (const auto& renderTask : fDAG) {
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;
    int numRenderTasksExecuted = 0;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask->disown(this);
        onFlushRenderTask = nullptr;
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal op lists.
    for (const auto& renderTask : fDAG) {
        SkASSERT(renderTask);
        if (!renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        if (++numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    return anyRenderTasksExecuted;
}

void GrDrawingManager::removeRenderTasks() {
    for (const auto& task : fDAG) {
        SkASSERT(task);
        if (!task->unique() || task->requiresExplicitCleanup()) {
            // TODO: Eventually uniqueness should be guaranteed: http://skbug.com/7111.
            // DDLs, however, will always require an explicit notification for when they
            // can clean up resources.
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.reset();
    fLastRenderTasks.reset();
    for (const sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        onFlushRenderTask->disown(this);
    }
    fOnFlushRenderTasks.reset();
}

void GrDrawingManager::sortTasks() {
    if (!GrTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fDAG)) {
        SkDEBUGFAIL("Render task topo sort failed.");
        return;
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
    if (!fDAG.empty()) {
        GrOpsTask* prevOpsTask = fDAG[0]->asOpsTask();
        for (int i = 1; i < fDAG.count(); ++i) {
            GrOpsTask* curOpsTask = fDAG[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(prevOpsTask->target(0) != curOpsTask->target(0));
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

// Reorder the array to match the llist without reffing & unreffing sk_sp's.
// Both args must contain the same objects.
// This is basically a shim because clustering uses LList but the rest of drawmgr uses array.
template <typename T>
static void reorder_array_by_llist(const SkTInternalLList<T>& llist, SkTArray<sk_sp<T>>* array) {
    int i = 0;
    for (T* t : llist) {
        // Release the pointer that used to live here so it doesn't get unreffed.
        [[maybe_unused]] T* old = array->at(i).release();
        array->at(i++).reset(t);
    }
    SkASSERT(i == array->count());
}

bool GrDrawingManager::reorderTasks(GrResourceAllocator* resourceAllocator) {
    SkASSERT(fReduceOpsTaskSplitting);
    SkTInternalLList<GrRenderTask> llist;
    bool clustered = GrClusterRenderTasks(SkMakeSpan(fDAG), &llist);
    if (!clustered) {
        return false;
    }

    for (GrRenderTask* task : llist) {
        task->gatherProxyIntervals(resourceAllocator);
    }
    if (!resourceAllocator->planAssignment()) {
        return false;
    }
    if (!resourceAllocator->makeBudgetHeadroom()) {
        auto dContext = fContext->asDirectContext();
        SkASSERT(dContext);
        dContext->priv().getGpu()->stats()->incNumReorderedDAGsOverBudget();
        return false;
    }
    reorder_array_by_llist(llist, &fDAG);

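    // With the tasks now clustered, fold adjacent compatible opsTasks together. mergeFrom()
    // reports how many of the following tasks were absorbed; those are disowned and dropped
    // when the DAG is compacted below.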
    int newCount = 0;
    for (int i = 0; i < fDAG.count(); i++) {
        sk_sp<GrRenderTask>& task = fDAG[i];
        if (auto opsTask = task->asOpsTask()) {
            size_t remaining = fDAG.size() - i - 1;
            SkSpan<sk_sp<GrRenderTask>> nextTasks{fDAG.end() - remaining, remaining};
            int removeCount = opsTask->mergeFrom(nextTasks);
            for (const auto& removed : nextTasks.first(removeCount)) {
                removed->disown(this);
            }
            i += removeCount;
        }
        fDAG[newCount++] = std::move(task);
    }
    fDAG.resize_back(newCount);
    return true;
}

void GrDrawingManager::closeAllTasks() {
    const GrCaps& caps = *fContext->priv().caps();
    for (auto& task : fDAG) {
        if (task) {
            task->makeClosed(caps);
        }
    }
}

GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
    SkASSERT(!fDAG.empty());
    if (!task) {
        return nullptr;
    }
    // Release 'fDAG.back()' and grab the raw pointer, in case the SkTArray grows
    // and reallocates during emplace_back.
    // TODO: Either use std::vector that can do this for us, or use SkSTArray to get the
    // perf win.
    fDAG.emplace_back(fDAG.back().release());
    return (fDAG[fDAG.count() - 2] = std::move(task)).get();
}

GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
    if (!task) {
        return nullptr;
    }
    return fDAG.push_back(std::move(task)).get();
}

static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu(false);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, access, info, newState);
    for (GrSurfaceProxy* proxy : proxies) {
        resolve_and_mipmap(gpu, proxy);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (auto prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed() || prior == task);
    }
#endif
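    // A null 'task' clears the entry so a stale render task is never returned for this proxy.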
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

GrOpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}


void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    this->closeAllTasks();
    fActiveOpsTask = nullptr;

    this->sortTasks();

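    // Transfer ownership of the recorded tasks to the DDL; the drawing manager's DAG must be
    // left empty afterwards.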
    fDAG.swap(ddl->fRenderTasks);
    SkASSERT(fDAG.empty());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::createDDLTask(sk_sp<const SkDeferredDisplayList> ddl,
                                     sk_sp<GrRenderTargetProxy> newDest,
                                     SkIPoint offset) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replay target.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
                ddl->characterization().origin(),
                ddl->priv().targetProxy()->backingStoreDimensions().height(),
                ddl->priv().targetProxy()->msaaDirtyRect());
        newDest->markMSAADirty(nativeRect);
    }
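    // Replaying the DDL will overwrite the base level of the replay target, so any existing mip
    // levels must be marked dirty.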
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest.get();

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    // Add a task to handle drawing and lifetime management of the DDL.
    SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
                                                                       std::move(newDest),
                                                                       std::move(ddl),
                                                                       offset));
    SkASSERT(ddlTask->isClosed());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fActiveOpsTask) {
        SkASSERT(!fDAG.empty());
        SkASSERT(!fActiveOpsTask->isClosed());
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }

    for (int i = 0; i < fDAG.count(); ++i) {
        if (fActiveOpsTask != fDAG[i].get()) {
            // The resolveTask associated with the activeTask remains open for as long as the
            // activeTask does.
            bool isActiveResolveTask =
                    fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
            SkASSERT(isActiveResolveTask || fDAG[i]->isClosed());
        }
    }

    if (!fDAG.empty() && !fDAG.back()->isClosed()) {
        SkASSERT(fActiveOpsTask == fDAG.back().get());
    }
}
#endif

void GrDrawingManager::closeActiveOpsTask() {
    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }
}

sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                              sk_sp<GrArenas> arenas,
                                              bool flushTimeOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();

    sk_sp<GrOpsTask> opsTask(new GrOpsTask(this,
                                           std::move(surfaceView),
                                           fContext->priv().auditTrail(),
                                           std::move(arenas)));
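    // Constructing the GrOpsTask is expected to have registered it as the last render task for
    // its target; the assert below relies on that.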
    SkASSERT(this->getLastRenderTask(opsTask->target(0)) == opsTask.get());

    if (flushTimeOpsTask) {
        fOnFlushRenderTasks.push_back(opsTask);
    } else {
        this->appendTask(opsTask);

        fActiveOpsTask = opsTask.get();
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
    return static_cast<GrTextureResolveRenderTask*>(task);
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    const GrCaps& caps = *fContext->priv().caps();

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);

    if (fActiveOpsTask && (fActiveOpsTask->target(0) == proxy.get())) {
        SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
        this->insertTaskBeforeLast(waitTask);
        // In this case we keep the current renderTask open but just insert the new waitTask
        // before it in the list. The waitTask will never need to trigger any resolves or mip
        // map generation which is the main advantage of going through the proxy version.
        // Additionally we would've had to temporarily set the wait task as the lastRenderTask
        // on the proxy, add the dependency, and then reset the lastRenderTask to
        // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
        // dependencies so that we don't unnecessarily reorder the waitTask before them.
        // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
        // semaphore even though they don't need to be for correctness.

        // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
        // get a circular self dependency of waitTask on waitTask.
        waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
        fActiveOpsTask->addDependency(waitTask.get());
    } else {
        // In this case we just close the previous RenderTask and start and append the waitTask
        // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
        // there is a lastTask on the proxy we make waitTask depend on that task. This
        // dependency isn't strictly needed but it does keep the DAG from reordering the
        // waitTask earlier and blocking more tasks.
        if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
            waitTask->addDependency(lastTask);
        }
        this->setLastRenderTask(proxy.get(), waitTask.get());
        this->closeActiveOpsTask();
        this->appendTask(waitTask);
    }
    waitTask->makeClosed(caps);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    this->closeActiveOpsTask();

    GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

sk_sp<GrRenderTask> GrDrawingManager::newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
                                                        SkIRect srcRect,
                                                        sk_sp<GrSurfaceProxy> dst,
                                                        SkIPoint dstPoint,
                                                        GrSurfaceOrigin origin) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    // It'd be nicer to check this in GrCopyRenderTask::Make. This gets complicated because of
    // "active ops task" tracking. dst will be the target of our copy task but it might also be the
    // target of the active ops task. We currently require the active ops task to be closed before
    // making a new task that targets the same proxy. However, if we first close the active ops
    // task, then fail to make a copy task, the next active ops task may target the same proxy. This
    // will trip an assert related to unnecessary ops task splitting.
    if (src->framebufferOnly()) {
        return nullptr;
    }

    this->closeActiveOpsTask();

    sk_sp<GrRenderTask> task = GrCopyRenderTask::Make(this,
                                                      src,
                                                      srcRect,
                                                      std::move(dst),
                                                      dstPoint,
                                                      origin);
    if (!task) {
        return nullptr;
    }

    this->appendTask(task);

    const GrCaps& caps = *fContext->priv().caps();
    // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, src.get(), GrMipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return task;
}

bool GrDrawingManager::newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect rect,
                                          GrColorType srcColorType,
                                          GrColorType dstColorType,
                                          const GrMipLevel levels[],
                                          int levelCount) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeActiveOpsTask();
    const GrCaps& caps = *fContext->priv().caps();

    // On platforms that prefer flushes over VRAM use (i.e., ANGLE) we're better off forcing a
    // complete flush here.
    if (!caps.preferVRAMUseOverFlushes()) {
        this->flushSurfaces(SkSpan<GrSurfaceProxy*>{},
                            SkSurface::BackendSurfaceAccess::kNoAccess,
                            GrFlushInfo{},
                            nullptr);
    }

    GrRenderTask* task = this->appendTask(GrWritePixelsTask::Make(this,
                                                                  std::move(dst),
                                                                  rect,
                                                                  srcColorType,
                                                                  dstColorType,
                                                                  levels,
                                                                  levelCount));
    if (!task) {
        return false;
    }

    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<GrPathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<GrPathRendererChain>(fContext,
                                                                   fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

GrPathRenderer* GrDrawingManager::getTessellationPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain = std::make_unique<GrPathRendererChain>(fContext,
                                                                   fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getTessellationPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush({}, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}