/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrTexture.h"
#include "include/private/GrRecordingContext.h"
#include "include/private/SkDeferredDisplayList.h"
#include "src/core/SkTTopoSort.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureContext.h"
#include "src/gpu/GrTextureOpList.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrTextContext.h"
#include "src/image/SkSurface_Gpu.h"

GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks)
        : fSortRenderTasks(sortRenderTasks) {}

GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {}

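// Copies the unique ID of each render task into 'idArray', index-aligned with fRenderTasks.
// Slots corresponding to null render tasks are left unwritten.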
void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fRenderTasks.count());
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            (*idArray)[i] = fRenderTasks[i]->uniqueID();
        }
    }
}

void GrDrawingManager::RenderTaskDAG::reset() {
    fRenderTasks.reset();
}

void GrDrawingManager::RenderTaskDAG::removeRenderTask(int index) {
    if (!fRenderTasks[index]->unique()) {
        // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
        fRenderTasks[index]->endFlush();
    }

    fRenderTasks[index] = nullptr;
}

void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fRenderTasks[i]) {
            continue;
        }
        this->removeRenderTask(i);
    }
}

bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) {
            return true;
        }
    }

    return false;
}

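// Appends 'renderTask' to the end of the DAG and returns a raw pointer to the stored task.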
GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
    return fRenderTasks.emplace_back(std::move(renderTask)).get();
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
    SkASSERT(!fRenderTasks.empty());
    // Release 'fRenderTasks.back()' and grab the raw pointer. If we pass in a reference to an array
    // element, and the array grows during emplace_back, then the reference will become invalidated.
    fRenderTasks.emplace_back(fRenderTasks.back().release());
    return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
}

void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& opLists) {
    fRenderTasks.push_back_n(opLists.count(), opLists.begin());
}

void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* opLists) {
    SkASSERT(opLists->empty());
    opLists->swap(fRenderTasks);
}

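// Gets the DAG ready to flush: topologically sorts the render tasks (when sorting is enabled) so
// that every task comes after the tasks it depends on.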
void GrDrawingManager::RenderTaskDAG::prepForFlush() {
    if (fSortRenderTasks) {
        SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(
                &fRenderTasks);
        SkASSERT(result);
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fRenderTasks.count()) {
        GrRenderTargetOpList* prevOpList = fRenderTasks[0]->asRenderTargetOpList();
        for (int i = 1; i < fRenderTasks.count(); ++i) {
            GrRenderTargetOpList* curOpList = fRenderTasks[i]->asRenderTargetOpList();

            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }

            prevOpList = curOpList;
        }
    }
#endif
}

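// Closes every render task in the DAG so no further work can be recorded into them.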
void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            fRenderTasks[i]->makeClosed(*caps);
        }
    }
}

void GrDrawingManager::RenderTaskDAG::cleanup(const GrCaps* caps) {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (!fRenderTasks[i]) {
            continue;
        }

        // no renderTask should receive a dependency
        fRenderTasks[i]->makeClosed(*caps);

        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
        // after a cleanup.
        // MDB TODO: is this still true?
        if (!fRenderTasks[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fRenderTasks[i]->endFlush();
        }
    }

    fRenderTasks.reset();
}

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   const GrTextContext::Options& optionsForTextContext,
                                   bool sortRenderTasks,
                                   bool reduceOpListSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fOptionsForTextContext(optionsForTextContext)
        , fDAG(sortRenderTasks)
        , fTextContext(nullptr)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false)
        , fReduceOpListSplitting(reduceOpListSplitting) {
}

void GrDrawingManager::cleanup() {
    fDAG.cleanup(fContext->priv().caps());

    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;

    fOnFlushCBObjects.reset();
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->priv().abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxy' parameter.
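// Flushes everything in the DAG: closes any still-open opLists, lets the onFlush callback objects
// (e.g. atlas managers) add their own work, assigns backing surfaces via GrResourceAllocator,
// executes the render tasks, and finally forwards the semaphore/finish info to GrGpu::finishFlush.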
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
        SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
        const GrPrepareForExternalIORequests& externalRequests) {
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    SkDEBUGCODE(this->validate());

    if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
            !externalRequests.hasRequests()) {
        bool canSkip = numProxies > 0;
        for (int i = 0; i < numProxies && canSkip; ++i) {
            canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
        }
        if (canSkip) {
            return GrSemaphoresSubmitted::kNo;
        }
    }

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    if (!gpu) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    fFlushing = true;

    auto resourceProvider = direct->priv().resourceProvider();
    auto resourceCache = direct->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opLists won't be closed but need to be
    // flushed anyway. Closing such opLists here will mean new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpList = nullptr;

    fDAG.prepForFlush();
    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingRenderTaskIDs);

        SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
                                      fFlushingRenderTaskIDs.count(), &renderTargetContexts);
            for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                if (!onFlushOpList) {
                    continue;   // Odd - but not a big deal
                }
#ifdef SK_DEBUG
                // OnFlush callbacks are already invoked during flush, and are therefore expected to
                // handle resource allocation & usage on their own. (No deferred or lazy proxies!)
                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                });
#endif
                onFlushOpList->makeClosed(*fContext->priv().caps());
                onFlushOpList->prepare(&flushState);
                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
            }
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        if (fDAG.renderTask(i)) {
            SkDEBUGCODE(fDAG.renderTask(i)->dump(true);)
        }
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
        GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker()
                                  SkDEBUGCODE(, fDAG.numRenderTasks()));
        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fDAG.renderTask(i)) {
                fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpList(i);
        }
        alloc.determineRecyclability();

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numRenderTasksExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    GrRenderTask* renderTask = fDAG.renderTask(i);
                    if (!renderTask) {
                        continue;
                    }
                    if (!renderTask->isInstantiated()) {
                        // If the backing surface wasn't allocated, drop the entire renderTask.
                        fDAG.removeRenderTask(i);
                        continue;
                    }
                    renderTask->handleInternalAllocationFailure();
                }
            }

            if (this->executeRenderTasks(
                    startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        // If there are any remaining opLists at this point, make sure they will not survive the
        // flush. Otherwise we need to call endFlush() on them.
        // http://skbug.com/7111
        SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
    }
#endif
    fDAG.reset();
    this->clearDDLTargets();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opLists
    // will be stored in the DDL's GrOpMemoryPools.
    GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
    SkASSERT(opMemoryPool->isEmpty());
#endif

    GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
                                                    externalRequests);

    flushState.deinstantiateProxyTracker()->deinstantiateAllProxies();

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
                                   fFlushingRenderTaskIDs.count());
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return result;
}

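// Prepares, then executes, the render tasks in [startIndex, stopIndex). A mid-flush GPU flush is
// inserted after every kMaxRenderTasksBeforeFlush tasks to cap resource usage. Returns true if
// any render task actually executed work.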
bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                          int* numRenderTasksExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opLists: %d to %d out of [%d, %d]\n",
                            startIndex, stopIndex, 0, fDAG.numRenderTasks());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.renderTask(i)) {
            fDAG.renderTask(i)->dump(true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fDAG.renderTask(i)) {
            continue;
        }

        GrRenderTask* renderTask = fDAG.renderTask(i);
        SkASSERT(renderTask->isInstantiated());
        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;

    // Execute the onFlush op lists first, if any.
    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
        if (!onFlushOpList->execute(flushState)) {
            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
        }
        SkASSERT(onFlushOpList->unique());
        onFlushOpList = nullptr;
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                           GrFlushInfo(), GrPrepareForExternalIORequests());
            *numRenderTasksExecuted = 0;
        }
    }
    fOnFlushCBOpLists.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fDAG.renderTask(i)) {
            continue;
        }

        if (fDAG.renderTask(i)->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                           GrFlushInfo(), GrPrepareForExternalIORequests());
            *numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->commandBuffer());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    fDAG.removeRenderTasks(startIndex, stopIndex);

    return anyRenderTasksExecuted;
}

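// Flushes the whole DAG and then makes the backing surfaces of 'proxies' ready for external use:
// MSAA render targets are resolved and dirty mipmaps are regenerated.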
GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies,
                                                      SkSurface::BackendSurfaceAccess access,
                                                      const GrFlushInfo& info) {
    if (this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
                                               GrPrepareForExternalIORequests());
    for (int i = 0; i < numProxies; ++i) {
        if (!proxies[i]->isInstantiated()) {
            return result;
        }
        GrSurface* surface = proxies[i]->peekSurface();
        if (auto* rt = surface->asRenderTarget()) {
            gpu->resolveRenderTarget(rt);
        }
        // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
        // case their backend textures are being stolen.
        // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
        // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
        if (auto* textureProxy = proxies[i]->asTextureProxy()) {
            if (textureProxy->mipMapsAreDirty()) {
                gpu->regenerateMipMapLevels(textureProxy->peekTexture());
                textureProxy->markMipMapsClean();
            }
        }
    }

    SkDEBUGCODE(this->validate());
    return result;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

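// Called at the end of DDL recording: transfers ownership of this manager's render tasks (and any
// pending CCPR paths) into 'ddl'.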
void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpList = nullptr;

    fDAG.swap(&ddl->fRenderTasks);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

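// Replays 'ddl' into this manager by appending its recorded render tasks to the DAG, with the
// proxy that backs the replay SkSurface ('newDest') jammed into the DDL's lazy proxy data.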
void GrDrawingManager::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
                                              GrRenderTargetProxy* newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->priv().caps());
        fActiveOpList = nullptr;
    }

    this->addDDLTarget(newDest);

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fRenderTasks);

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
        SkASSERT(!fActiveOpList);
    } else {
        if (fActiveOpList) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpList->isClosed());
            SkASSERT(fActiveOpList == fDAG.back());
        }

        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fActiveOpList != fDAG.renderTask(i)) {
                SkASSERT(fDAG.renderTask(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpList == fDAG.back());
        }
    }
}
#endif

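// Called before a new opList is created for 'target': closes the render tasks that depend on the
// proxy's current contents (in sorting/reduced-splitting mode), or the single active opList
// (otherwise).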
void GrDrawingManager::closeRenderTasksForNewOpList(GrSurfaceProxy* target) {
    if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
        // In this case we need to close all the renderTasks that rely on the current contents of
        // 'target'. That is because we're going to update the content of the proxy so they need to
        // be split in case they use both the old and new content. (This is a bit of overkill: they
        // really only need to be split if they ever reference the proxy's contents again, but that
        // is hard to predict/handle.)
        if (GrRenderTask* lastRenderTask = target->getLastRenderTask()) {
            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->priv().caps());
        fActiveOpList = nullptr;
    }
}

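// Creates a new GrRenderTargetOpList for 'rtp'. When 'managedOpList' is true the opList is added
// to the DAG (and flushed with it); otherwise the caller assumes responsibility for it.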
sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(sk_sp<GrRenderTargetProxy> rtp,
                                                          bool managedOpList) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeRenderTasksForNewOpList(rtp.get());

    sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
                                                        fContext->priv().refOpMemoryPool(),
                                                        rtp,
                                                        fContext->priv().auditTrail()));
    SkASSERT(rtp->getLastRenderTask() == opList.get());

    if (managedOpList) {
        fDAG.add(opList);

        if (!fDAG.sortingRenderTasks() || !fReduceOpListSplitting) {
            fActiveOpList = opList.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opList;
}

sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(sk_sp<GrTextureProxy> textureProxy) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeRenderTasksForNewOpList(textureProxy.get());

    sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->priv().refOpMemoryPool(),
                                                      textureProxy,
                                                      fContext->priv().auditTrail()));

    SkASSERT(textureProxy->getLastRenderTask() == opList.get());

    fDAG.add(opList);
    if (!fDAG.sortingRenderTasks() || !fReduceOpListSplitting) {
        fActiveOpList = opList.get();
    }

    SkDEBUGCODE(this->validate());
    return opList;
}

GrRenderTask* GrDrawingManager::newTextureResolveRenderTask(
        sk_sp<GrTextureProxy> textureProxy, GrTextureResolveFlags flags, const GrCaps& caps) {
    // Unlike in the "new opList" cases, we do not want to close the active opList, nor (if we are
    // in sorting and opList reduction mode) the render tasks that depend on the proxy's current
    // state. This is because those opLists can still receive new ops and because if they refer to
    // the mipmapped version of 'textureProxy', they will then come to depend on the render task
    // being created here.
    // NOTE: In either case, 'textureProxy' should already be closed at this point (i.e., its state
    // is finished).
    SkDEBUGCODE(auto* previousTaskBeforeMipsResolve = textureProxy->getLastRenderTask();)
    auto textureResolveTask = GrTextureResolveRenderTask::Make(textureProxy, flags, caps);
    // GrTextureResolveRenderTask::Make should have closed the texture proxy's previous task.
    SkASSERT(!previousTaskBeforeMipsResolve || previousTaskBeforeMipsResolve->isClosed());
    SkASSERT(textureProxy->getLastRenderTask() == textureResolveTask.get());

    // Add the new textureResolveTask before the fActiveOpList (if not in
    // sorting/opList-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    return fDAG.addBeforeLast(std::move(textureResolveTask));
}

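// Lazily creates the GrTextContext on first use.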
GrTextContext* GrDrawingManager::getTextContext() {
    if (!fTextContext) {
        fTextContext = GrTextContext::Make(fOptionsForTextContext);
    }

    return fTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
                    GrPrepareForExternalIORequests());
        resourceCache->purgeAsNeeded();
    }
}

sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
        sk_sp<GrSurfaceProxy> sProxy,
        GrColorType colorType,
        sk_sp<SkColorSpace> colorSpace,
        const SkSurfaceProps* surfaceProps,
        bool managedOpList) {
    if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage.
    if (!SkSurface_Gpu::Valid(fContext->priv().caps(), sProxy->backendFormat())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    sk_sp<GrRenderTargetProxy> renderTargetProxy(sk_ref_sp(sProxy->asRenderTargetProxy()));

    return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(fContext,
                                                                  std::move(renderTargetProxy),
                                                                  colorType,
                                                                  std::move(colorSpace),
                                                                  surfaceProps,
                                                                  managedOpList));
}

sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
                                                             GrColorType colorType,
                                                             SkAlphaType alphaType,
                                                             sk_sp<SkColorSpace> colorSpace) {
    if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage.
    if (!SkSurface_Gpu::Valid(fContext->priv().caps(), sProxy->backendFormat())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    // GrTextureRenderTargets should always be using a GrRenderTargetContext
    SkASSERT(!sProxy->asRenderTargetProxy());

    sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));

    return sk_sp<GrTextureContext>(new GrTextureContext(fContext,
                                                        std::move(textureProxy),
                                                        colorType,
                                                        alphaType,
                                                        std::move(colorSpace)));
}