• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "GrDrawingManager.h"
9 
10 #include "GrBackendSemaphore.h"
11 #include "GrContext.h"
12 #include "GrContextPriv.h"
13 #include "GrGpu.h"
14 #include "GrOnFlushResourceProvider.h"
15 #include "GrOpList.h"
16 #include "GrRenderTargetContext.h"
17 #include "GrRenderTargetProxy.h"
18 #include "GrResourceAllocator.h"
19 #include "GrResourceProvider.h"
20 #include "GrSoftwarePathRenderer.h"
21 #include "GrSurfaceProxyPriv.h"
22 #include "GrTextureContext.h"
23 #include "GrTextureOpList.h"
24 #include "GrTextureProxy.h"
25 #include "GrTextureProxyPriv.h"
26 
27 #include "SkDeferredDisplayList.h"
28 #include "SkSurface_Gpu.h"
29 #include "SkTTopoSort.h"
30 
31 #include "GrTracing.h"
32 #include "text/GrAtlasTextContext.h"
33 
// Turn on/off the sorting of opLists at flush time.
//
// NOTE(review): this #ifndef immediately #defines its own guard macro, so
// SK_DISABLE_RENDER_TARGET_SORTING ends up defined unconditionally and the
// compile-time default below is always 'false'. That appears to be a
// deliberate temporary kill switch rather than a typo -- sorting can still be
// forced on per-context via GrContextOptions::Enable::kYes (see the
// constructor) -- but confirm before "fixing" it.
#ifndef SK_DISABLE_RENDER_TARGET_SORTING
   #define SK_DISABLE_RENDER_TARGET_SORTING
#endif

// Default used when the context options leave sorting unspecified.
#ifdef SK_DISABLE_RENDER_TARGET_SORTING
static const bool kDefaultSortRenderTargets = false;
#else
static const bool kDefaultSortRenderTargets = true;
#endif
44 
GrDrawingManager(GrContext * context,const GrPathRendererChain::Options & optionsForPathRendererChain,const GrAtlasTextContext::Options & optionsForAtlasTextContext,GrSingleOwner * singleOwner,GrContextOptions::Enable sortRenderTargets)45 GrDrawingManager::GrDrawingManager(GrContext* context,
46                                    const GrPathRendererChain::Options& optionsForPathRendererChain,
47                                    const GrAtlasTextContext::Options& optionsForAtlasTextContext,
48                                    GrSingleOwner* singleOwner,
49                                    GrContextOptions::Enable sortRenderTargets)
50         : fContext(context)
51         , fOptionsForPathRendererChain(optionsForPathRendererChain)
52         , fOptionsForAtlasTextContext(optionsForAtlasTextContext)
53         , fSingleOwner(singleOwner)
54         , fAbandoned(false)
55         , fAtlasTextContext(nullptr)
56         , fPathRendererChain(nullptr)
57         , fSoftwarePathRenderer(nullptr)
58         , fFlushing(false) {
59 
60     if (GrContextOptions::Enable::kNo == sortRenderTargets) {
61         fSortRenderTargets = false;
62     } else if (GrContextOptions::Enable::kYes == sortRenderTargets) {
63         fSortRenderTargets = true;
64     } else {
65         fSortRenderTargets = kDefaultSortRenderTargets;
66     }
67 }
68 
cleanup()69 void GrDrawingManager::cleanup() {
70     for (int i = 0; i < fOpLists.count(); ++i) {
71         // no opList should receive a new command after this
72         fOpLists[i]->makeClosed(*fContext->caps());
73 
74         // We shouldn't need to do this, but it turns out some clients still hold onto opLists
75         // after a cleanup.
76         // MDB TODO: is this still true?
77         if (!fOpLists[i]->unique()) {
78             // TODO: Eventually this should be guaranteed unique.
79             // https://bugs.chromium.org/p/skia/issues/detail?id=7111
80             fOpLists[i]->endFlush();
81         }
82     }
83 
84     fOpLists.reset();
85 
86     delete fPathRendererChain;
87     fPathRendererChain = nullptr;
88     SkSafeSetNull(fSoftwarePathRenderer);
89 
90     fOnFlushCBObjects.reset();
91 }
92 
GrDrawingManager::~GrDrawingManager() {
    // cleanup() closes/releases all opLists and deletes the path renderers.
    // It is idempotent, so this is safe even if abandon() already ran it.
    this->cleanup();
}
96 
void GrDrawingManager::abandon() {
    // Set the flag first so wasAbandoned() reports true from here on, then
    // release everything we own.
    fAbandoned = true;
    this->cleanup();
}
101 
freeGpuResources()102 void GrDrawingManager::freeGpuResources() {
103     for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
104         if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
105             // it's safe to just do this because we're iterating in reverse
106             fOnFlushCBObjects.removeShuffle(i);
107         }
108     }
109 
110     // a path renderer may be holding onto resources
111     delete fPathRendererChain;
112     fPathRendererChain = nullptr;
113     SkSafeSetNull(fSoftwarePathRenderer);
114 }
115 
// MDB TODO: make use of the 'proxy' parameter.
//
// Flushes all queued opLists: closes them, optionally topologically sorts
// them, lets onFlush callback objects (e.g. atlas managers) inject their own
// work, assigns GPU resources in allocator-determined chunks, executes
// everything, and finally notifies the resource cache and callbacks.
// Returns whether the requested semaphores were actually submitted.
GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
                                                      GrResourceCache::FlushType type,
                                                      int numSemaphores,
                                                      GrBackendSemaphore backendSemaphores[]) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "internalFlush", fContext);

    // Re-entrant flushes, and flushes after abandon(), are no-ops.
    if (fFlushing || this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    fFlushing = true;

    for (int i = 0; i < fOpLists.count(); ++i) {
        // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
        // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
        // but need to be flushed anyway. Closing such GrOpLists here will mean new
        // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
        fOpLists[i]->makeClosed(*fContext->caps());
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fOpLists.count()) {
        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
        for (int i = 1; i < fOpLists.count(); ++i) {
            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();

            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }

            prevOpList = curOpList;
        }
    }
#endif

    if (fSortRenderTargets) {
        // Reorder fOpLists so each list runs after the lists it depends on.
        SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
        SkASSERT(result);
    }

    GrGpu* gpu = fContext->contextPriv().getGpu();

    GrOpFlushState flushState(gpu, fContext->contextPriv().resourceProvider(),
                              &fTokenTracker);

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        // Snapshot the IDs of the opLists in this flush so the callbacks can
        // correlate their preFlush/postFlush invocations with them.
        fFlushingOpListIDs.reset(fOpLists.count());
        for (int i = 0; i < fOpLists.count(); ++i) {
            fFlushingOpListIDs[i] = fOpLists[i]->uniqueID();
        }
        SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider,
                                      fFlushingOpListIDs.begin(), fFlushingOpListIDs.count(),
                                      &renderTargetContexts);
            for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                if (!onFlushOpList) {
                    continue;   // Odd - but not a big deal
                }
#ifdef SK_DEBUG
                // OnFlush callbacks are already invoked during flush, and are therefore expected to
                // handle resource allocation & usage on their own. (No deferred or lazy proxies!)
                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p) {
                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                });
#endif
                onFlushOpList->makeClosed(*fContext->caps());
                onFlushOpList->prepare(&flushState);
                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
            }
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fOpLists.count(); ++i) {
        SkDEBUGCODE(fOpLists[i]->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
        // Feed every proxy interval to the allocator, then execute the
        // opLists in whatever [startIndex, stopIndex) chunks it hands back.
        GrResourceAllocator alloc(fContext->contextPriv().resourceProvider());
        for (int i = 0; i < fOpLists.count(); ++i) {
            fOpLists[i]->gatherProxyIntervals(&alloc);
            alloc.markEndOfOpList(i);
        }

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                // Drop ops whose proxies couldn't be instantiated rather than
                // failing the entire flush.
                for (int i = startIndex; i < stopIndex; ++i) {
                    fOpLists[i]->purgeOpsWithUninstantiatedProxies();
                }
            }

            if (this->executeOpLists(startIndex, stopIndex, &flushState)) {
                flushed = true;
            }
        }
    }

    fOpLists.reset();

    GrSemaphoresSubmitted result = gpu->finishFlush(numSemaphores, backendSemaphores);

    // We always have to notify the cache when it requested a flush so it can reset its state.
    if (flushed || type == GrResourceCache::FlushType::kCacheRequested) {
        fContext->contextPriv().getResourceCache()->notifyFlushOccurred(type);
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingOpListIDs.begin(),
                                   fFlushingOpListIDs.count());
    }
    fFlushingOpListIDs.reset();
    fFlushing = false;

    return result;
}
247 
// Executes fOpLists[startIndex, stopIndex): instantiates (or drops) each
// opList's backing surface, prepares and uploads their data, runs any pending
// onFlush opLists first, then the normal opLists. Returns true if at least
// one normal opList actually executed.
bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fOpLists.count());

    GrResourceProvider* resourceProvider = fContext->contextPriv().resourceProvider();
    bool anyOpListsExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
             continue;
        }

        if (resourceProvider->explicitlyAllocateGPUResources()) {
            // Explicit-allocation path: the allocator should already have
            // instantiated the target; if not, drop the whole opList.
            if (!fOpLists[i]->isInstantiated()) {
                // If the backing surface wasn't allocated drop the draw of the entire opList.
                fOpLists[i] = nullptr;
                continue;
            }
        } else {
            // Lazy-allocation path: instantiate now, dropping the opList on failure.
            if (!fOpLists[i]->instantiate(resourceProvider)) {
                SkDebugf("OpList failed to instantiate.\n");
                fOpLists[i] = nullptr;
                continue;
            }
        }

        // TODO: handle this instantiation via lazy surface proxies?
        // Instantiate all deferred proxies (being built on worker threads) so we can upload them
        fOpLists[i]->instantiateDeferredProxies(fContext->contextPriv().resourceProvider());
        fOpLists[i]->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // Execute the onFlush op lists first, if any.
    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
        if (!onFlushOpList->execute(flushState)) {
            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
        }
        SkASSERT(onFlushOpList->unique());
        onFlushOpList = nullptr;
    }
    fOnFlushCBOpLists.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }

        if (fOpLists[i]->execute(flushState)) {
            anyOpListsExecuted = true;
        }
    }

    SkASSERT(!flushState->commandBuffer());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the OpLists so that the last resources to be freed are those
    // that are written to in the OpLists. This helps to make sure the most recently used resources
    // are the last to be purged by the resource cache.
    flushState->reset();

    // Release flush-time bookkeeping for the executed range and drop our refs.
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }
        if (!fOpLists[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fOpLists[i]->endFlush();
        }
        fOpLists[i] = nullptr;
    }

    return anyOpListsExecuted;
}
325 
prepareSurfaceForExternalIO(GrSurfaceProxy * proxy,int numSemaphores,GrBackendSemaphore backendSemaphores[])326 GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
327         GrSurfaceProxy* proxy, int numSemaphores, GrBackendSemaphore backendSemaphores[]) {
328     if (this->wasAbandoned()) {
329         return GrSemaphoresSubmitted::kNo;
330     }
331     SkASSERT(proxy);
332 
333     GrSemaphoresSubmitted result = GrSemaphoresSubmitted::kNo;
334     if (proxy->priv().hasPendingIO() || numSemaphores) {
335         result = this->flush(proxy, numSemaphores, backendSemaphores);
336     }
337 
338     if (!proxy->instantiate(fContext->contextPriv().resourceProvider())) {
339         return result;
340     }
341 
342     GrGpu* gpu = fContext->contextPriv().getGpu();
343     GrSurface* surface = proxy->priv().peekSurface();
344 
345     if (gpu && surface->asRenderTarget()) {
346         gpu->resolveRenderTarget(surface->asRenderTarget());
347     }
348     return result;
349 }
350 
void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    // Stores the bare pointer; it is later invoked around flushes and may be
    // dropped by freeGpuResources() if retainOnFreeGpuResources() is false.
    // NOTE(review): no ownership appears to be transferred here -- confirm
    // callers keep the object alive for the manager's lifetime.
    fOnFlushCBObjects.push_back(onFlushCBObject);
}
354 
moveOpListsToDDL(SkDeferredDisplayList * ddl)355 void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
356 #ifndef SK_RASTER_RECORDER_IMPLEMENTATION
357     for (int i = 0; i < fOpLists.count(); ++i) {
358         // no opList should receive a new command after this
359         fOpLists[i]->makeClosed(*fContext->caps());
360     }
361 
362     ddl->fOpLists = std::move(fOpLists);
363 #endif
364 }
365 
void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
                                          GrRenderTargetProxy* newDest) {
#ifndef SK_RASTER_RECORDER_IMPLEMENTATION
    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;
    // Append (additional refs to) the DDL's recorded opLists onto our queue.
    fOpLists.push_back_n(ddl->fOpLists.count(), ddl->fOpLists.begin());
#endif
}
375 
newRTOpList(GrRenderTargetProxy * rtp,bool managedOpList)376 sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* rtp,
377                                                           bool managedOpList) {
378     SkASSERT(fContext);
379 
380     // This is  a temporary fix for the partial-MDB world. In that world we're not reordering
381     // so ops that (in the single opList world) would've just glommed onto the end of the single
382     // opList but referred to a far earlier RT need to appear in their own opList.
383     if (!fOpLists.empty()) {
384         fOpLists.back()->makeClosed(*fContext->caps());
385     }
386 
387     auto resourceProvider = fContext->contextPriv().resourceProvider();
388 
389     sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(rtp,
390                                                                 resourceProvider,
391                                                                 fContext->getAuditTrail()));
392     SkASSERT(rtp->getLastOpList() == opList.get());
393 
394     if (managedOpList) {
395         fOpLists.push_back() = opList;
396     }
397 
398     return opList;
399 }
400 
newTextureOpList(GrTextureProxy * textureProxy)401 sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textureProxy) {
402     SkASSERT(fContext);
403 
404     // This is  a temporary fix for the partial-MDB world. In that world we're not reordering
405     // so ops that (in the single opList world) would've just glommed onto the end of the single
406     // opList but referred to a far earlier RT need to appear in their own opList.
407     if (!fOpLists.empty()) {
408         fOpLists.back()->makeClosed(*fContext->caps());
409     }
410 
411     sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->contextPriv().resourceProvider(),
412                                                       textureProxy,
413                                                       fContext->getAuditTrail()));
414 
415     SkASSERT(textureProxy->getLastOpList() == opList.get());
416 
417     fOpLists.push_back() = opList;
418 
419     return opList;
420 }
421 
getAtlasTextContext()422 GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {
423     if (!fAtlasTextContext) {
424         fAtlasTextContext = GrAtlasTextContext::Make(fOptionsForAtlasTextContext);
425     }
426 
427     return fAtlasTextContext.get();
428 }
429 
/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {
    // Build the chain lazily on first request.
    if (!fPathRendererChain) {
        fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* renderer = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (renderer || !allowSW) {
        return renderer;
    }

    // Nothing in the chain could handle the path; fall back to the (lazily
    // created) software renderer if it can draw it.
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer =
                new GrSoftwarePathRenderer(fContext->contextPriv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching);
    }
    if (GrPathRenderer::CanDrawPath::kNo != fSoftwarePathRenderer->canDrawPath(args)) {
        renderer = fSoftwarePathRenderer;
    }

    return renderer;
}
459 
getCoverageCountingPathRenderer()460 GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
461     if (!fPathRendererChain) {
462         fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
463     }
464     return fPathRendererChain->getCoverageCountingPathRenderer();
465 }
466 
makeRenderTargetContext(sk_sp<GrSurfaceProxy> sProxy,sk_sp<SkColorSpace> colorSpace,const SkSurfaceProps * surfaceProps,bool managedOpList)467 sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
468                                                             sk_sp<GrSurfaceProxy> sProxy,
469                                                             sk_sp<SkColorSpace> colorSpace,
470                                                             const SkSurfaceProps* surfaceProps,
471                                                             bool managedOpList) {
472     if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
473         return nullptr;
474     }
475 
476     // SkSurface catches bad color space usage at creation. This check handles anything that slips
477     // by, including internal usage.
478     if (!SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
479         SkDEBUGFAIL("Invalid config and colorspace combination");
480         return nullptr;
481     }
482 
483     sk_sp<GrRenderTargetProxy> rtp(sk_ref_sp(sProxy->asRenderTargetProxy()));
484 
485     return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(fContext, this, std::move(rtp),
486                                                                   std::move(colorSpace),
487                                                                   surfaceProps,
488                                                                   fContext->getAuditTrail(),
489                                                                   fSingleOwner, managedOpList));
490 }
491 
makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,sk_sp<SkColorSpace> colorSpace)492 sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
493                                                              sk_sp<SkColorSpace> colorSpace) {
494     if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
495         return nullptr;
496     }
497 
498     // SkSurface catches bad color space usage at creation. This check handles anything that slips
499     // by, including internal usage.
500     if (!SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
501         SkDEBUGFAIL("Invalid config and colorspace combination");
502         return nullptr;
503     }
504 
505     // GrTextureRenderTargets should always be using a GrRenderTargetContext
506     SkASSERT(!sProxy->asRenderTargetProxy());
507 
508     sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));
509 
510     return sk_sp<GrTextureContext>(new GrTextureContext(fContext, this, std::move(textureProxy),
511                                                         std::move(colorSpace),
512                                                         fContext->getAuditTrail(),
513                                                         fSingleOwner));
514 }
515