/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawingManager.h"
#include "GrBackendSemaphore.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrMemoryPool.h"
#include "GrOnFlushResourceProvider.h"
#include "GrOpList.h"
#include "GrRenderTargetContext.h"
#include "GrRenderTargetProxy.h"
#include "GrResourceAllocator.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTexture.h"
#include "GrTextureContext.h"
#include "GrTextureOpList.h"
#include "GrTexturePriv.h"
#include "GrTextureProxy.h"
#include "GrTextureProxyPriv.h"
#include "GrTracing.h"
#include "SkDeferredDisplayList.h"
#include "SkSurface_Gpu.h"
#include "SkTTopoSort.h"
#include "ccpr/GrCoverageCountingPathRenderer.h"
#include "text/GrTextContext.h"

GrDrawingManager::OpListDAG::OpListDAG(bool explicitlyAllocating,
                                       GrContextOptions::Enable sortOpLists) {
    if (GrContextOptions::Enable::kNo == sortOpLists) {
        fSortOpLists = false;
    } else if (GrContextOptions::Enable::kYes == sortOpLists) {
        fSortOpLists = true;
    } else {
        // By default we always enable sorting when we're explicitly allocating GPU resources
        fSortOpLists = explicitlyAllocating;
    }
}

GrDrawingManager::OpListDAG::~OpListDAG() {}

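// Collects the unique IDs of the opLists currently in the DAG. flush() uses this to tell the
// onFlush callback objects (e.g. atlases) which opLists are about to be flushed.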
void GrDrawingManager::OpListDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fOpLists.count());
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (fOpLists[i]) {
            (*idArray)[i] = fOpLists[i]->uniqueID();
        }
    }
}

void GrDrawingManager::OpListDAG::reset() {
    fOpLists.reset();
}

void GrDrawingManager::OpListDAG::removeOpList(int index) {
    if (!fOpLists[index]->unique()) {
        // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
        fOpLists[index]->endFlush();
    }

    fOpLists[index] = nullptr;
}

void GrDrawingManager::OpListDAG::removeOpLists(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }
        this->removeOpList(i);
    }
}

void GrDrawingManager::OpListDAG::add(sk_sp<GrOpList> opList) {
    fOpLists.emplace_back(std::move(opList));
}

void GrDrawingManager::OpListDAG::add(const SkTArray<sk_sp<GrOpList>>& opLists) {
    fOpLists.push_back_n(opLists.count(), opLists.begin());
}

void GrDrawingManager::OpListDAG::swap(SkTArray<sk_sp<GrOpList>>* opLists) {
    SkASSERT(opLists->empty());
    opLists->swap(fOpLists);
}

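// When sorting is enabled, topologically sorts the opLists so that each one appears after the
// opLists it depends on; the debug block then verifies the sort didn't leave two adjacent
// opLists targeting the same proxy (which would indicate an unnecessary split).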
void GrDrawingManager::OpListDAG::prepForFlush() {
    if (fSortOpLists) {
        SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
        SkASSERT(result);
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fOpLists.count()) {
        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
        for (int i = 1; i < fOpLists.count(); ++i) {
            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();

            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }

            prevOpList = curOpList;
        }
    }
#endif
}

void GrDrawingManager::OpListDAG::closeAll(const GrCaps* caps) {
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (fOpLists[i]) {
            fOpLists[i]->makeClosed(*caps);
        }
    }
}

void GrDrawingManager::OpListDAG::cleanup(const GrCaps* caps) {
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (!fOpLists[i]) {
            continue;
        }

        // no opList should receive a new command after this
        fOpLists[i]->makeClosed(*caps);

        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
        // after a cleanup.
        // MDB TODO: is this still true?
        if (!fOpLists[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fOpLists[i]->endFlush();
        }
    }

    fOpLists.reset();
}

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   const GrTextContext::Options& optionsForTextContext,
                                   GrSingleOwner* singleOwner,
                                   bool explicitlyAllocating,
                                   GrContextOptions::Enable sortOpLists,
                                   GrContextOptions::Enable reduceOpListSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fOptionsForTextContext(optionsForTextContext)
        , fSingleOwner(singleOwner)
        , fAbandoned(false)
        , fDAG(explicitlyAllocating, sortOpLists)
        , fTextContext(nullptr)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false) {
    if (GrContextOptions::Enable::kNo == reduceOpListSplitting) {
        fReduceOpListSplitting = false;
    } else if (GrContextOptions::Enable::kYes == reduceOpListSplitting) {
        fReduceOpListSplitting = true;
    } else {
        // For now, this is only turned on when explicitly enabled. Once mini-flushes are
        // implemented it should be enabled whenever sorting is enabled.
        fReduceOpListSplitting = false; // sortOpLists
    }
}

void GrDrawingManager::cleanup() {
    fDAG.cleanup(fContext->contextPriv().caps());

    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;

    fOnFlushCBObjects.reset();
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

void GrDrawingManager::abandon() {
    fAbandoned = true;
    this->cleanup();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxy' parameter.
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy*,
                                              int numSemaphores,
                                              GrBackendSemaphore backendSemaphores[]) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());

    GrGpu* gpu = fContext->contextPriv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }
    fFlushing = true;

    // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
    // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
    // but need to be flushed anyway. Closing such GrOpLists here will mean new
    // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->contextPriv().caps());
    fActiveOpList = nullptr;

    fDAG.prepForFlush();
    SkASSERT(SkToBool(fVertexBufferSpace) == SkToBool(fIndexBufferSpace));
    if (!fVertexBufferSpace) {
        fVertexBufferSpace.reset(new char[GrBufferAllocPool::kDefaultBufferSize]());
        fIndexBufferSpace.reset(new char[GrBufferAllocPool::kDefaultBufferSize]());
    }

    GrOpFlushState flushState(gpu, fContext->contextPriv().resourceProvider(), &fTokenTracker,
                              fVertexBufferSpace.get(), fIndexBufferSpace.get());

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingOpListIDs);

        SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider,
                                      fFlushingOpListIDs.begin(), fFlushingOpListIDs.count(),
                                      &renderTargetContexts);
            for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                if (!onFlushOpList) {
                    continue; // Odd - but not a big deal
                }
#ifdef SK_DEBUG
                // OnFlush callbacks are already invoked during flush, and are therefore expected
                // to handle resource allocation & usage on their own. (No deferred or lazy
                // proxies!)
                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p) {
                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                });
#endif
                onFlushOpList->makeClosed(*fContext->contextPriv().caps());
                onFlushOpList->prepare(&flushState);
                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
            }
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fDAG.numOpLists(); ++i) {
        SkDEBUGCODE(fDAG.opList(i)->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

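    // Walk the DAG once to record each proxy's lifetime interval, then let the
    // GrResourceAllocator assign backing surfaces and hand back runs of opLists
    // ([startIndex, stopIndex)) that can be executed with those assignments in place.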
    {
        GrResourceAllocator alloc(fContext->contextPriv().resourceProvider(),
                                  flushState.deinstantiateProxyTracker());
        for (int i = 0; i < fDAG.numOpLists(); ++i) {
            if (fDAG.opList(i)) {
                fDAG.opList(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpList(i);
        }

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numOpListsExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    if (fDAG.opList(i) && !fDAG.opList(i)->isFullyInstantiated()) {
                        // If the backing surface wasn't allocated drop the entire opList.
                        fDAG.removeOpList(i);
                    }
                    if (fDAG.opList(i)) {
                        fDAG.opList(i)->purgeOpsWithUninstantiatedProxies();
                    }
                }
            }

            if (this->executeOpLists(startIndex, stopIndex, &flushState, &numOpListsExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numOpLists(); ++i) {
        // If there are any remaining opLists at this point, make sure they will not survive the
        // flush. Otherwise we need to call endFlush() on them.
        // http://skbug.com/7111
        SkASSERT(!fDAG.opList(i) || fDAG.opList(i)->unique());
    }
#endif
    fDAG.reset();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opLists
    // will be stored in the DDL's GrOpMemoryPools.
    GrOpMemoryPool* opMemoryPool = fContext->contextPriv().opMemoryPool();
    SkASSERT(opMemoryPool->isEmpty());
#endif

    GrSemaphoresSubmitted result = gpu->finishFlush(numSemaphores, backendSemaphores);

    flushState.deinstantiateProxyTracker()->deinstantiateAllProxies();

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        fContext->contextPriv().getResourceCache()->purgeAsNeeded();
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingOpListIDs.begin(),
                                   fFlushingOpListIDs.count());
    }
    fFlushingOpListIDs.reset();
    fFlushing = false;

    return result;
}

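// Prepares and executes the opLists in [startIndex, stopIndex). Returns true if any opList
// actually executed work. '*numOpListsExecuted' is carried across calls so the periodic
// GPU flush (see kMaxOpListsBeforeFlush below) also counts opLists from earlier ranges.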
bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                      int* numOpListsExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numOpLists());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opLists: %d to %d out of [%d, %d]\n",
             startIndex, stopIndex, 0, fDAG.numOpLists());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.opList(i)) {
            fDAG.opList(i)->dump(true);
        }
    }
#endif

    GrResourceProvider* resourceProvider = fContext->contextPriv().resourceProvider();
    bool anyOpListsExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fDAG.opList(i)) {
            continue;
        }

        GrOpList* opList = fDAG.opList(i);

        if (resourceProvider->explicitlyAllocateGPUResources()) {
            if (!opList->isFullyInstantiated()) {
                // If the backing surface wasn't allocated drop the draw of the entire opList.
                fDAG.removeOpList(i);
                continue;
            }
        } else {
            if (!opList->instantiate(resourceProvider)) {
                fDAG.removeOpList(i);
                continue;
            }
        }

        // TODO: handle this instantiation via lazy surface proxies?
        // Instantiate all deferred proxies (being built on worker threads) so we can upload them
        opList->instantiateDeferredProxies(fContext->contextPriv().resourceProvider());
        opList->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of
    // resources for each command buffer associated with the oplists. If this gets too large we
    // can cause the devices to go OOM. In practice we usually only hit this case in our tests,
    // but to be safe we put a cap on the number of oplists we will execute before flushing to
    // the GPU to relieve some memory pressure.
    static constexpr int kMaxOpListsBeforeFlush = 100;

    // Execute the onFlush op lists first, if any.
    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
        if (!onFlushOpList->execute(flushState)) {
            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
        }
        SkASSERT(onFlushOpList->unique());
        onFlushOpList = nullptr;
        (*numOpListsExecuted)++;
        if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
            flushState->gpu()->finishFlush(0, nullptr);
            *numOpListsExecuted = 0;
        }
    }
    fOnFlushCBOpLists.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fDAG.opList(i)) {
            continue;
        }

        if (fDAG.opList(i)->execute(flushState)) {
            anyOpListsExecuted = true;
        }
        (*numOpListsExecuted)++;
        if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
            flushState->gpu()->finishFlush(0, nullptr);
            *numOpListsExecuted = 0;
        }
    }

    SkASSERT(!flushState->commandBuffer());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the OpLists so that the last resources to be freed are
    // those that are written to in the OpLists. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    fDAG.removeOpLists(startIndex, stopIndex);

    return anyOpListsExecuted;
}

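// Flushes any work pending on 'proxy' and then, once the proxy is instantiated, resolves its
// render target (if it has one) and regenerates dirty mip levels so the backing surface is
// up to date before being handed to an external consumer.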
GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
        GrSurfaceProxy* proxy, int numSemaphores, GrBackendSemaphore backendSemaphores[]) {
    if (this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(proxy);

    GrGpu* gpu = fContext->contextPriv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrSemaphoresSubmitted result = GrSemaphoresSubmitted::kNo;
    if (proxy->priv().hasPendingIO() || numSemaphores) {
        result = this->flush(proxy, numSemaphores, backendSemaphores);
    }

    if (!proxy->instantiate(fContext->contextPriv().resourceProvider())) {
        return result;
    }

    GrSurface* surface = proxy->peekSurface();
    if (auto* rt = surface->asRenderTarget()) {
        gpu->resolveRenderTarget(rt);
    }
    if (auto* tex = surface->asTexture()) {
        if (tex->texturePriv().mipMapped() == GrMipMapped::kYes &&
            tex->texturePriv().mipMapsAreDirty()) {
            gpu->regenerateMipMapLevels(tex);
        }
    }

    SkDEBUGCODE(this->validate());
    return result;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

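// Hands the recorded opLists (and any pending CCPR paths) over to the SkDeferredDisplayList so
// they can later be replayed against a new destination via copyOpListsFromDDL().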
void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no opList should receive a new command after this
    fDAG.closeAll(fContext->contextPriv().caps());
    fActiveOpList = nullptr;

    fDAG.swap(&ddl->fOpLists);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
                                          GrRenderTargetProxy* newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->contextPriv().caps());
        fActiveOpList = nullptr;
    }

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fOpLists);

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
        SkASSERT(!fActiveOpList);
    } else {
        if (fActiveOpList) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpList->isClosed());
            SkASSERT(fActiveOpList == fDAG.back());
        }

        for (int i = 0; i < fDAG.numOpLists(); ++i) {
            if (fActiveOpList != fDAG.opList(i)) {
                SkASSERT(fDAG.opList(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpList == fDAG.back());
        }
    }
}
#endif

sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* rtp,
                                                          bool managedOpList) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
        // In this case we need to close all the opLists that rely on the current contents of
        // 'rtp'. That is bc we're going to update the content of the proxy so they need to be
        // split in case they use both the old and new content. (This is a bit of an overkill:
        // they really only need to be split if they ever reference proxy's contents again but
        // that is hard to predict/handle).
        if (GrOpList* lastOpList = rtp->getLastOpList()) {
            lastOpList->closeThoseWhoDependOnMe(*fContext->contextPriv().caps());
        }
    } else if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->contextPriv().caps());
        fActiveOpList = nullptr;
    }

    auto resourceProvider = fContext->contextPriv().resourceProvider();

    sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
                                                        resourceProvider,
                                                        fContext->contextPriv().refOpMemoryPool(),
                                                        rtp,
                                                        fContext->contextPriv().getAuditTrail()));
    SkASSERT(rtp->getLastOpList() == opList.get());

    if (managedOpList) {
        fDAG.add(opList);

        if (!fDAG.sortingOpLists() || !fReduceOpListSplitting) {
            fActiveOpList = opList.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opList;
}

sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textureProxy) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
        // In this case we need to close all the opLists that rely on the current contents of
        // 'texture'. That is bc we're going to update the content of the proxy so they need to
        // be split in case they use both the old and new content. (This is a bit of an
        // overkill: they really only need to be split if they ever reference proxy's contents
        // again but that is hard to predict/handle).
        if (GrOpList* lastOpList = textureProxy->getLastOpList()) {
            lastOpList->closeThoseWhoDependOnMe(*fContext->contextPriv().caps());
        }
    } else if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->contextPriv().caps());
        fActiveOpList = nullptr;
    }

    sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->contextPriv().resourceProvider(),
                                                      fContext->contextPriv().refOpMemoryPool(),
                                                      textureProxy,
                                                      fContext->contextPriv().getAuditTrail()));

    SkASSERT(textureProxy->getLastOpList() == opList.get());

    fDAG.add(opList);
    if (!fDAG.sortingOpLists() || !fReduceOpListSplitting) {
        fActiveOpList = opList.get();
    }

    SkDEBUGCODE(this->validate());
    return opList;
}

GrTextContext* GrDrawingManager::getTextContext() {
    if (!fTextContext) {
        fTextContext = GrTextContext::Make(fOptionsForTextContext);
    }

    return fTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->contextPriv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    GrResourceCache* resourceCache = fContext->contextPriv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        this->flush(nullptr, 0, nullptr);
        resourceCache->purgeAsNeeded();
    }
}

sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
                                                            sk_sp<GrSurfaceProxy> sProxy,
                                                            sk_sp<SkColorSpace> colorSpace,
                                                            const SkSurfaceProps* surfaceProps,
                                                            bool managedOpList) {
    if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage.
    if (!SkSurface_Gpu::Valid(fContext->contextPriv().caps(), sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    sk_sp<GrRenderTargetProxy> rtp(sk_ref_sp(sProxy->asRenderTargetProxy()));

    return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(
                                                    fContext, this, std::move(rtp),
                                                    std::move(colorSpace),
                                                    surfaceProps,
                                                    fContext->contextPriv().getAuditTrail(),
                                                    fSingleOwner, managedOpList));
}

sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
                                                             sk_sp<SkColorSpace> colorSpace) {
    if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage.
    if (!SkSurface_Gpu::Valid(fContext->contextPriv().caps(), sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    // GrTextureRenderTargets should always be using a GrRenderTargetContext
    SkASSERT(!sProxy->asRenderTargetProxy());

    sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));

    return sk_sp<GrTextureContext>(new GrTextureContext(fContext, this, std::move(textureProxy),
                                                        std::move(colorSpace),
                                                        fContext->contextPriv().getAuditTrail(),
                                                        fSingleOwner));
}