/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrResourceAllocator.h"

#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrResourceCache.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"
#include "src/gpu/ganesh/GrSurfaceProxyPriv.h"
#include "src/gpu/ganesh/GrTexture.h"  // IWYU pragma: keep
#include "src/gpu/ganesh/GrTextureProxy.h"

#include <cstddef>
#include <limits>
#include <utility>

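// Debug-only unique IDs for Intervals and Registers. The loops below skip the sentinel
// SK_InvalidUniqueID, so a wrapped counter can never hand out the invalid ID.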
#ifdef SK_DEBUG
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

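// Each proxy tracked by the allocator gets a single Interval covering its first and last use
// within the flush. addInterval() either extends the proxy's existing interval or creates a
// new one, keeping fIntvlList sorted by increasing start.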
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse, AllowRecycling allowRecycling
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read-only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated, or it must be a lazy proxy that we
            // instantiated above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur within the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        if (allowRecycling == AllowRecycling::kNo) {
            // In this case, a preexisting interval is made non-reusable since its proxy is
            // sampled into a secondary command buffer.
            intvl->disallowRecycling();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    if (allowRecycling == AllowRecycling::kNo) {
        newIntvl->disallowRecycling();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

static bool user_cache_proxy(GrSurfaceProxy* proxy) {
    GrTextureProxy* texProxy = proxy->asTextureProxy();
    if (texProxy) {
        return texProxy->getUserCacheTarget();
    }
    return false;
}

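// A Register stands for one physical GrSurface slot that one or more non-overlapping intervals
// can share. At construction we opportunistically look for an existing resource: a scratch
// texture by scratch key (when the caps allow reuse, or the proxy is a user-cache target),
// otherwise a uniquely keyed surface from the resource cache.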
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        skgpu::ScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy) ||
            user_cache_proxy(originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(
                    fScratchKey, /*label=*/"ResourceAllocatorRegister");
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount,
                                                 AllowRecycling allowRecycling) const {
    if (allowRecycling == AllowRecycling::kNo) {
        return false;
    }

    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

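// Instantiate 'proxy' out of this register: reuse the surface found at construction time,
// create a brand-new surface for the register's originating proxy, or share the surface the
// originating proxy was already assigned.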
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make the surface budgeted if this proxy is budgeted.
    if (skgpu::Budgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy's unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

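// The inline percentages in the two insertion routines below record roughly how often each
// branch is taken; the cheap cases (empty list, new head, new tail) are checked before
// falling back to a linear walk.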
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
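// Uniquely keyed proxies get a dedicated register (tracked in fUniqueKeyRegisters) and never
// enter the free pool; all other proxies are matched to registers by scratch key.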
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy,
                                                        skgpu::ScratchKey(),
                                                        resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    skgpu::ScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses(),
                                 intvl->allowRecycling())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

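// Sweep the intervals in increasing start order: expire intervals that ended before the
// current start (possibly recycling their registers through the free pool) and assign a
// register to every live, non-lazy, uninstantiated proxy. Only fully-lazy proxies are
// actually instantiated at this stage; everything else waits for assign().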
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

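// Estimate the GPU memory that assign() will need for budgeted, not-yet-instantiated proxies
// and ask the resource cache to purge enough to make room. A register that will reuse an
// existing surface costs nothing, and a register shared by several proxies is counted once.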
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (skgpu::Budgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment().
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

void GrResourceAllocator::reset() {
    // NOTE: We do not reset the fFailedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

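// Instantiate all the proxies: walk the finished intervals in increasing start order,
// instantiating lazy proxies directly and everything else through its assigned register.
// The first failure stops further instantiation and leaves fFailedInstantiation set.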
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif