/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrResourceAllocator.h"

#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrRenderTargetProxy.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"
#include "src/gpu/ganesh/GrSurfaceProxyPriv.h"
#include "src/gpu/ganesh/GrTexture.h"

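// Overview, as inferred from the methods in this file: the allocator performs a
// linear-scan-style assignment of GrSurfaces to proxies. Callers first record a
// use interval per proxy via addInterval(), then run planAssignment() to map
// intervals onto "registers" (placeholder surfaces), optionally call
// makeBudgetHeadroom() to purge the resource cache, and finally call assign()
// to instantiate the proxies. A sketch of that sequence (hypothetical variable
// names; the real call sites live elsewhere in Ganesh):
//
//     GrResourceAllocator alloc(dContext);
//     // one interval per proxy, spanning the task indices that use it
//     alloc.addInterval(proxy, firstTaskIndex, lastTaskIndex,
//                       GrResourceAllocator::ActualUse::kYes);
//     if (alloc.planAssignment() && alloc.makeBudgetHeadroom()) {
//         alloc.assign();
//     }
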
#ifdef SK_DEBUG
#include <atomic>

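// Debug-only unique-ID generators for Intervals and Registers. The do/while
// loop guards against the pathological wrap-around case: if the 32-bit counter
// ever wraps, fetch_add could hand back SK_InvalidUniqueID, so that value is
// skipped.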
uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read-only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur within the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

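// A Register is the allocator's placeholder for one physical GrSurface. The
// constructor eagerly looks for a reusable surface: by scratch key in the
// resource cache when the proxy may use scratch textures, or by unique key
// otherwise. If nothing is found, the surface is created later, in
// instantiateSurface().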
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        skgpu::ScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(
                    fScratchKey, /*label=*/"ResourceAllocatorRegister");
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount) const {
    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

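// Instantiation resolves a Register to a concrete GrSurface in one of three
// ways: reuse the surface found at Register-construction time, create a brand
// new surface for the register's originating proxy, or share the surface the
// originating proxy already received (for later proxies mapped to the same
// register).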
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make surface budgeted if this proxy is budgeted.
    if (skgpu::Budgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

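// The percentage comments in the two insert routines below record how often
// each branch was observed to be taken (presumably from profiling); the
// head/tail fast paths make the common insertions O(1).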
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

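// Register lookup order: (1) an existing register for a uniquely-keyed proxy,
// (2) the free pool, keyed by scratch key, holding registers recycled by
// expire(), (3) a brand new register. Note that the free-pool filter below
// accepts everything; scratch-key compatibility is the only criterion.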
// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy,
                                                        skgpu::ScratchKey(),
                                                        resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    skgpu::ScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

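// planAssignment() is the core linear-scan pass: intervals are pulled in order
// of increasing start, active intervals whose lifetimes have ended are expired
// (their registers returned to the free pool), and each surviving interval is
// mapped to a register. No GPU surfaces are created here except for fully-lazy
// proxies; actual instantiation is deferred to assign().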
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // Expire all the remaining intervals to drain the active interval list.
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

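// makeBudgetHeadroom() sums the GPU memory that assign() will need for proxies
// that are budgeted but not yet backed by a surface, then asks the resource
// cache to purge enough to fit. Registers that found an existing surface cost
// nothing extra, and each register is counted at most once via its
// accountedForInBudget flag, even when several proxies share it.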
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (skgpu::Budgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment().
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

void GrResourceAllocator::reset() {
    // NOTE: We do not reset the fFailedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

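// assign() is the final pass: walk the finished intervals (in increasing-start
// order, i.e. roughly the order the surfaces are first needed), instantiate
// lazy proxies, and back every remaining proxy with its register's surface.
// The first failure sets fFailedInstantiation and ends the walk.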
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif