/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfacePriv.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureProxy.h"

#if GR_TRACK_INTERVAL_CREATION
    #include <atomic>

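    // Generate a unique, non-zero debug ID for each interval; the loop skips
    // SK_InvalidUniqueID if the counter ever wraps around to it.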
    uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
        static std::atomic<uint32_t> nextID{1};
        uint32_t id;
        do {
            id = nextID++;
        } while (id == SK_InvalidUniqueID);
        return id;
    }
#endif

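// Instantiate the interval's proxy with 's', keeping a ref on the interval itself so
// that expire() can later detach and recycle the surface.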
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);
    fAssignedSurface = s;
    fProxy->priv().assign(std::move(s));
}

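// Mark as recyclable any interval whose proxy is referenced only by the allocator;
// such a proxy's surface can be returned to the free pool once the interval expires.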
void GrResourceAllocator::determineRecyclability() {
    for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        if (cur->proxy()->canSkipResourceAllocator()) {
            // These types of proxies can slip in here if they require a stencil buffer
            continue;
        }

        if (!cur->proxy()->refCntGreaterThan(cur->uses())) {
            // All the refs on the proxy are known to the resource allocator thus no one
            // should be holding onto it outside of Ganesh.
            cur->markAsRecyclable();
        }
    }
}

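// Record the op index at which opsTask 'opsTaskIndex' ends; assign() uses these
// indices to detect opsTask boundaries.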
void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
    SkASSERT(!fAssigned);      // We shouldn't be adding any opsTasks after (or during) assignment

    SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
    if (!fEndOfOpsTaskOpIndices.empty()) {
        SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
    }

    // This is the first op index of the next opsTask
    fEndOfOpsTaskOpIndices.push_back(this->curOp());
    SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
}

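// By destruction time, assign() should have drained the interval lists and the hash.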
GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

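// Add (or extend) the usage interval [start, end] for 'proxy'. Proxies that can skip
// the allocator or are read-only never receive intervals; a repeated use of the same
// proxy extends its existing interval rather than creating a new one.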
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(fResourceProvider)) {
            fLazyInstantiationError = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
        // Revise the interval for an existing use
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl;
    if (fFreeIntervalList) {
        newIntvl = fFreeIntervalList;
        fFreeIntervalList = newIntvl->next();
        newIntvl->setNext(nullptr);
        newIntvl->resetTo(proxy, start, end);
    } else {
        newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
    }

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.add(newIntvl);
}

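// Remove and return the head of the list, or nullptr if the list is empty.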
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

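// Note: the percentage comments in the two insertion methods below appear to be
// measured hit rates for each branch.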
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

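// Unlink and return the entire list; the caller takes ownership of the chain.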
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
    Interval* tmp = fHead;
    fHead = nullptr;
    fTail = nullptr;
    return tmp;
}

// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
    const GrScratchKey& key = surface->resourcePriv().getScratchKey();

    if (!key.isValid()) {
        return; // can't do it w/o a valid scratch key
    }

    if (surface->getUniqueKey().isValid()) {
        // If the surface has a unique key we throw it back into the resource cache.
        // If things get really tight 'findSurfaceFor' may pull it back out but there is
        // no need to have it in tight rotation.
        return;
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
#endif
    // TODO: fix this insertion so we get a more LRU-ish behavior
    fFreePool.insert(key, surface.release());
}

// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a usable one, create a new one.
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy) {
    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        if (sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                proxy->asTextureProxy()->getUniqueKey())) {
            return surface;
        }
    }

    // Next, look in the free pool
    GrScratchKey key;

    proxy->priv().computeScratchKey(*fResourceProvider->caps(), &key);

    auto filter = [] (const GrSurface* s) {
        return true;
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, try to grab a new one from the resource cache
    return proxy->priv().createSurface(fResourceProvider);
}

// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* temp = fActiveIntvls.popHead();
        SkASSERT(!temp->next());

        if (temp->wasAssignedSurface()) {
            sk_sp<GrSurface> surface = temp->detachSurface();

            if (temp->isRecyclable()) {
                this->recycleSurface(std::move(surface));
            }
        }

        // Add temp to the free interval list so it can be reused
        SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
        temp->setNext(fFreeIntervalList);
        fFreeIntervalList = temp;
    }
}

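// Returns true if the next interval waiting to be assigned starts at or after the
// beginning of the next opsTask, i.e., an intermediate flush here would land on an
// opsTask boundary.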
bool GrResourceAllocator::onOpsTaskBoundary() const {
    if (fIntvlList.empty()) {
        SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
        // Although technically on an opsTask boundary there is no need to force an
        // intermediate flush here
        return false;
    }

    const Interval* tmp = fIntvlList.peekHead();
    return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
}

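// Stop assignment after the current opsTask (via 'stopIndex') and expire every active
// interval that won't survive the resulting partial flush.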
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
    *stopIndex = fCurOpsTaskIndex+1;

    // This is interrupting the allocation of resources for this flush. We need to
    // proactively clear the active interval list of any intervals that aren't
    // guaranteed to survive the partial flush lest they become zombies (i.e.,
    // holding a deleted surface proxy).
    const Interval* tmp = fIntvlList.peekHead();
    SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());

    fCurOpsTaskIndex++;
    SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);

    this->expire(tmp->start());
}

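// Walk the intervals in increasing-start order, assigning a surface to each proxy as
// its interval begins and expiring intervals (recycling their surfaces) as they end.
// If the resource provider goes over budget on an opsTask boundary, assignment stops
// early via 'stopIndex'. Returns false only when there is nothing to render.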
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
                                        : AssignError::kNoError;

    SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());

    fIntvlHash.reset(); // we don't need the interval hash anymore

    if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
        return false; // nothing to render
    }

    *startIndex = fCurOpsTaskIndex;
    *stopIndex = fEndOfOpsTaskOpIndices.count();

    if (fIntvlList.empty()) {
        fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
        return true;          // no resources to assign
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
             *startIndex, *stopIndex, fNumOpsTasks);
    SkDebugf("EndOfOpsTaskIndices: ");
    for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
        SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
    }
    SkDebugf("\n");
#endif

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    while (Interval* cur = fIntvlList.popHead()) {
        while (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
            fCurOpsTaskIndex++;
            SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
        }

        this->expire(cur->start());

        if (cur->proxy()->isInstantiated()) {
            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate draws on opsTask boundaries
                if (this->onOpsTaskBoundary()) {
                    this->forceIntermediateFlush(stopIndex);
                    return true;
                }
            }

            continue;
        }

        if (cur->proxy()->isLazy()) {
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }
        } else if (sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy())) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                     surface->uniqueID().asUInt(),
                     cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate draws on opsTask boundaries
            if (this->onOpsTaskBoundary()) {
                this->forceIntermediateFlush(stopIndex);
                return true;
            }
        }
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif