/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"

#ifdef SK_DEBUG
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

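// Records that 'proxy' is used over the op index range [start, end]. If the proxy already has an
// interval, the existing interval is extended; otherwise a new one is created and indexed by the
// proxy's unique ID. Read-only proxies are instantiated here (if lazy) and never get an interval.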
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

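// A Register tracks the GrSurface that will back one or more proxies. On construction we look for
// an existing surface to reuse: a scratch texture matching the scratch key or, for uniquely keyed
// proxies, the surface already associated with that unique key.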
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        GrScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(fScratchKey);
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

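// A register may be returned to the free pool only if it is scratch-keyed, not uniquely keyed,
// and every outstanding ref on its proxy is one the allocator already knows about.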
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount) const {
    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

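// Backs 'proxy' with a GrSurface: either the pre-existing surface found when the register was
// created, a newly created surface (for the originating proxy), or the originating proxy's own
// surface (for any later proxy sharing this register). Returns false if no surface was available.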
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make surface budgeted if this proxy is budgeted.
    if (SkBudgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy, GrScratchKey(), resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    GrScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

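// Walks the intervals in increasing start order, expiring intervals that have ended and picking
// a register (i.e., a future GrSurface) for each proxy that still needs one. Fully-lazy proxies
// are instantiated here; actual surface creation for everything else is deferred to assign().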
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset();  // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

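// Sums the GPU memory the planned assignment will require for budgeted, not-yet-instantiated
// proxies and asks the resource cache to purge enough to make room for it. Returns false if the
// cache cannot free that much.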
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (SkBudgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

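// Clears the per-assignment state so the allocator can be reused.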
void GrResourceAllocator::reset() {
    // NOTE: We do not reset the failedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

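// Instantiates every proxy recorded during planning, in increasing start order: lazy proxies via
// their instantiation callbacks and all others via their assigned register. Stops at the first
// failure and returns false.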
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif
