/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"

#ifdef SK_DEBUG
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

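// Records (or extends) the usage interval [start, end] for 'proxy'. Read-only proxies are
// handled immediately since they never share a backing surface; repeated calls for a proxy
// that is already being tracked simply extend its existing interval.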
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

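// A Register ties a group of proxies to a single backing GrSurface. On construction we try to
// find an already-existing surface that can serve as that backing: a scratch texture if the
// proxy has a valid scratch key, otherwise a uniquely-keyed surface from the resource cache.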
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        skgpu::ScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(fScratchKey);
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

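// A register's surface may return to the free pool only if scratch reuse is allowed for this
// proxy, the register has a scratch key (and no unique key), and nothing outside the allocator
// still holds a reference to the proxy.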
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount) const {
    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

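// Instantiate 'proxy' with this register's backing surface: reuse the surface found at
// construction if there was one, create a new surface for the originating proxy, or share the
// originating proxy's already-created surface with every other proxy in the group.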
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make surface budgeted if this proxy is budgeted.
    if (SkBudgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

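// The two insert routines below keep the list sorted by increasing start (resp. end) point. The
// inline percentage comments appear to record how often each branch was taken when this code
// was profiled.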
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy,
                                                        skgpu::ScratchKey(),
                                                        resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    skgpu::ScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

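// Walk the intervals in order of increasing start point, expiring intervals that have already
// ended and picking a Register for each proxy that still needs one. Fully-lazy proxies are
// instantiated here; everything else is instantiated later, in assign().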
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

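// Add up the GPU memory that assign() will newly allocate for budgeted proxies and ask the
// resource cache to purge enough other resources to make room for it.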
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (SkBudgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B Fully-lazy proxies were already instantiated in planAssignment
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

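// Discard all bookkeeping from the last planning/assignment pass so the allocator can be reused.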
void GrResourceAllocator::reset() {
    // NOTE: We do not reset the failedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

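// Instantiate every proxy in the finished-interval list: lazy proxies through their callbacks,
// everything else through the Register chosen in planAssignment(). Stops at the first failure.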
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif