/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"

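// Overview: the allocator runs in phases. The client first records a usage
// interval for each proxy via addInterval(). planAssignment() then sweeps the
// intervals in increasing start order, pairing each uninstantiated, non-lazy
// proxy with a Register (a placeholder for a GrSurface) and returning expired
// registers to a free pool for reuse. The client may call makeBudgetHeadroom()
// to purge the cache before assign() instantiates every proxy from its
// register; reset() then readies the allocator for the next flush.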
#ifdef SK_DEBUG
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}

uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fFailedInstantiation || fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

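// Adds (or extends) the usage interval [start, end] for 'proxy'. Intervals are
// keyed by proxy ID, so a repeated call for the same proxy extends its existing
// interval instead of creating a second one.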
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        auto resourceProvider = fDContext->priv().resourceProvider();
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(resourceProvider)) {
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}

// Tragically we have cases where we always have to make new textures.
static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
    return caps.reuseScratchTextures() || proxy->asRenderTargetProxy();
}

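// A Register is created the first time a proxy needs a surface. At construction
// it looks for an already-allocated GrSurface to reuse: by scratch key when the
// proxy can use scratch textures, otherwise by the proxy's unique key.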
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
                                        GrScratchKey scratchKey,
                                        GrResourceProvider* provider)
        : fOriginatingProxy(originatingProxy)
        , fScratchKey(std::move(scratchKey)) {
    SkASSERT(originatingProxy);
    SkASSERT(!originatingProxy->isInstantiated());
    SkASSERT(!originatingProxy->isLazy());
    SkDEBUGCODE(fUniqueID = CreateUniqueID();)
    if (fScratchKey.isValid()) {  // note: 'scratchKey' was moved from above; test the member
        if (can_proxy_use_scratch(*provider->caps(), originatingProxy)) {
            fExistingSurface = provider->findAndRefScratchTexture(fScratchKey);
        }
    } else {
        SkASSERT(this->uniqueKey().isValid());
        fExistingSurface = provider->findByUniqueKey<GrSurface>(this->uniqueKey());
    }
}

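// A register may be returned to the free pool only if the proxy can use scratch
// textures, the register has a scratch key but no unique key, and all remaining
// refs on the proxy are ones the allocator already knows about.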
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
                                                 GrSurfaceProxy* proxy,
                                                 int knownUseCount) const {
    if (!can_proxy_use_scratch(caps, proxy)) {
        return false;
    }

    if (!this->scratchKey().isValid()) {
        return false; // no scratch key, no free pool
    }
    if (this->uniqueKey().isValid()) {
        return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
    }
    // If all the refs on the proxy are known to the resource allocator then no one
    // should be holding onto it outside of Ganesh.
    return !proxy->refCntGreaterThan(knownUseCount);
}

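// Attaches a GrSurface to 'proxy': the pre-existing surface found when the
// register was created, a newly created surface if 'proxy' is the register's
// originating proxy, or the originating proxy's surface for any later proxy
// that shares this register.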
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> newSurface;
    if (!fExistingSurface) {
        if (proxy == fOriginatingProxy) {
            newSurface = proxy->priv().createSurface(resourceProvider);
        } else {
            newSurface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!fExistingSurface && !newSurface) {
        return false;
    }

    GrSurface* surface = newSurface ? newSurface.get() : fExistingSurface.get();
    // Make the surface budgeted if this proxy is budgeted.
    if (SkBudgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy's unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface);
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(fExistingSurface ? fExistingSurface : std::move(newSurface));
    return true;
}

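// IntervalList is an insertion-sorted, singly-linked list. popHead() removes
// and returns the first interval, or nullptr if the list is empty.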
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
    auto resourceProvider = fDContext->priv().resourceProvider();
    // Handle uniquely keyed proxies
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
            return *p;
        }
        // No need for a scratch key. These don't go in the free pool.
        Register* r = fInternalAllocator.make<Register>(proxy, GrScratchKey(), resourceProvider);
        fUniqueKeyRegisters.set(uniqueKey, r);
        return r;
    }

    // Then look in the free pool
    GrScratchKey scratchKey;
    proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);

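    // Any register in the free pool with a matching scratch key is acceptable.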
    auto filter = [] (const Register* r) {
        return true;
    };
    if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
        return r;
    }

    return fInternalAllocator.make<Register>(proxy, std::move(scratchKey), resourceProvider);
}

// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fDContext->priv().caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}

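// Sweeps the sorted interval list once: expires intervals that end before the
// current interval starts (recycling their registers), instantiates fully-lazy
// proxies immediately, and assigns a register to every other uninstantiated,
// non-lazy proxy. Returns false if any lazy instantiation fails.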
bool GrResourceAllocator::planAssignment() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkASSERT(!fPlanned && !fAssigned);
    SkDEBUGCODE(fPlanned = true;)

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fIntvlList.popHead()) {
        this->expire(cur->start());
        fActiveIntvls.insertByIncreasingEnd(cur);

        // Already-instantiated proxies and lazy proxies don't use registers.
        if (cur->proxy()->isInstantiated()) {
            continue;
        }

        // Instantiate fully-lazy proxies immediately. Ignore other lazy proxies at this stage.
        if (cur->proxy()->isLazy()) {
            if (cur->proxy()->isFullyLazy()) {
                fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
                if (fFailedInstantiation) {
                    break;
                }
            }
            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);
    }

    // Expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return !fFailedInstantiation;
}

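// Totals the GPU memory that assign() will need to allocate for uninstantiated
// proxies (counting each shared register only once) and asks the resource
// cache to purge enough to make room, returning false if it cannot.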
bool GrResourceAllocator::makeBudgetHeadroom() {
    SkASSERT(fPlanned);
    SkASSERT(!fFailedInstantiation);
    size_t additionalBytesNeeded = 0;
    for (Interval* cur = fFinishedIntvls.peekHead(); cur; cur = cur->next()) {
        GrSurfaceProxy* proxy = cur->proxy();
        if (SkBudgeted::kNo == proxy->isBudgeted() || proxy->isInstantiated()) {
            continue;
        }

        // N.B. Fully-lazy proxies were already instantiated in planAssignment
        if (proxy->isLazy()) {
            additionalBytesNeeded += proxy->gpuMemorySize();
        } else {
            Register* r = cur->getRegister();
            SkASSERT(r);
            if (!r->accountedForInBudget() && !r->existingSurface()) {
                additionalBytesNeeded += proxy->gpuMemorySize();
            }
            r->setAccountedForInBudget();
        }
    }
    return fDContext->priv().getResourceCache()->purgeToMakeHeadroom(additionalBytesNeeded);
}

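// Returns the allocator to its initial state so it can service the next flush.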
void GrResourceAllocator::reset() {
    // NOTE: We do not reset the failedInstantiation flag because we currently do not attempt
    // to recover from failed instantiations. The user is responsible for checking this flag and
    // bailing early.
    SkDEBUGCODE(fPlanned = false;)
    SkDEBUGCODE(fAssigned = false;)
    SkASSERT(fActiveIntvls.empty());
    fFinishedIntvls = IntervalList();
    fIntvlList = IntervalList();
    fIntvlHash.reset();
    fUniqueKeyRegisters.reset();
    fFreePool.reset();
    fInternalAllocator.reset();
}

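// Walks the finished intervals, instantiating each proxy: lazy proxies via
// their callbacks and everything else from its assigned register. Stops early
// if any instantiation fails.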
bool GrResourceAllocator::assign() {
    if (fFailedInstantiation) {
        return false;
    }
    SkASSERT(fPlanned && !fAssigned);
    SkDEBUGCODE(fAssigned = true;)
    auto resourceProvider = fDContext->priv().resourceProvider();
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(resourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), resourceProvider);
    }
    return !fFailedInstantiation;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif