/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrResourceAllocator.h"

#include "GrDeinstantiateProxyTracker.h"
#include "GrGpuResourcePriv.h"
#include "GrOpList.h"
#include "GrRenderTargetProxy.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSurfacePriv.h"
#include "GrSurfaceProxy.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTextureProxy.h"

#if GR_TRACK_INTERVAL_CREATION
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID++;
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

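// Bind the given surface to this interval's proxy. This instantiates the proxy and holds a ref
// to the surface so it can be recycled when the interval expires.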
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);
    fAssignedSurface = s;
    fProxy->priv().assign(std::move(s));
}

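// Record the boundary between two opLists' ops. assign() uses these indices to force
// intermediate flushes only on opList boundaries when the resource budget is exceeded.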
void GrResourceAllocator::markEndOfOpList(int opListIndex) {
    SkASSERT(!fAssigned);   // We shouldn't be adding any opLists after (or during) assignment

    SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
    if (!fEndOfOpListOpIndices.empty()) {
        SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
    }

    fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
}

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

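// Add a usage interval for 'proxy' covering ops ['start', 'end']. If the proxy already has an
// interval it is extended to cover 'end'. Read-only proxies never get an interval because their
// backing texture holds specific content and can never be recycled.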
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);   // We shouldn't be adding any intervals after (or during) assignment

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        // Since we aren't going to add an interval we won't revisit this proxy in assign(). So it
        // must already be instantiated or it must be a lazy proxy that we will instantiate below.
        SkASSERT(proxy->isInstantiated() ||
                 GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState());
    } else {
        if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
            // Revise the interval for an existing use
#ifdef SK_DEBUG
            if (0 == start && 0 == end) {
                // This interval is for the initial upload to a deferred proxy. Due to the
                // vagaries of how deferred proxies are collected they can appear as uploads
                // multiple times in a single opList's list and as uploads in several opLists.
                SkASSERT(0 == intvl->start());
            } else if (isDirectDstRead) {
                // Direct reads from the render target itself should occur w/in the existing
                // interval
                SkASSERT(intvl->start() <= start && intvl->end() >= end);
            } else {
                SkASSERT(intvl->end() <= start && intvl->end() <= end);
            }
#endif
            intvl->extendEnd(end);
            return;
        }
        Interval* newIntvl;
        if (fFreeIntervalList) {
            newIntvl = fFreeIntervalList;
            fFreeIntervalList = newIntvl->next();
            newIntvl->setNext(nullptr);
            newIntvl->resetTo(proxy, start, end);
        } else {
            newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
        }

        fIntvlList.insertByIncreasingStart(newIntvl);
        fIntvlHash.add(newIntvl);
    }

    // Because readOnly proxies do not get a usage interval we must instantiate them here (since it
    // won't occur in GrResourceAllocator::assign)
    if (proxy->readOnly() || !fResourceProvider->explicitlyAllocateGPUResources()) {
        // FIXME: remove this once we can do the lazy instantiation from assign instead.
        if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
            if (proxy->priv().doLazyInstantiation(fResourceProvider)) {
                if (proxy->priv().lazyInstantiationType() ==
                    GrSurfaceProxy::LazyInstantiationType::kDeinstantiate) {
                    fDeinstantiateTracker->addProxy(proxy);
                }
            }
        }
    }
}

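// Remove and return the interval at the head of the list, or null if the list is empty.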
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

// TODO: fuse this with insertByIncreasingEnd
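// The percentage comments below appear to record how often each branch is taken in practice.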
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

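// Detach the entire list, leaving it empty. The intervals themselves are not freed here; they
// are owned by the allocator's arena.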
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
    Interval* tmp = fHead;
    fHead = nullptr;
    fTail = nullptr;
    return tmp;
}

// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
    const GrScratchKey &key = surface->resourcePriv().getScratchKey();

    if (!key.isValid()) {
        return; // can't do it w/o a valid scratch key
    }

    if (surface->getUniqueKey().isValid()) {
        // If the surface has a unique key we throw it back into the resource cache.
        // If things get really tight 'findSurfaceFor' may pull it back out but there is
        // no need to have it in tight rotation.
        return;
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
#endif
    // TODO: fix this insertion so we get a more LRU-ish behavior
    fFreePool.insert(key, surface.release());
}

// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a usable one, create a new one.
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy,
                                                     bool needsStencil) {

    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                proxy->asTextureProxy()->getUniqueKey());
        if (surface) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                           needsStencil)) {
                return nullptr;
            }

            return surface;
        }
    }

    // First look in the free pool
    GrScratchKey key;

    proxy->priv().computeScratchKey(&key);

    auto filter = [&] (const GrSurface* s) {
        return !proxy->priv().requiresNoPendingIO() || !s->surfacePriv().hasPendingIO();
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }

        if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                       needsStencil)) {
            return nullptr;
        }
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, try to grab a new one from the resource cache
    return proxy->priv().createSurface(fResourceProvider);
}

// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* temp = fActiveIntvls.popHead();
        SkASSERT(!temp->next());

        if (temp->wasAssignedSurface()) {
            sk_sp<GrSurface> surface = temp->detachSurface();

            // If the proxy has an actual live ref on it that means someone wants to retain its
            // contents. In that case we cannot recycle it (until the external holder lets
            // go of it).
            if (0 == temp->proxy()->priv().getProxyRefCnt()) {
                this->recycleSurface(std::move(surface));
            }
        }

        // Add temp to the free interval list so it can be reused
        SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
        temp->setNext(fFreeIntervalList);
        fFreeIntervalList = temp;
    }
}

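// Walk the intervals in increasing-start order, expiring finished intervals back to the free
// pool and finding (or creating) a GrSurface for each proxy as its interval becomes active.
// '*startIndex' and '*stopIndex' bracket the opLists whose resources were assigned. If the
// resource provider goes over budget we stop early, but only on an opList boundary, so the
// caller can flush the work recorded so far. Returns false if there were no intervals (i.e.,
// nothing to render).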
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = AssignError::kNoError;

    fIntvlHash.reset(); // we don't need the interval hash anymore
    if (fIntvlList.empty()) {
        return false;   // nothing to render
    }

    *startIndex = fCurOpListIndex;
    *stopIndex = fEndOfOpListOpIndices.count();

    if (!fResourceProvider->explicitlyAllocateGPUResources()) {
        fIntvlList.detachAll(); // arena allocator will clean these up for us
        return true;
    }

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    while (Interval* cur = fIntvlList.popHead()) {
        if (fEndOfOpListOpIndices[fCurOpListIndex] < cur->start()) {
            fCurOpListIndex++;
        }

        this->expire(cur->start());

        bool needsStencil = cur->proxy()->asRenderTargetProxy()
                                        ? cur->proxy()->asRenderTargetProxy()->needsStencil()
                                        : false;

        if (cur->proxy()->isInstantiated()) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
                        fResourceProvider, cur->proxy()->peekSurface(), needsStencil)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }

            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate draws on opList boundaries
                if (!fIntvlList.empty() &&
                    fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
                    *stopIndex = fCurOpListIndex+1;

                    // This is interrupting the allocation of resources for this flush. We need to
                    // proactively clear the active interval list of any intervals that aren't
                    // guaranteed to survive the partial flush lest they become zombies (i.e.,
                    // holding a deleted surface proxy).
                    if (const Interval* tmp = fIntvlList.peekHead()) {
                        this->expire(tmp->start());
                    } else {
                        this->expire(std::numeric_limits<unsigned int>::max());
                    }
                    return true;
                }
            }

            continue;
        }

        if (GrSurfaceProxy::LazyState::kNot != cur->proxy()->lazyInstantiationState()) {
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            } else {
                if (GrSurfaceProxy::LazyInstantiationType::kDeinstantiate ==
                    cur->proxy()->priv().lazyInstantiationType()) {
                    fDeinstantiateTracker->addProxy(cur->proxy());
                }
            }
        } else if (sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy(), needsStencil)) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                     surface->uniqueID().asUInt(),
                     cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate draws on opList boundaries
            if (!fIntvlList.empty() &&
                fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
                *stopIndex = fCurOpListIndex+1;

                // This is interrupting the allocation of resources for this flush. We need to
                // proactively clear the active interval list of any intervals that aren't
                // guaranteed to survive the partial flush lest they become zombies (i.e.,
                // holding a deleted surface proxy).
                if (const Interval* tmp = fIntvlList.peekHead()) {
                    this->expire(tmp->start());
                } else {
                    this->expire(std::numeric_limits<unsigned int>::max());
                }
                return true;
            }
        }
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}

#if GR_ALLOCATION_SPEW
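// Print every interval (with its proxy/surface IDs and ref counts) followed by a simple ASCII
// chart of the usage intervals across the op indices.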
void GrResourceAllocator::dumpIntervals() {

    // Print all the intervals while computing their range
    unsigned int min = fNumOps+1;
    unsigned int max = 0;
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d R:%d W:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->getBackingRefCnt_TestOnly(),
                 cur->proxy()->getPendingReadCnt_TestOnly(),
                 cur->proxy()->getPendingWriteCnt_TestOnly());
        min = SkTMin(min, cur->start());
        max = SkTMax(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif