/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrResourceCache.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrTracing.h"
#include "SkChecksum.h"
#include "SkGr.h"
#include "SkMessageBus.h"
#include "SkTSort.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);

//////////////////////////////////////////////////////////////////////////////

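// GrScratchKey resource types and GrUniqueKey domains are 16-bit IDs handed out at runtime by
// the atomic counters below, starting just past each key class's invalid sentinel value.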
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static int32_t gType = INHERITED::kInvalidDomain + 1;

    int32_t type = sk_atomic_inc(&gType);
    if (type > SK_MaxU16) {
        SkFAIL("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static int32_t gDomain = INHERITED::kInvalidDomain + 1;

    int32_t domain = sk_atomic_inc(&gDomain);
    if (domain > SK_MaxU16) {
        SkFAIL("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkChecksum::Murmur3(data, size);
}

//////////////////////////////////////////////////////////////////////////////

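// RAII helper that runs the cache's debug validation both when it is created and when it goes
// out of scope.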
class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////


GrResourceCache::GrResourceCache(const GrCaps* caps)
    : fTimestamp(0)
    , fMaxCount(kDefaultMaxCount)
    , fMaxBytes(kDefaultMaxSize)
    , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes)
#if GR_CACHE_STATS
    , fHighWaterCount(0)
    , fHighWaterBytes(0)
    , fBudgetedHighWaterCount(0)
    , fBudgetedHighWaterBytes(0)
#endif
    , fBytes(0)
    , fBudgetedCount(0)
    , fBudgetedBytes(0)
    , fOverBudgetCB(nullptr)
    , fOverBudgetData(nullptr)
    , fFlushTimestamps(nullptr)
    , fLastFlushTimestampIndex(0)
    , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkDEBUGCODE(fCount = 0;)
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
    this->resetFlushTimestamps();
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
    delete[] fFlushTimestamps;
}

void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) {
    fMaxCount = count;
    fMaxBytes = bytes;
    fMaxUnusedFlushes = maxUnusedFlushes;
    this->resetFlushTimestamps();
    this->purgeAsNeeded();
}

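// (Re)allocates the circular buffer of flush timestamps used to detect resources that have gone
// unused for more than fMaxUnusedFlushes flushes. The feature is disabled (null buffer) when the
// required history would be too large.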
void GrResourceCache::resetFlushTimestamps() {
    delete[] fFlushTimestamps;

    // We assume this number is a power of two when wrapping indices into the timestamp array.
    fMaxUnusedFlushes = SkNextPow2(fMaxUnusedFlushes);

    // Since our implementation stores the timestamps of the last fMaxUnusedFlushes flush calls,
    // we simply turn the feature off if that array would be too large.
    static const int kMaxSupportedTimestampHistory = 128;

    if (fMaxUnusedFlushes > kMaxSupportedTimestampHistory) {
        fFlushTimestamps = nullptr;
        return;
    }

    fFlushTimestamps = new uint32_t[fMaxUnusedFlushes];
    fLastFlushTimestampIndex = 0;
    // Set all the historical flush timestamps to initially be at the beginning of time
    // (timestamp 0).
    sk_bzero(fFlushTimestamps, fMaxUnusedFlushes * sizeof(uint32_t));
}

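// Adds a resource to the cache's bookkeeping. The resource must be new (not already in the cache,
// not destroyed) and non-purgeable, and it receives a fresh timestamp before being tracked.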
void GrResourceCache::insertResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we
    // wind up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    if (resource->resourcePriv().getScratchKey().isValid()) {
        SkASSERT(!resource->cacheAccess().isExternal());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}

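// Removes a resource from all cache bookkeeping: the purgeable queue or non-purgeable array,
// the byte/count totals, and the scratch and unique key tables.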
void GrResourceCache::removeResource(GrGpuResource* resource) {
    this->validate();
    SkASSERT(this->isInCache(resource));

    if (resource->isPurgeable()) {
        fPurgeableQueue.remove(resource);
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

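// Abandons every resource in the cache. Each abandon() call detaches the resource from the cache,
// so both loops drain their containers; the asserts then verify that all bookkeeping is back to
// zero.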
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
}

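// Predicate passed to fScratchMap.find() that accepts only scratch resources with no outstanding
// refs and, optionally, no pending IO.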
class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }

    bool operator()(const GrGpuResource* resource) const {
        if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
            return false;
        }
        return !fRejectPendingIO || !resource->internalHasPendingIO();
    }

private:
    bool fRejectPendingIO;
};

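// Looks up a reusable scratch resource for the given key, refs it, and makes it the most recently
// used entry. The flags control how resources with pending IO are treated and whether the caller
// should instead allocate a new resource while budget remains.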
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
                                                          size_t resourceSize,
                                                          uint32_t flags) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource;
    if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) {
        resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
        if (resource) {
            this->refAndMakeResourceMRU(resource);
            this->validate();
            return resource;
        } else if (flags & kRequireNoPendingIO_ScratchFlag) {
            return nullptr;
        }
        // We would prefer to consume more available VRAM rather than flushing immediately, but
        // on ANGLE this can lead to starving the GPU.
        if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
            // kPrefer is specified and we didn't find a resource without pending IO, but there
            // is still space in our budget for the resource, so force the caller to allocate a
            // new resource.
            return nullptr;
        }
    }
    resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    // Someone must hold a ref to this resource in order to have removed its key. When the ref
    // count reaches zero we will get a ref-count notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    this->validate();
}

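// Assigns newKey to the resource, releasing or stripping the key from any other resource that
// currently holds it. Passing an invalid key simply removes the resource's existing unique key.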
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // Remove the entry for this resource if it already has a unique key.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
        SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
    }

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and otherwise unreachable, then
            // remove it.
            if (!old->resourcePriv().getScratchKey().isValid() && old->isPurgeable()) {
                // release() may call validate(), which will assert that a resource is in
                // fUniqueHash if it has a valid unique key. So, in debug builds, reset this
                // resource's key here before we assign the new one.
                SkDEBUGCODE(resource->cacheAccess().removeUniqueKey();)
                old->cacheAccess().release();
            } else {
                fUniqueHash.remove(newKey);
                old->cacheAccess().removeUniqueKey();
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));
        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        resource->cacheAccess().removeUniqueKey();
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

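// Called when a resource's ref count and/or its pending IO counts reach zero. If the resource has
// become purgeable it is moved to the purgeable queue, and it may then be re-budgeted as a
// scratch resource or released outright depending on its keys and the current budget.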
void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(flags);
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
#ifdef SK_DEBUG
        // When the timestamp overflows validate() is called. validate() checks that resources in
        // the nonpurgeable array are indeed not purgeable. However, the movement from the array
        // to the purgeable queue happens just below in this function. So we mark it as an
        // exception.
        if (resource->isPurgeable()) {
            fNewlyPurgeableResourceForValidation = resource;
        }
#endif
        resource->cacheAccess().setTimestamp(this->getNextTimestamp());
        SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
    }

    if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
        SkASSERT(!resource->isPurgeable());
        return;
    }

    SkASSERT(resource->isPurgeable());
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);

    if (SkBudgeted::kNo == resource->resourcePriv().isBudgeted()) {
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->cacheAccess().isExternal() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (fBudgetedCount < fMaxCount &&
                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    } else {
        // Purge the resource immediately if we're over budget. Also purge if the resource has
        // neither a valid scratch key nor a unique key.
        bool noKey = !resource->resourcePriv().getScratchKey().isValid() &&
                     !resource->getUniqueKey().isValid();
        if (!this->overBudget() && !noKey) {
            return;
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

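// Updates byte accounting (and, if the resource is budgeted, the budgeted byte total) after a
// resource reports a change in its GPU memory size, then purges if the cache went over budget.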
void GrResourceCache::didChangeGpuMemorySize(const GrGpuResource* resource, size_t oldSize) {
    // SkASSERT(!fPurging); GrPathRange increases size during flush. :(
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    ptrdiff_t delta = resource->gpuMemorySize() - oldSize;

    fBytes += delta;
#if GR_CACHE_STATS
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        fBudgetedBytes += delta;
        TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }

    this->purgeAsNeeded();
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();

    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        this->purgeAsNeeded();
    } else {
        --fBudgetedCount;
        fBudgetedBytes -= size;
    }
    TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

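// Handles any pending unique-key invalidation messages, releases purgeable resources whose
// timestamps predate the oldest tracked flush, and then releases purgeable resources in LRU
// order until the cache is back under budget. If still over budget, the registered over-budget
// callback is invoked.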
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        this->processInvalidUniqueKeys(invalidKeyMsgs);
    }

    if (fFlushTimestamps) {
        // fMaxUnusedFlushes is kept a power of two by resetFlushTimestamps(), so we can wrap the
        // index with a mask.
        SkASSERT(SkIsPow2(fMaxUnusedFlushes));
        int oldestFlushIndex = (fLastFlushTimestampIndex + 1) & (fMaxUnusedFlushes - 1);

        uint32_t oldestAllowedTimestamp = fFlushTimestamps[oldestFlushIndex];
        while (fPurgeableQueue.count()) {
            uint32_t oldestResourceTimestamp = fPurgeableQueue.peek()->cacheAccess().timestamp();
            if (oldestAllowedTimestamp < oldestResourceTimestamp) {
                break;
            }
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->isPurgeable());
            resource->cacheAccess().release();
        }
    }

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();

    if (stillOverbudget) {
        // Despite the purge we're still over budget. Call our over-budget callback. If this frees
        // any resources then we'll get notified and take appropriate action.
        (*fOverBudgetCB)(fOverBudgetData);
        this->validate();
    }
}

void GrResourceCache::purgeAllUnlocked() {
    // We could disable maintaining the heap property here, but it would add a lot of complexity.
    // Moreover, this is rarely called.
    while (fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
    }

    this->validate();
}

void GrResourceCache::processInvalidUniqueKeys(
        const SkTArray<GrUniqueKeyInvalidatedMessage>& msgs) {
    for (int i = 0; i < msgs.count(); ++i) {
        GrGpuResource* resource = this->findAndRefUniqueResource(msgs[i].key());
        if (resource) {
            resource->resourcePriv().removeUniqueKey();
            resource->unref(); // If this resource is now purgeable, the cache will be notified.
        }
    }
}

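// The non-purgeable array stores each resource's array index back on the resource itself, which
// lets removal below swap in the tail element and pop in O(1).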
void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

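// Returns the next LRU timestamp. When the 32-bit timestamp counter wraps back to zero, every
// resource currently in the cache is re-stamped with compact sequential timestamps so relative
// ordering is preserved.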
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be
            // extremely rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            struct Less {
                bool operator()(GrGpuResource* a, GrGpuResource* b) {
                    return CompareTimestamp(a, b);
                }
            };
            Less less;
            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1, less);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource
                    // post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));

            // The historical timestamps of flushes are now invalid.
            this->resetFlushTimestamps();
        }
    }
    return fTimestamp++;
}

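// Records the timestamp of this flush in the circular history buffer (when the feature is
// enabled) and then purges anything that has now gone unused for too many flushes.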
void GrResourceCache::notifyFlushOccurred() {
    if (fFlushTimestamps) {
        SkASSERT(SkIsPow2(fMaxUnusedFlushes));
        fLastFlushTimestampIndex = (fLastFlushTimestampIndex + 1) & (fMaxUnusedFlushes - 1);
        // Get the timestamp before accessing fFlushTimestamps because getNextTimestamp will
        // reallocate fFlushTimestamps on timestamp overflow.
        uint32_t timestamp = this->getNextTimestamp();
        fFlushTimestamps[fLastFlushTimestampIndex] = timestamp;
        this->purgeAsNeeded();
    }
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->isPurgeable()) {
                ++fLocked;
            }

            if (resource->cacheAccess().isScratch()) {
                SkASSERT(!resource->getUniqueKey().isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(resource->resourcePriv().getScratchKey()));
                SkASSERT(!resource->cacheAccess().isExternal());
            } else if (resource->resourcePriv().getScratchKey().isValid()) {
                SkASSERT(SkBudgeted::kNo == resource->resourcePriv().isBudgeted() ||
                         resource->getUniqueKey().isValid());
                ++fCouldBeScratch;
                SkASSERT(fScratchMap->countForKey(resource->resourcePriv().getScratchKey()));
                SkASSERT(!resource->cacheAccess().isExternal());
            }
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(!resource->cacheAccess().isExternal());
                SkASSERT(SkBudgeted::kYes == resource->resourcePriv().isBudgeted());
            }

            if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    Stats stats(this);

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

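// Debug-only membership check: a resource is in the cache if the index it stores points back at
// it in either the purgeable queue or the non-purgeable array.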
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif