1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "GrResourceCache.h"
10
11 #include "GrCaps.h"
12 #include "GrGpuResourceCacheAccess.h"
13 #include "GrTracing.h"
14 #include "SkGr.h"
15 #include "SkMessageBus.h"
16 #include "SkOpts.h"
17 #include "SkTSort.h"
18
19 DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);
20
21 DECLARE_SKMESSAGEBUS_MESSAGE(GrGpuResourceFreedMessage);
22
23 //////////////////////////////////////////////////////////////////////////////
24
GenerateResourceType()25 GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
26 static int32_t gType = INHERITED::kInvalidDomain + 1;
27
28 int32_t type = sk_atomic_inc(&gType);
29 if (type > SK_MaxU16) {
30 SkFAIL("Too many Resource Types");
31 }
32
33 return static_cast<ResourceType>(type);
34 }
35
GenerateDomain()36 GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
37 static int32_t gDomain = INHERITED::kInvalidDomain + 1;
38
39 int32_t domain = sk_atomic_inc(&gDomain);
40 if (domain > SK_MaxU16) {
41 SkFAIL("Too many GrUniqueKey Domains");
42 }
43
44 return static_cast<Domain>(domain);
45 }
46
// Hashes the raw 32-bit words of a resource key by forwarding to Skia's
// runtime-optimized hash implementation.
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}
50
51 //////////////////////////////////////////////////////////////////////////////
52
53 class GrResourceCache::AutoValidate : ::SkNoncopyable {
54 public:
AutoValidate(GrResourceCache * cache)55 AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
~AutoValidate()56 ~AutoValidate() { fCache->validate(); }
57 private:
58 GrResourceCache* fCache;
59 };
60
61 //////////////////////////////////////////////////////////////////////////////
62
63
// Constructs a cache with the default budget limits. contextUniqueID
// identifies the owning context so that cross-context freed-resource messages
// can be filtered in processFreedGpuResources().
GrResourceCache::GrResourceCache(const GrCaps* caps, uint32_t contextUniqueID)
    : fTimestamp(0)
    , fMaxCount(kDefaultMaxCount)
    , fMaxBytes(kDefaultMaxSize)
    , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes)
#if GR_CACHE_STATS
    , fHighWaterCount(0)
    , fHighWaterBytes(0)
    , fBudgetedHighWaterCount(0)
    , fBudgetedHighWaterBytes(0)
#endif
    , fBytes(0)
    , fBudgetedCount(0)
    , fBudgetedBytes(0)
    , fPurgeableBytes(0)
    , fRequestFlush(false)
    , fExternalFlushCnt(0)
    , fContextUniqueID(contextUniqueID)
    // Cached caps bit; consulted in findAndRefScratchResource().
    , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkDEBUGCODE(fCount = 0;)
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
}
86
GrResourceCache::~GrResourceCache() {
    // Release (not abandon) everything so backend objects are cleaned up.
    this->releaseAll();
}
90
// Replaces the cache's budget limits (resource count, total bytes, and the
// maximum number of flushes a resource may sit unused) and immediately
// enforces the new budget.
void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) {
    fMaxCount = count;
    fMaxBytes = bytes;
    fMaxUnusedFlushes = maxUnusedFlushes;
    this->purgeAsNeeded();
}
97
// Adds a newly created (hence non-purgeable) resource to the cache: assigns a
// timestamp, appends it to the non-purgeable array, updates byte/count
// bookkeeping, registers its scratch key when applicable, and finally trims
// the cache back to budget.
void GrResourceCache::insertResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    // Only resources WITHOUT a unique key live in the scratch map (a unique key
    // makes a resource reachable through fUniqueHash instead).
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}
135
// Removes a resource from the cache entirely, unwinding everything
// insertResource() set up (container membership, byte/count bookkeeping, and
// the scratch-map / unique-hash entries).
void GrResourceCache::removeResource(GrGpuResource* resource) {
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // A resource lives in exactly one of the purgeable queue or the
    // non-purgeable array.
    if (resource->isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    // Drop whichever lookup-table entry the resource occupied (scratch map and
    // unique hash are mutually exclusive here, mirroring insertResource()).
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
166
// Abandons every resource in the cache. Each abandon() call removes the
// resource from the cache (otherwise these loops would not terminate), so both
// containers are drained from the back/top. Afterwards every piece of
// bookkeeping must be back at zero.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}
191
// Releases every resource in the cache (unlike abandonAll(), release() is the
// normal cleanup path — see the destructor). Pending freed-resource messages
// are processed first so cross-context refs are dropped. Each release() call
// removes the resource from the cache, so the loops drain both containers.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    this->processFreedGpuResources();

    while(fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    // Everything should now be gone and all bookkeeping back at zero.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}
218
219 class GrResourceCache::AvailableForScratchUse {
220 public:
AvailableForScratchUse(bool rejectPendingIO)221 AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }
222
operator ()(const GrGpuResource * resource) const223 bool operator()(const GrGpuResource* resource) const {
224 SkASSERT(!resource->getUniqueKey().isValid() &&
225 resource->resourcePriv().getScratchKey().isValid());
226 if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
227 return false;
228 }
229 return !fRejectPendingIO || !resource->internalHasPendingIO();
230 }
231
232 private:
233 bool fRejectPendingIO;
234 };
235
// Looks up a reusable scratch resource by key, honoring the pending-IO flags:
// first tries a match with no pending IO; depending on the flags it may then
// give up, ask the caller to allocate fresh (when budget allows), or fall back
// to any available match. A returned resource is reffed and made MRU.
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
                                                          size_t resourceSize,
                                                          uint32_t flags) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource;
    if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) {
        resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
        if (resource) {
            this->refAndMakeResourceMRU(resource);
            this->validate();
            return resource;
        } else if (flags & kRequireNoPendingIO_ScratchFlag) {
            // "Require" means a pending-IO match is unacceptable; give up.
            return nullptr;
        }
        // We would prefer to consume more available VRAM rather than flushing
        // immediately, but on ANGLE this can lead to starving of the GPU.
        if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
            // kPrefer is specified, we didn't find a resource without pending io,
            // but there is still space in our budget for the resource so force
            // the caller to allocate a new resource.
            return nullptr;
        }
    }
    // Fall back to any available match, pending IO or not.
    resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}
267
willRemoveScratchKey(const GrGpuResource * resource)268 void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
269 SkASSERT(resource->resourcePriv().getScratchKey().isValid());
270 if (!resource->getUniqueKey().isValid()) {
271 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
272 }
273 }
274
// Strips a resource of its unique key. Once keyless, the resource becomes
// eligible for the scratch map again (if it has a valid scratch key).
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();

    // Now that the unique key is gone, re-register the resource for scratch use.
    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->validate();
}
290
// Assigns (or clears, when newKey is invalid) a resource's unique key. If a
// different resource currently owns newKey, that resource either loses the key
// or — when it is purgeable and otherwise unreachable — is released outright.
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() && old->isPurgeable()) {
                // release may call validate() which will assert that resource is in fUniqueHash
                // if it has a valid key. So in debug reset the key here before we assign it.
                SkDEBUGCODE(resource->cacheAccess().removeUniqueKey();)
                old->cacheAccess().release();
            } else {
                this->removeUniqueKey(old);
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));
        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}
330
// Refs a cached resource and marks it most-recently-used by stamping a fresh
// timestamp. A purgeable resource is moved back to the non-purgeable array
// (taking a ref necessarily makes it non-purgeable).
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
346
// Called by a resource when one of its counts (ref and/or pending-IO) hits
// zero. Re-stamps the timestamp on last-unref, moves a newly purgeable
// resource into the purgeable queue, and then decides whether to keep it
// (possibly re-budgeting it as scratch) or release it immediately.
void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(flags);
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
#ifdef SK_DEBUG
        // When the timestamp overflows validate() is called. validate() checks that resources in
        // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
        // the purgeable queue happens just below in this function. So we mark it as an exception.
        if (resource->isPurgeable()) {
            fNewlyPurgeableResourceForValidation = resource;
        }
#endif
        resource->cacheAccess().setTimestamp(this->getNextTimestamp());
        SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
    }

    if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
        // Some count is still non-zero, so the resource stays non-purgeable.
        SkASSERT(!resource->isPurgeable());
        return;
    }

    // All counts reached zero: move it into the purgeable queue and record the
    // flush count / wall-clock time at which it became purgeable.
    SkASSERT(resource->isPurgeable());
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setFlushCntWhenResourceBecamePurgeable(fExternalFlushCnt);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    if (SkBudgeted::kNo == resource->resourcePriv().isBudgeted()) {
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (fBudgetedCount < fMaxCount &&
                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    } else {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool noKey = !resource->resourcePriv().getScratchKey().isValid() &&
                     !resource->getUniqueKey().isValid();
        if (!this->overBudget() && !noKey) {
            return;
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
408
// Called after a resource's reported GPU memory size changed; applies the
// (possibly negative) delta to the byte bookkeeping and re-enforces the budget.
void GrResourceCache::didChangeGpuMemorySize(const GrGpuResource* resource, size_t oldSize) {
    // SkASSERT(!fPurging); GrPathRange increases size during flush. :(
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // Signed delta: the unsigned subtraction wraps, but assigning it to
    // ptrdiff_t recovers the intended negative value when the size shrank.
    ptrdiff_t delta = resource->gpuMemorySize() - oldSize;

    fBytes += delta;
#if GR_CACHE_STATS
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        fBudgetedBytes += delta;
        TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }

    this->purgeAsNeeded();
    this->validate();
}
432
// Called after a resource toggled between budgeted and unbudgeted; moves its
// size/count in or out of the budgeted totals accordingly.
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();

    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        // Now budgeted: it counts against the budget, so we may need to purge.
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        this->purgeAsNeeded();
    } else {
        --fBudgetedCount;
        fBudgetedBytes -= size;
    }
    TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}
456
// Brings the cache back within budget: drains pending invalidation/freed
// messages, purges resources that have sat unused for too many flushes, then
// releases purgeable resources until under budget. If that is not enough,
// requests a flush so pending-IO resources can be freed next time around.
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        this->processInvalidUniqueKeys(invalidKeyMsgs);
    }

    this->processFreedGpuResources();

    if (fMaxUnusedFlushes > 0) {
        // We want to know how many complete flushes have occurred without the resource being used.
        // If the resource was tagged when fExternalFlushCnt was N then this means it became
        // purgeable during activity that became the N+1th flush. So when the flush count is N+2
        // it has sat in the purgeable queue for one entire flush.
        uint32_t oldestAllowedFlushCnt = fExternalFlushCnt - fMaxUnusedFlushes - 1;
        // check for underflow
        if (oldestAllowedFlushCnt < fExternalFlushCnt) {
            while (fPurgeableQueue.count()) {
                uint32_t flushWhenResourceBecamePurgeable =
                        fPurgeableQueue.peek()->cacheAccess().flushCntWhenResourceBecamePurgeable();
                if (oldestAllowedFlushCnt < flushWhenResourceBecamePurgeable) {
                    // Resources were given both LRU timestamps and tagged with a flush cnt when
                    // they first became purgeable. The LRU timestamp won't change again until the
                    // resource is made non-purgeable again. So, at this point all the remaining
                    // resources in the timestamp-sorted queue will have a flush count >= to this
                    // one.
                    break;
                }
                GrGpuResource* resource = fPurgeableQueue.peek();
                SkASSERT(resource->isPurgeable());
                resource->cacheAccess().release();
            }
        }
    }

    // Release LRU-first until under budget or nothing purgeable remains.
    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();

    if (stillOverbudget) {
        // Set this so that GrDrawingManager will issue a flush to free up resources with pending
        // IO that we were unable to purge in this pass.
        fRequestFlush = true;
    }
}
508
purgeAllUnlocked()509 void GrResourceCache::purgeAllUnlocked() {
510 // We could disable maintaining the heap property here, but it would add a lot of complexity.
511 // Moreover, this is rarely called.
512 while (fPurgeableQueue.count()) {
513 GrGpuResource* resource = fPurgeableQueue.peek();
514 SkASSERT(resource->isPurgeable());
515 resource->cacheAccess().release();
516 }
517
518 this->validate();
519 }
520
purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime)521 void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
522 while (fPurgeableQueue.count()) {
523 const GrStdSteadyClock::time_point resourceTime =
524 fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
525 if (resourceTime >= purgeTime) {
526 // Resources were given both LRU timestamps and tagged with a frame number when
527 // they first became purgeable. The LRU timestamp won't change again until the
528 // resource is made non-purgeable again. So, at this point all the remaining
529 // resources in the timestamp-sorted queue will have a frame number >= to this
530 // one.
531 break;
532 }
533 GrGpuResource* resource = fPurgeableQueue.peek();
534 SkASSERT(resource->isPurgeable());
535 resource->cacheAccess().release();
536 }
537 }
538
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)539 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
540
541 const size_t tmpByteBudget = SkTMax((size_t)0, fBytes - bytesToPurge);
542 bool stillOverbudget = tmpByteBudget < fBytes;
543
544 if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
545 // Sort the queue
546 fPurgeableQueue.sort();
547
548 // Make a list of the scratch resources to delete
549 SkTDArray<GrGpuResource*> scratchResources;
550 size_t scratchByteCount = 0;
551 for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
552 GrGpuResource* resource = fPurgeableQueue.at(i);
553 SkASSERT(resource->isPurgeable());
554 if (!resource->getUniqueKey().isValid()) {
555 *scratchResources.append() = resource;
556 scratchByteCount += resource->gpuMemorySize();
557 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
558 }
559 }
560
561 // Delete the scratch resources. This must be done as a separate pass
562 // to avoid messing up the sorted order of the queue
563 for (int i = 0; i < scratchResources.count(); i++) {
564 scratchResources.getAt(i)->cacheAccess().release();
565 }
566 stillOverbudget = tmpByteBudget < fBytes;
567
568 this->validate();
569 }
570
571 // Purge any remaining resources in LRU order
572 if (stillOverbudget) {
573 const size_t cachedByteCount = fMaxBytes;
574 fMaxBytes = tmpByteBudget;
575 this->purgeAsNeeded();
576 fMaxBytes = cachedByteCount;
577 }
578 }
579
processInvalidUniqueKeys(const SkTArray<GrUniqueKeyInvalidatedMessage> & msgs)580 void GrResourceCache::processInvalidUniqueKeys(
581 const SkTArray<GrUniqueKeyInvalidatedMessage>& msgs) {
582 for (int i = 0; i < msgs.count(); ++i) {
583 GrGpuResource* resource = this->findAndRefUniqueResource(msgs[i].key());
584 if (resource) {
585 resource->resourcePriv().removeUniqueKey();
586 resource->unref(); // If this resource is now purgeable, the cache will be notified.
587 }
588 }
589 }
590
// Takes a ref on a resource created for use by another context; the balancing
// unref happens in processFreedGpuResources() when this context's freed
// message arrives.
void GrResourceCache::insertCrossContextGpuResource(GrGpuResource* resource) {
    resource->ref();
}
594
processFreedGpuResources()595 void GrResourceCache::processFreedGpuResources() {
596 SkTArray<GrGpuResourceFreedMessage> msgs;
597 fFreedGpuResourceInbox.poll(&msgs);
598 for (int i = 0; i < msgs.count(); ++i) {
599 if (msgs[i].fOwningUniqueID == fContextUniqueID) {
600 msgs[i].fResource->unref();
601 }
602 }
603 }
604
addToNonpurgeableArray(GrGpuResource * resource)605 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
606 int index = fNonpurgeableResources.count();
607 *fNonpurgeableResources.append() = resource;
608 *resource->cacheAccess().accessCacheIndex() = index;
609 }
610
// O(1) removal from the non-purgeable array using the index cached on the
// resource (swap-with-last, then pop).
void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    // Mark the resource as no longer in either container (debug builds only).
    SkDEBUGCODE(*index = -1);
}
622
// Returns the next LRU timestamp, handling 32-bit wrap-around by compacting
// all live timestamps: resources are merge-walked in old-timestamp order and
// re-stamped sequentially from 0, so relative LRU order is preserved.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Drain the purgeable queue; popping yields timestamp order since
            // the queue is sorted by timestamp.
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            // NOTE: SkTQSort takes an inclusive end pointer, hence end() - 1.
            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
684
notifyFlushOccurred(FlushType type)685 void GrResourceCache::notifyFlushOccurred(FlushType type) {
686 switch (type) {
687 case FlushType::kCacheRequested:
688 SkASSERT(fRequestFlush);
689 fRequestFlush = false;
690 break;
691 case FlushType::kExternal:
692 ++fExternalFlushCnt;
693 if (0 == fExternalFlushCnt) {
694 // When this wraps just reset all the purgeable resources' last used flush state.
695 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
696 fPurgeableQueue.at(i)->cacheAccess().setFlushCntWhenResourceBecamePurgeable(0);
697 }
698 }
699 break;
700 }
701 this->purgeAsNeeded();
702 }
703
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump) const704 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
705 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
706 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
707 }
708 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
709 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
710 }
711 }
712
713 #ifdef SK_DEBUG
// Debug-only consistency check: cross-checks every container, lookup table,
// and byte/count total against each other. Sampled (run with decreasing
// probability as the cache grows) to keep debug builds usable.
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Accumulates per-resource facts while verifying each resource's key/map
    // invariants; totals are compared against the cache's counters at the end.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                // True scratch: no unique key, and present in the scratch map.
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                // Has a scratch key but isn't currently scratch (unbudgeted or
                // uniquely keyed).
                SkASSERT(SkBudgeted::kNo == resource->resourcePriv().isBudgeted() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                // Uniquely keyed resources live in fUniqueHash, never in the
                // scratch map.
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(SkBudgeted::kYes == resource->resourcePriv().isBudgeted() ||
                         resource->resourcePriv().refsWrappedObjects());

                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        // Verify the scratch map's iterator and that every entry is keyless.
        ScratchMap::ConstIter iter(&fScratchMap);

        int count = 0;
        for ( ; !iter.done(); ++iter) {
            const GrGpuResource* resource = *iter;
            SkASSERT(resource->resourcePriv().getScratchKey().isValid());
            SkASSERT(!resource->getUniqueKey().isValid());
            count++;
        }
        SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
    }

    Stats stats(this);
    size_t purgeableBytes = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        // The lone allowed purgeable entry is the one notifyCntReachedZero()
        // is in the middle of moving (see fNewlyPurgeableResourceForValidation).
        SkASSERT(!fNonpurgeableResources[i]->isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
835
isInCache(const GrGpuResource * resource) const836 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
837 int index = *resource->cacheAccess().accessCacheIndex();
838 if (index < 0) {
839 return false;
840 }
841 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
842 return true;
843 }
844 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
845 return true;
846 }
847 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
848 return false;
849 }
850
851 #endif
852