1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "GrResourceCache.h"
10
11 #include "GrCaps.h"
12 #include "GrGpuResourceCacheAccess.h"
13 #include "GrProxyProvider.h"
14 #include "GrTexture.h"
15 #include "GrTextureProxyCacheAccess.h"
16 #include "GrTracing.h"
17 #include "SkGr.h"
18 #include "SkMessageBus.h"
19 #include "SkOpts.h"
20 #include "SkTSort.h"
21
// Register the message types this cache receives over the SkMessageBus:
// unique-key invalidation notices and cross-context "free this resource"
// requests (drained via fInvalidUniqueKeyInbox / fFreedGpuResourceInbox).
DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);

DECLARE_SKMESSAGEBUS_MESSAGE(GrGpuResourceFreedMessage);
25
26 //////////////////////////////////////////////////////////////////////////////
27
GenerateResourceType()28 GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
29 static int32_t gType = INHERITED::kInvalidDomain + 1;
30
31 int32_t type = sk_atomic_inc(&gType);
32 if (type > SK_MaxU16) {
33 SK_ABORT("Too many Resource Types");
34 }
35
36 return static_cast<ResourceType>(type);
37 }
38
GenerateDomain()39 GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
40 static int32_t gDomain = INHERITED::kInvalidDomain + 1;
41
42 int32_t domain = sk_atomic_inc(&gDomain);
43 if (domain > SK_MaxU16) {
44 SK_ABORT("Too many GrUniqueKey Domains");
45 }
46
47 return static_cast<Domain>(domain);
48 }
49
// Hashes a resource key's raw data words with the optimized SkOpts hasher.
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    const uint32_t hash = SkOpts::hash(data, size);
    return hash;
}
53
54 //////////////////////////////////////////////////////////////////////////////
55
56 class GrResourceCache::AutoValidate : ::SkNoncopyable {
57 public:
AutoValidate(GrResourceCache * cache)58 AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
~AutoValidate()59 ~AutoValidate() { fCache->validate(); }
60 private:
61 GrResourceCache* fCache;
62 };
63
64 //////////////////////////////////////////////////////////////////////////////
65
66
// The cache starts with the default budget limits; callers adjust them via
// setLimits(). 'contextUniqueID' identifies the owning context so that
// cross-context free messages can be filtered in processFreedGpuResources().
// Note: fProxyProvider starts null; setProxyProvider must be called before
// releaseAll()/processInvalidUniqueKeys() run (they assert on it).
GrResourceCache::GrResourceCache(const GrCaps* caps, uint32_t contextUniqueID)
    : fProxyProvider(nullptr)
    , fTimestamp(0)
    , fMaxCount(kDefaultMaxCount)
    , fMaxBytes(kDefaultMaxSize)
    , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes)
#if GR_CACHE_STATS
    , fHighWaterCount(0)
    , fHighWaterBytes(0)
    , fBudgetedHighWaterCount(0)
    , fBudgetedHighWaterBytes(0)
#endif
    , fBytes(0)
    , fBudgetedCount(0)
    , fBudgetedBytes(0)
    , fPurgeableBytes(0)
    , fRequestFlush(false)
    , fExternalFlushCnt(0)
    , fContextUniqueID(contextUniqueID)
    , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkDEBUGCODE(fCount = 0;)
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
}
90
// Releasing (rather than abandoning) gives each resource a chance to free its
// backing GPU objects before the cache goes away.
GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}
94
// Installs new budget limits and immediately purges anything the new,
// possibly smaller, budget no longer covers.
void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) {
    fMaxCount = count;
    fMaxBytes = bytes;
    fMaxUnusedFlushes = maxUnusedFlushes;
    this->purgeAsNeeded();
}
101
// Begins tracking a newly created resource. New resources are, by definition,
// not purgeable, so they start life in the non-purgeable array.
void GrResourceCache::insertResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    // Update byte/count accounting (and debug/stats high-water marks).
    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    // Only resources that are NOT reachable by unique key live in the scratch
    // map (a unique key takes precedence over scratch reuse).
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Adding this resource may have pushed the cache over budget.
    this->purgeAsNeeded();
}
139
// Stops tracking a resource that is being released or abandoned. Undoes the
// container membership and accounting done by insertResource() plus any later
// key changes.
void GrResourceCache::removeResource(GrGpuResource* resource) {
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // The resource lives in exactly one of the two containers, depending on
    // whether it is currently purgeable.
    if (resource->isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    // Remove from whichever lookup structure holds it (scratch map only if no
    // unique key — mirrors the insertion logic).
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
170
// Abandons every resource (e.g. the underlying context is gone). Each call to
// cacheAccess().abandon() removes that resource from the cache, so these
// loops drain the containers to empty.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    // All lookup structures and accounting should now be back at zero.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}
195
// Releases every resource, freeing the backing GPU objects. Like abandonAll()
// but performs a real release; cacheAccess().release() removes each resource
// from the cache, so the loops drain the containers.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    // Handle any pending cross-context frees first so those refs are dropped.
    this->processFreedGpuResources();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while(fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    // All lookup structures and accounting should now be back at zero.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}
227
228 class GrResourceCache::AvailableForScratchUse {
229 public:
AvailableForScratchUse(bool rejectPendingIO)230 AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }
231
operator ()(const GrGpuResource * resource) const232 bool operator()(const GrGpuResource* resource) const {
233 SkASSERT(!resource->getUniqueKey().isValid() &&
234 resource->resourcePriv().getScratchKey().isValid());
235 if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
236 return false;
237 }
238 return !fRejectPendingIO || !resource->internalHasPendingIO();
239 }
240
241 private:
242 bool fRejectPendingIO;
243 };
244
// Finds a reusable scratch resource for 'scratchKey', honoring the pending-IO
// preference/requirement in 'flags'. On success the resource is ref'ed and
// made most-recently-used; returns nullptr to tell the caller to allocate.
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
                                                          size_t resourceSize,
                                                          uint32_t flags) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource;
    if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) {
        // First pass: only accept a match with no pending IO.
        resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
        if (resource) {
            this->refAndMakeResourceMRU(resource);
            this->validate();
            return resource;
        } else if (flags & kRequireNoPendingIO_ScratchFlag) {
            // "Require" means we may not fall back to a pending-IO match.
            return nullptr;
        }
        // We would prefer to consume more available VRAM rather than flushing
        // immediately, but on ANGLE this can lead to starving of the GPU.
        if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
            // kPrefer is specified, we didn't find a resource without pending io,
            // but there is still space in our budget for the resource so force
            // the caller to allocate a new resource.
            return nullptr;
        }
    }
    // Second pass (or no pending-IO flag at all): accept any scratch match.
    resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}
276
willRemoveScratchKey(const GrGpuResource * resource)277 void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
278 SkASSERT(resource->resourcePriv().getScratchKey().isValid());
279 if (!resource->getUniqueKey().isValid()) {
280 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
281 }
282 }
283
// Strips the unique key from 'resource'. If it also has a scratch key it
// becomes findable as a scratch resource again.
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();

    // Now that the unique key is gone, the resource is eligible for scratch
    // reuse, so (re)insert it into the scratch map.
    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->validate();
}
299
// Assigns 'newKey' to 'resource' (evicting or de-keying any current holder of
// that key), or removes the resource's key entirely if 'newKey' is invalid.
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() && old->isPurgeable()) {
                old->cacheAccess().release();
            } else {
                this->removeUniqueKey(old);
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        // An invalid newKey means "remove the key from this resource".
        this->removeUniqueKey(resource);
    }

    this->validate();
}
337
// Refs 'resource' and moves it to the most-recently-used position. If it was
// purgeable, it transitions back into the non-purgeable array first.
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->ref();

    // A fresh timestamp makes this the MRU entry in LRU ordering.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
353
// Called when one of a resource's reference counts hits zero. Depending on
// 'flags' this may just refresh the timestamp, or it may move the resource to
// the purgeable queue and possibly release it on the spot.
void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(flags);
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
#ifdef SK_DEBUG
        // When the timestamp overflows validate() is called. validate() checks that resources in
        // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
        // the purgeable queue happens just below in this function. So we mark it as an exception.
        if (resource->isPurgeable()) {
            fNewlyPurgeableResourceForValidation = resource;
        }
#endif
        resource->cacheAccess().setTimestamp(this->getNextTimestamp());
        SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
    }

    if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
        // Some count is still non-zero, so the resource stays non-purgeable.
        SkASSERT(!resource->isPurgeable());
        return;
    }

    // All counts are zero: move the resource into the purgeable queue and tag
    // it with the current flush count and wall time for later aging decisions.
    SkASSERT(resource->isPurgeable());
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setFlushCntWhenResourceBecamePurgeable(fExternalFlushCnt);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    if (SkBudgeted::kNo == resource->resourcePriv().isBudgeted()) {
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (fBudgetedCount < fMaxCount &&
                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    } else {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool noKey = !resource->resourcePriv().getScratchKey().isValid() &&
                     !resource->getUniqueKey().isValid();
        if (!this->overBudget() && !noKey) {
            return;
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
415
// Adjusts byte accounting after a resource's reported GPU memory size changed
// from 'oldSize' to its current gpuMemorySize().
void GrResourceCache::didChangeGpuMemorySize(const GrGpuResource* resource, size_t oldSize) {
    // SkASSERT(!fPurging); GrPathRange increases size during flush. :(
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // Note: the unsigned subtraction wraps when the size shrank; converting to
    // the signed ptrdiff_t yields the intended (possibly negative) delta.
    ptrdiff_t delta = resource->gpuMemorySize() - oldSize;

    fBytes += delta;
#if GR_CACHE_STATS
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        fBudgetedBytes += delta;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }

    // A growth may have pushed the cache over budget.
    this->purgeAsNeeded();
    this->validate();
}
439
// Updates budgeted counts/bytes after 'resource' toggled between budgeted and
// unbudgeted. The resource has already changed state when this is called.
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();

    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        // Became budgeted: it now counts against the budget, so we may need to purge.
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        this->purgeAsNeeded();
    } else {
        // Became unbudgeted: stop counting it against the budget.
        --fBudgetedCount;
        fBudgetedBytes -= size;
    }
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}
463
// Processes pending messages, ages out resources unused for too many flushes,
// then releases purgeable resources until the cache is back under budget. If
// the budget still can't be met, requests a flush to free pending-IO resources.
void GrResourceCache::purgeAsNeeded() {
    // First drain the unique-key invalidation inbox.
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        this->processInvalidUniqueKeys(invalidKeyMsgs);
    }

    // And any cross-context free requests.
    this->processFreedGpuResources();

    if (fMaxUnusedFlushes > 0) {
        // We want to know how many complete flushes have occurred without the resource being used.
        // If the resource was tagged when fExternalFlushCnt was N then this means it became
        // purgeable during activity that became the N+1th flush. So when the flush count is N+2
        // it has sat in the purgeable queue for one entire flush.
        uint32_t oldestAllowedFlushCnt = fExternalFlushCnt - fMaxUnusedFlushes - 1;
        // check for underflow
        if (oldestAllowedFlushCnt < fExternalFlushCnt) {
            while (fPurgeableQueue.count()) {
                uint32_t flushWhenResourceBecamePurgeable =
                        fPurgeableQueue.peek()->cacheAccess().flushCntWhenResourceBecamePurgeable();
                if (oldestAllowedFlushCnt < flushWhenResourceBecamePurgeable) {
                    // Resources were given both LRU timestamps and tagged with a flush cnt when
                    // they first became purgeable. The LRU timestamp won't change again until the
                    // resource is made non-purgeable again. So, at this point all the remaining
                    // resources in the timestamp-sorted queue will have a flush count >= to this
                    // one.
                    break;
                }
                GrGpuResource* resource = fPurgeableQueue.peek();
                SkASSERT(resource->isPurgeable());
                resource->cacheAccess().release();
            }
        }
    }

    // Release LRU-first until under budget or nothing purgeable remains.
    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();

    if (stillOverbudget) {
        // Set this so that GrDrawingManager will issue a flush to free up resources with pending
        // IO that we were unable to purge in this pass.
        fRequestFlush = true;
    }
}
515
purgeAllUnlocked()516 void GrResourceCache::purgeAllUnlocked() {
517 // We could disable maintaining the heap property here, but it would add a lot of complexity.
518 // Moreover, this is rarely called.
519 while (fPurgeableQueue.count()) {
520 GrGpuResource* resource = fPurgeableQueue.peek();
521 SkASSERT(resource->isPurgeable());
522 resource->cacheAccess().release();
523 }
524
525 this->validate();
526 }
527
// Releases every purgeable resource that became purgeable before 'purgeTime'.
// The queue is ordered by LRU timestamp, which tracks purgeable-since time, so
// we can stop at the first young-enough resource.
void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
    while (fPurgeableQueue.count()) {
        const GrStdSteadyClock::time_point resourceTime =
                fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
        if (resourceTime >= purgeTime) {
            // Resources were given both LRU timestamps and tagged with a frame number when
            // they first became purgeable. The LRU timestamp won't change again until the
            // resource is made non-purgeable again. So, at this point all the remaining
            // resources in the timestamp-sorted queue will have a frame number >= to this
            // one.
            break;
        }
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
    }
}
545
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)546 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
547
548 const size_t tmpByteBudget = SkTMax((size_t)0, fBytes - bytesToPurge);
549 bool stillOverbudget = tmpByteBudget < fBytes;
550
551 if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
552 // Sort the queue
553 fPurgeableQueue.sort();
554
555 // Make a list of the scratch resources to delete
556 SkTDArray<GrGpuResource*> scratchResources;
557 size_t scratchByteCount = 0;
558 for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
559 GrGpuResource* resource = fPurgeableQueue.at(i);
560 SkASSERT(resource->isPurgeable());
561 if (!resource->getUniqueKey().isValid()) {
562 *scratchResources.append() = resource;
563 scratchByteCount += resource->gpuMemorySize();
564 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
565 }
566 }
567
568 // Delete the scratch resources. This must be done as a separate pass
569 // to avoid messing up the sorted order of the queue
570 for (int i = 0; i < scratchResources.count(); i++) {
571 scratchResources.getAt(i)->cacheAccess().release();
572 }
573 stillOverbudget = tmpByteBudget < fBytes;
574
575 this->validate();
576 }
577
578 // Purge any remaining resources in LRU order
579 if (stillOverbudget) {
580 const size_t cachedByteCount = fMaxBytes;
581 fMaxBytes = tmpByteBudget;
582 this->purgeAsNeeded();
583 fMaxBytes = cachedByteCount;
584 }
585 }
586
processInvalidUniqueKeys(const SkTArray<GrUniqueKeyInvalidatedMessage> & msgs)587 void GrResourceCache::processInvalidUniqueKeys(
588 const SkTArray<GrUniqueKeyInvalidatedMessage>& msgs) {
589 SkASSERT(fProxyProvider); // better have called setProxyProvider
590
591 for (int i = 0; i < msgs.count(); ++i) {
592 fProxyProvider->processInvalidProxyUniqueKey(msgs[i].key());
593
594 GrGpuResource* resource = this->findAndRefUniqueResource(msgs[i].key());
595 if (resource) {
596 resource->resourcePriv().removeUniqueKey();
597 resource->unref(); // If this resource is now purgeable, the cache will be notified.
598 }
599 }
600 }
601
// Takes a ref on a resource created for use by another context. The ref is
// dropped later when a matching GrGpuResourceFreedMessage arrives (see
// processFreedGpuResources()).
void GrResourceCache::insertCrossContextGpuResource(GrGpuResource* resource) {
    resource->ref();
}
605
processFreedGpuResources()606 void GrResourceCache::processFreedGpuResources() {
607 SkTArray<GrGpuResourceFreedMessage> msgs;
608 fFreedGpuResourceInbox.poll(&msgs);
609 for (int i = 0; i < msgs.count(); ++i) {
610 if (msgs[i].fOwningUniqueID == fContextUniqueID) {
611 msgs[i].fResource->unref();
612 }
613 }
614 }
615
addToNonpurgeableArray(GrGpuResource * resource)616 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
617 int index = fNonpurgeableResources.count();
618 *fNonpurgeableResources.append() = resource;
619 *resource->cacheAccess().accessCacheIndex() = index;
620 }
621
// Removes 'resource' from the non-purgeable array in O(1) using the index
// stored on the resource by addToNonpurgeableArray().
void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    // -1 marks "not in the array" for isInCache()/debug checks.
    SkDEBUGCODE(*index = -1);
}
633
// Returns the next LRU timestamp. On 32-bit wraparound, every resource's
// timestamp is re-assigned compactly (preserving relative order) so newer
// resources always compare larger than older ones.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Drain the (timestamp-ordered) queue into a sorted array.
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
695
// Called after a flush. Cache-requested flushes clear the pending-flush flag;
// external flushes advance the flush counter used to age purgeable resources.
// Either way, this is an opportunity to purge.
void GrResourceCache::notifyFlushOccurred(FlushType type) {
    switch (type) {
        case FlushType::kCacheRequested:
            SkASSERT(fRequestFlush);
            fRequestFlush = false;
            break;
        case FlushType::kExternal:
            ++fExternalFlushCnt;
            if (0 == fExternalFlushCnt) {
                // When this wraps just reset all the purgeable resources' last used flush state.
                for (int i = 0; i < fPurgeableQueue.count(); ++i) {
                    fPurgeableQueue.at(i)->cacheAccess().setFlushCntWhenResourceBecamePurgeable(0);
                }
            }
            break;
    }
    this->purgeAsNeeded();
}
714
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump) const715 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
716 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
717 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
718 }
719 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
720 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
721 }
722 }
723
724 #ifdef SK_DEBUG
// Debug-only consistency check: walks every cached resource and verifies that
// container membership, key maps, and all byte/count accounting agree.
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Accumulates totals over all resources; compared against the cache's
    // running counters at the end.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        // Tallies one resource and asserts its key-map membership is consistent
        // with its scratch/unique key state.
        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(SkBudgeted::kNo == resource->resourcePriv().isBudgeted() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(SkBudgeted::kYes == resource->resourcePriv().isBudgeted() ||
                         resource->resourcePriv().refsWrappedObjects());

                // Unique-keyed resources must never also be in the scratch map.
                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        // Sanity-check the scratch map's iterator and its entries.
        ScratchMap::ConstIter iter(&fScratchMap);

        int count = 0;
        for ( ; !iter.done(); ++iter) {
            const GrGpuResource* resource = *iter;
            SkASSERT(resource->resourcePriv().getScratchKey().isValid());
            SkASSERT(!resource->getUniqueKey().isValid());
            count++;
        }
        SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
    }

    Stats stats(this);
    size_t purgeableBytes = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        // The only permitted purgeable resource here is the one currently in
        // transit to the queue (see notifyCntReachedZero()).
        SkASSERT(!fNonpurgeableResources[i]->isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    // Cross-check the tallies against the cache's running counters.
    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
846
isInCache(const GrGpuResource * resource) const847 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
848 int index = *resource->cacheAccess().accessCacheIndex();
849 if (index < 0) {
850 return false;
851 }
852 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
853 return true;
854 }
855 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
856 return true;
857 }
858 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
859 return false;
860 }
861
862 #endif
863