/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceCache.h"
#include <atomic>
#include <vector>
#include "include/gpu/GrDirectContext.h"
#include "include/private/SingleOwner.h"
#include "include/private/SkTo.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkMessageBus.h"
#include "src/core/SkOpts.h"
#include "src/core/SkScopeExit.h"
#include "src/core/SkTSort.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxyCacheAccess.h"
#include "src/gpu/GrThreadSafeCache.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"

DECLARE_SKMESSAGEBUS_MESSAGE(skgpu::UniqueKeyInvalidatedMessage, uint32_t, true);

DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

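// TextureAwaitingUnref tracks a GrTexture that holds one or more pending refs which are released
// as the corresponding GrTextureFreedMessages arrive; any refs still outstanding when the entry
// is destroyed are dropped in the destructor.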
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}

inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}

inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}

inline void GrResourceCache::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }

inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}

inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(skgpu::SingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
    this->purgeAsNeeded();
}

void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse() { }

    bool operator()(const GrGpuResource* resource) const {
        // Everything that is in the scratch map should be usable as a
        // scratch resource.
        return true;
    }
};

GrGpuResource* GrResourceCache::findAndRefScratchResource(const skgpu::ScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const skgpu::UniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

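// Called when a resource loses its last main ref or its last command buffer usage. If the
// resource is now purgeable it is moved to the purgeable queue and then either kept (it has a
// scratch or unique key and we are within budget, or it can be converted into a budgeted
// scratch resource) or released immediately.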
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

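// Drains the unique-key invalidation and freed-texture message inboxes, then releases purgeable
// resources in LRU order until the cache is back under its byte budget. If that is not enough,
// unique refs held by the thread-safe cache are dropped and purging is retried.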
void GrResourceCache::purgeAsNeeded() {
    SkTArray<skgpu::UniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
            if (invalidKeyMsgs[i].inThreadSafeCache()) {
                fThreadSafeCache->remove(invalidKeyMsgs[i].key());
                SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
            } else {
                fProxyProvider->processInvalidUniqueKey(
                                                    invalidKeyMsgs[i].key(), nullptr,
                                                    GrProxyProvider::InvalidateGPUResource::kYes);
                SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
            }
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    if (stillOverbudget) {
        fThreadSafeCache->dropUniqueRefs(this);

        stillOverbudget = this->overBudget();
        while (stillOverbudget && fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
            stillOverbudget = this->overBudget();
        }
    }

    this->validate();
}

void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
                                             bool scratchResourcesOnly) {

    if (!scratchResourcesOnly) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}

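// Attempts to free enough purgeable resources, in LRU order, that 'desiredHeadroomBytes' of new
// allocations would fit under the budget. Returns false without releasing anything if that
// headroom cannot be achieved.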
bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}

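// Purges roughly 'bytesToPurge' worth of purgeable resources. When 'preferScratchResources' is
// true, scratch resources (those without a unique key) are released first; any remaining deficit
// is covered by temporarily lowering the byte budget and calling purgeAsNeeded().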
void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

    const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

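// Returns true when the cache is over budget, nothing is currently purgeable, and there are
// budgeted resources that a flush would make purgeable, i.e. flushing is the only way to free
// memory.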
bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}

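// Holds an extra ref on 'texture' until a matching GrTextureFreedMessage arrives on the
// fFreedTextureInbox message bus; processFreedGpuResources() performs the deferred unrefs.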
void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
    texture->ref();
    uint32_t id = texture->uniqueID().asUInt();
    if (auto* data = fTexturesAwaitingUnref.find(id)) {
        data->addRef();
    } else {
        fTexturesAwaitingUnref.set(id, {texture});
    }
}

void GrResourceCache::processFreedGpuResources() {
    if (!fTexturesAwaitingUnref.count()) {
        return;
    }

    SkTArray<GrTextureFreedMessage> msgs;
    fFreedTextureInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
        uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
        TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
        // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
        // empty and we would have returned early above. Thus, any texture from a message should be
        // in the list of fTexturesAwaitingUnref.
        SkASSERT(info);
        info->unref();
        if (info->finished()) {
            fTexturesAwaitingUnref.remove(id);
        }
    }
}

void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.count();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if GR_TEST_UTILS
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
                                             SkTArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
#endif // GR_TEST_UTILS
#endif // GR_CACHE_STATS

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const skgpu::ScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const skgpu::UniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if GR_TEST_UTILS

int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
    int count = 0;
    fUniqueHash.foreach([&](const GrGpuResource& resource){
        if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
            ++count;
        }
    });
    return count;
}

void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}

#endif // GR_TEST_UTILS