1 /*
2  * Copyright 2014 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/GrResourceCache.h"
9 #include <atomic>
10 #include <vector>
11 #include "include/gpu/GrDirectContext.h"
12 #include "include/private/GrSingleOwner.h"
13 #include "include/private/SkTo.h"
14 #include "include/utils/SkRandom.h"
15 #include "src/core/SkMessageBus.h"
16 #include "src/core/SkOpts.h"
17 #include "src/core/SkScopeExit.h"
18 #include "src/core/SkTSort.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrGpuResourceCacheAccess.h"
22 #include "src/gpu/GrProxyProvider.h"
23 #include "src/gpu/GrTexture.h"
24 #include "src/gpu/GrTextureProxyCacheAccess.h"
25 #include "src/gpu/GrThreadSafeCache.h"
26 #include "src/gpu/GrTracing.h"
27 #include "src/gpu/SkGr.h"
28 
29 DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);
30 
31 DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);
32 
33 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)
34 
35 //////////////////////////////////////////////////////////////////////////////
36 
37 GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
38     static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};
39 
40     int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
41     if (type > SkTo<int32_t>(UINT16_MAX)) {
42         SK_ABORT("Too many Resource Types");
43     }
44 
45     return static_cast<ResourceType>(type);
46 }
47 
48 GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
49     static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};
50 
51     int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
52     if (domain > SkTo<int32_t>(UINT16_MAX)) {
53         SK_ABORT("Too many GrUniqueKey Domains");
54     }
55 
56     return static_cast<Domain>(domain);
57 }
58 
59 uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
60     return SkOpts::hash(data, size);
61 }
62 
63 //////////////////////////////////////////////////////////////////////////////
64 
65 class GrResourceCache::AutoValidate : ::SkNoncopyable {
66 public:
67     AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
68     ~AutoValidate() { fCache->validate(); }
69 private:
70     GrResourceCache* fCache;
71 };
72 
73 //////////////////////////////////////////////////////////////////////////////
74 
75 inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;
76 
77 inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
78         : fTexture(texture), fNumUnrefs(1) {}
79 
80 inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
81     fTexture = std::exchange(that.fTexture, nullptr);
82     fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
83 }
84 
85 inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
86         TextureAwaitingUnref&& that) {
87     fTexture = std::exchange(that.fTexture, nullptr);
88     fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
89     return *this;
90 }
91 
92 inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
93     if (fTexture) {
94         for (int i = 0; i < fNumUnrefs; ++i) {
95             fTexture->unref();
96         }
97     }
98 }
99 
100 inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }
101 
102 inline void GrResourceCache::TextureAwaitingUnref::unref() {
103     SkASSERT(fNumUnrefs > 0);
104     fTexture->unref();
105     --fNumUnrefs;
106 }
107 
108 inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }
109 
110 //////////////////////////////////////////////////////////////////////////////
111 
112 GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
113                                  GrDirectContext::DirectContextID owningContextID,
114                                  uint32_t familyID)
115         : fInvalidUniqueKeyInbox(familyID)
116         , fFreedTextureInbox(owningContextID)
117         , fOwningContextID(owningContextID)
118         , fContextUniqueID(familyID)
119         , fSingleOwner(singleOwner) {
120     SkASSERT(owningContextID.isValid());
121     SkASSERT(familyID != SK_InvalidUniqueID);
122 }
123 
124 GrResourceCache::~GrResourceCache() {
125     this->releaseAll();
126 }
127 
128 void GrResourceCache::setLimit(size_t bytes) {
129     fMaxBytes = bytes;
130     this->purgeAsNeeded();
131 }
132 
133 void GrResourceCache::insertResource(GrGpuResource* resource) {
134     ASSERT_SINGLE_OWNER
135     SkASSERT(resource);
136     SkASSERT(!this->isInCache(resource));
137     SkASSERT(!resource->wasDestroyed());
138     SkASSERT(!resource->resourcePriv().isPurgeable());
139     if (!resource || this->isInCache(resource) || resource->wasDestroyed() || resource->resourcePriv().isPurgeable()) {
140         SkDebugf("OHOS GrResourceCache::insertResource resource is invalid!!!");
141         return;
142     }
143     // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
144     // up iterating over all the resources that already have timestamps.
145     resource->cacheAccess().setTimestamp(this->getNextTimestamp());
146 
147     this->addToNonpurgeableArray(resource);
148 
149     size_t size = resource->gpuMemorySize();
150     SkDEBUGCODE(++fCount;)
151     fBytes += size;
152 #if GR_CACHE_STATS
153     fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
154     fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
155 #endif
156     if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
157         ++fBudgetedCount;
158         fBudgetedBytes += size;
159         TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
160                        fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
161 #if GR_CACHE_STATS
162         fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
163         fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
164 #endif
165     }
166     SkASSERT(!resource->cacheAccess().isUsableAsScratch());
167     this->purgeAsNeeded();
168 }
169 
170 void GrResourceCache::removeResource(GrGpuResource* resource) {
171     ASSERT_SINGLE_OWNER
172     this->validate();
173     SkASSERT(this->isInCache(resource));
174 
175     size_t size = resource->gpuMemorySize();
176     if (resource->resourcePriv().isPurgeable() && this->isInPurgeableCache(resource)) {
177         fPurgeableQueue.remove(resource);
178         fPurgeableBytes -= size;
179     } else if (this->isInNonpurgeableCache(resource)) {
180         this->removeFromNonpurgeableArray(resource);
181     }
182 
183     SkDEBUGCODE(--fCount;)
184     fBytes -= size;
185     if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
186         --fBudgetedCount;
187         fBudgetedBytes -= size;
188         TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
189                        fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
190     }
191 
192     if (resource->cacheAccess().isUsableAsScratch()) {
193         fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
194     }
195     if (resource->getUniqueKey().isValid()) {
196         fUniqueHash.remove(resource->getUniqueKey());
197     }
198     this->validate();
199 }
200 
201 void GrResourceCache::abandonAll() {
202     AutoValidate av(this);
203 
204     // We need to make sure to free any resources that were waiting on a free message but never
205     // received one.
206     fTexturesAwaitingUnref.reset();
207 
208     while (fNonpurgeableResources.count()) {
209         GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
210         SkASSERT(!back->wasDestroyed());
211         back->cacheAccess().abandon();
212     }
213 
214     while (fPurgeableQueue.count()) {
215         GrGpuResource* top = fPurgeableQueue.peek();
216         SkASSERT(!top->wasDestroyed());
217         top->cacheAccess().abandon();
218     }
219 
220     fThreadSafeCache->dropAllRefs();
221 
222     SkASSERT(!fScratchMap.count());
223     SkASSERT(!fUniqueHash.count());
224     SkASSERT(!fCount);
225     SkASSERT(!this->getResourceCount());
226     SkASSERT(!fBytes);
227     SkASSERT(!fBudgetedCount);
228     SkASSERT(!fBudgetedBytes);
229     SkASSERT(!fPurgeableBytes);
230     SkASSERT(!fTexturesAwaitingUnref.count());
231 }
232 
233 void GrResourceCache::releaseAll() {
234     AutoValidate av(this);
235 
236     fThreadSafeCache->dropAllRefs();
237 
238     this->processFreedGpuResources();
239 
240     // We need to make sure to free any resources that were waiting on a free message but never
241     // received one.
242     fTexturesAwaitingUnref.reset();
243 
244     SkASSERT(fProxyProvider); // better have called setProxyProvider
245     SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too
246 
247     // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
248     // they also have a raw pointer back to this class (which is presumably going away)!
249     fProxyProvider->removeAllUniqueKeys();
250 
251     while (fNonpurgeableResources.count()) {
252         GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
253         SkASSERT(!back->wasDestroyed());
254         back->cacheAccess().release();
255     }
256 
257     while (fPurgeableQueue.count()) {
258         GrGpuResource* top = fPurgeableQueue.peek();
259         SkASSERT(!top->wasDestroyed());
260         top->cacheAccess().release();
261     }
262 
263     SkASSERT(!fScratchMap.count());
264     SkASSERT(!fUniqueHash.count());
265     SkASSERT(!fCount);
266     SkASSERT(!this->getResourceCount());
267     SkASSERT(!fBytes);
268     SkASSERT(!fBudgetedCount);
269     SkASSERT(!fBudgetedBytes);
270     SkASSERT(!fPurgeableBytes);
271     SkASSERT(!fTexturesAwaitingUnref.count());
272 }
273 
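// Releases every cached resource whose tag matches 'tag'. Matching resources are gathered from
// both the nonpurgeable array and the purgeable queue, their unique keys are dropped via the
// proxy provider, and the resources are released in a second pass so that neither container is
// mutated while it is being iterated.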
274 void GrResourceCache::releaseByTag(const GrGpuResourceTag tag) {
275     AutoValidate av(this);
276     this->processFreedGpuResources();
277     SkASSERT(fProxyProvider); // better have called setProxyProvider
278     std::vector<GrGpuResource*> recycleVector;
279     for (int i = 0; i < fNonpurgeableResources.count(); i++) {
280         GrGpuResource* resource = fNonpurgeableResources[i];
281         if (tag.filter(resource->getResourceTag())) {
282             recycleVector.emplace_back(resource);
283             if (resource->getUniqueKey().isValid()) {
284                 fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
285                     GrProxyProvider::InvalidateGPUResource::kNo);
286             }
287         }
288     }
289 
290     for (int i = 0; i < fPurgeableQueue.count(); i++) {
291         GrGpuResource* resource = fPurgeableQueue.at(i);
292         if (tag.filter(resource->getResourceTag())) {
293             recycleVector.emplace_back(resource);
294             if (resource->getUniqueKey().isValid()) {
295                 fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
296                     GrProxyProvider::InvalidateGPUResource::kNo);
297             }
298         }
299     }
300 
301     for (auto resource : recycleVector) {
302         SkASSERT(!resource->wasDestroyed());
303         resource->cacheAccess().release();
304     }
305 }
306 
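// A valid tag is pushed onto the tag stack and becomes the current tag; an invalid tag pops the
// most recently pushed one instead.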
307 void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag tag) {
308     if (tag.isGrTagValid()) {
309         grResourceTagCacheStack.push(tag);
310         return;
311     }
312     if (!grResourceTagCacheStack.empty()) {
313         grResourceTagCacheStack.pop();
314     }
315 }
316 
317 GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
318     if (grResourceTagCacheStack.empty()) {
319         return {};
320     }
321     return grResourceTagCacheStack.top();
322 }
323 
324 std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
325     std::set<GrGpuResourceTag> result;
326     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
327         auto tag = fNonpurgeableResources[i]->getResourceTag();
328         result.insert(tag);
329     }
330     return result;
331 }
332 
333 void GrResourceCache::refResource(GrGpuResource* resource) {
334     SkASSERT(resource);
335     SkASSERT(resource->getContext()->priv().getResourceCache() == this);
336     if (resource->cacheAccess().hasRef()) {
337         resource->ref();
338     } else {
339         this->refAndMakeResourceMRU(resource);
340     }
341     this->validate();
342 }
343 
344 class GrResourceCache::AvailableForScratchUse {
345 public:
346     AvailableForScratchUse() { }
347 
348     bool operator()(const GrGpuResource* resource) const {
349         // Everything that is in the scratch map should be usable as a
350         // scratch resource.
351         return true;
352     }
353 };
354 
355 GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
356     SkASSERT(scratchKey.isValid());
357 
358     GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
359     if (resource) {
360         fScratchMap.remove(scratchKey, resource);
361         if (!this->isInCache(resource)) {
362             SkDebugf("OHOS GrResourceCache::findAndRefScratchResource not in cache, return!!!");
363             return nullptr;
364         }
365         this->refAndMakeResourceMRU(resource);
366         this->validate();
367     }
368     return resource;
369 }
370 
371 void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
372     ASSERT_SINGLE_OWNER
373     SkASSERT(resource->resourcePriv().getScratchKey().isValid());
374     if (resource->cacheAccess().isUsableAsScratch()) {
375         fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
376     }
377 }
378 
379 void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
380     ASSERT_SINGLE_OWNER
381     // Someone has a ref to this resource in order to have removed the key. When the ref count
382     // reaches zero we will get a ref cnt notification and figure out what to do with it.
383     if (resource->getUniqueKey().isValid()) {
384         SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
385         fUniqueHash.remove(resource->getUniqueKey());
386     }
387     resource->cacheAccess().removeUniqueKey();
388     if (resource->cacheAccess().isUsableAsScratch()) {
389         fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
390     }
391 
392     // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
393     // require purging. However, the resource must be ref'ed to get here and therefore can't
394     // be purgeable. We'll purge it when the refs reach zero.
395     SkASSERT(!resource->resourcePriv().isPurgeable());
396     this->validate();
397 }
398 
399 void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
400     ASSERT_SINGLE_OWNER
401     SkASSERT(resource);
402     SkASSERT(this->isInCache(resource));
403 
404     // If another resource has the new key, remove its key then install the key on this resource.
405     if (newKey.isValid()) {
406         if (GrGpuResource* old = fUniqueHash.find(newKey)) {
407             // If the old resource using the key is purgeable and is unreachable, then remove it.
408             if (!old->resourcePriv().getScratchKey().isValid() &&
409                 old->resourcePriv().isPurgeable()) {
410                 old->cacheAccess().release();
411             } else {
412                 // removeUniqueKey expects an external owner of the resource.
413                 this->removeUniqueKey(sk_ref_sp(old).get());
414             }
415         }
416         SkASSERT(nullptr == fUniqueHash.find(newKey));
417 
418         // Remove the entry for this resource if it already has a unique key.
419         if (resource->getUniqueKey().isValid()) {
420             SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
421             fUniqueHash.remove(resource->getUniqueKey());
422             SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
423         } else {
424             // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
425             // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
426             // unique key until after this check.
427             if (resource->cacheAccess().isUsableAsScratch()) {
428                 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
429             }
430         }
431 
432         resource->cacheAccess().setUniqueKey(newKey);
433         fUniqueHash.add(resource);
434     } else {
435         this->removeUniqueKey(resource);
436     }
437 
438     this->validate();
439 }
440 
441 void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
442     ASSERT_SINGLE_OWNER
443     SkASSERT(resource);
444     SkASSERT(this->isInCache(resource));
445 
446     if (resource->resourcePriv().isPurgeable()) {
447         // It's about to become unpurgeable.
448         if (this->isInPurgeableCache(resource)) {
449             fPurgeableBytes -= resource->gpuMemorySize();
450             fPurgeableQueue.remove(resource);
451         }
452         if (!this->isInNonpurgeableCache(resource)) {
453             this->addToNonpurgeableArray(resource);
454         } else {
455             SkDebugf("OHOS resource in isInNonpurgeableCache, do not add again!");
456         }
457     } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
458                resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
459         SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
460         fNumBudgetedResourcesFlushWillMakePurgeable--;
461     }
462     resource->cacheAccess().ref();
463 
464     resource->cacheAccess().setTimestamp(this->getNextTimestamp());
465     this->validate();
466 }
467 
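// Called when a resource drops its last ref of the given kind. If other refs or command buffer
// usages remain we only revalidate. Otherwise the resource gets a fresh timestamp and, once it
// is actually purgeable, moves from the nonpurgeable array into the purgeable queue; it is then
// either kept for reuse (keyed and budget permitting) or released immediately.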
468 void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
469                                                GrGpuResource::LastRemovedRef removedRef) {
470     ASSERT_SINGLE_OWNER
471     SkASSERT(resource);
472     SkASSERT(!resource->wasDestroyed());
473     SkASSERT(this->isInCache(resource));
474     // This resource should always be in the nonpurgeable array when this function is called. It
475     // will be moved to the queue if it is newly purgeable.
476     SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
477     if (!resource || resource->wasDestroyed() || this->isInPurgeableCache(resource) ||
478         !this->isInNonpurgeableCache(resource)) {
479         SkDebugf("OHOS GrResourceCache::notifyARefCntReachedZero return!");
480         return;
481     }
482     if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
483         if (resource->cacheAccess().isUsableAsScratch()) {
484             fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
485         }
486     }
487 
488     if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
489         this->validate();
490         return;
491     }
492 
493 #ifdef SK_DEBUG
494     // When the timestamp overflows validate() is called. validate() checks that resources in
495     // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
496     // the purgeable queue happens just below in this function. So we mark it as an exception.
497     if (resource->resourcePriv().isPurgeable()) {
498         fNewlyPurgeableResourceForValidation = resource;
499     }
500 #endif
501     resource->cacheAccess().setTimestamp(this->getNextTimestamp());
502     SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
503 
504     if (!resource->resourcePriv().isPurgeable() &&
505         resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
506         ++fNumBudgetedResourcesFlushWillMakePurgeable;
507     }
508 
509     if (!resource->resourcePriv().isPurgeable()) {
510         this->validate();
511         return;
512     }
513 
514     this->removeFromNonpurgeableArray(resource);
515     fPurgeableQueue.insert(resource);
516     resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
517     fPurgeableBytes += resource->gpuMemorySize();
518 
519     bool hasUniqueKey = resource->getUniqueKey().isValid();
520 
521     GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();
522 
523     if (budgetedType == GrBudgetedType::kBudgeted) {
524         // Purge the resource immediately if we're over budget
525         // Also purge if the resource has neither a valid scratch key nor a unique key.
526         bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
527         if (!this->overBudget() && hasKey) {
528             return;
529         }
530     } else {
531         // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
532         // they can be reused again by the image connected to the unique key.
533         if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
534             return;
535         }
536         // Check whether this resource could still be used as a scratch resource.
537         if (!resource->resourcePriv().refsWrappedObjects() &&
538             resource->resourcePriv().getScratchKey().isValid()) {
539             // We won't purge an existing resource to make room for this one.
540             if (this->wouldFit(resource->gpuMemorySize())) {
541                 resource->resourcePriv().makeBudgeted();
542                 return;
543             }
544         }
545     }
546 
547     SkDEBUGCODE(int beforeCount = this->getResourceCount();)
548     resource->cacheAccess().release();
549     // We should at least free this resource, perhaps dependent resources as well.
550     SkASSERT(this->getResourceCount() < beforeCount);
551     this->validate();
552 }
553 
554 void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
555     ASSERT_SINGLE_OWNER
556     SkASSERT(resource);
557     SkASSERT(this->isInCache(resource));
558 
559     size_t size = resource->gpuMemorySize();
560     // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
561     // resource become purgeable. However, we should never allow that transition. Wrapped
562     // resources are the only resources that can be in that state and they aren't allowed to
563     // transition from one budgeted state to another.
564     SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
565     if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
566         ++fBudgetedCount;
567         fBudgetedBytes += size;
568 #if GR_CACHE_STATS
569         fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
570         fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
571 #endif
572         if (!resource->resourcePriv().isPurgeable() &&
573             !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
574             ++fNumBudgetedResourcesFlushWillMakePurgeable;
575         }
576         if (resource->cacheAccess().isUsableAsScratch()) {
577             fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
578         }
579         this->purgeAsNeeded();
580     } else {
581         SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
582         --fBudgetedCount;
583         fBudgetedBytes -= size;
584         if (!resource->resourcePriv().isPurgeable() &&
585             !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
586             --fNumBudgetedResourcesFlushWillMakePurgeable;
587         }
588         if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
589             resource->resourcePriv().getScratchKey().isValid()) {
590             fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
591         }
592     }
593     SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
594     TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
595                    fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
596 
597     this->validate();
598 }
599 
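// Drains the invalidated-unique-key inbox and any pending texture-freed messages, then releases
// purgeable resources until the cache is back under budget. If that is not enough, unique refs
// held by the thread-safe cache are dropped and the purge is attempted once more.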
600 void GrResourceCache::purgeAsNeeded() {
601     SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
602     fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
603     if (invalidKeyMsgs.count()) {
604         SkASSERT(fProxyProvider);
605 
606         for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
607             if (invalidKeyMsgs[i].inThreadSafeCache()) {
608                 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
609                 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
610             } else {
611                 fProxyProvider->processInvalidUniqueKey(
612                                                     invalidKeyMsgs[i].key(), nullptr,
613                                                     GrProxyProvider::InvalidateGPUResource::kYes);
614                 SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
615             }
616         }
617     }
618 
619     this->processFreedGpuResources();
620 
621     bool stillOverbudget = this->overBudget();
622     while (stillOverbudget && fPurgeableQueue.count()) {
623         GrGpuResource* resource = fPurgeableQueue.peek();
624         if (!resource->resourcePriv().isPurgeable()) {
625             SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable");
626             break; // cannot make progress: the head of the purgeable queue is not purgeable
627         }
628         SkASSERT(resource->resourcePriv().isPurgeable());
629         resource->cacheAccess().release();
630         stillOverbudget = this->overBudget();
631     }
632 
633     if (stillOverbudget) {
634         fThreadSafeCache->dropUniqueRefs(this);
635 
636         stillOverbudget = this->overBudget();
637         while (stillOverbudget && fPurgeableQueue.count()) {
638             GrGpuResource* resource = fPurgeableQueue.peek();
639             if (!resource->resourcePriv().isPurgeable()) {
640                 SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable after dropUniqueRefs");
641                 break; // as above, avoid spinning on a non-purgeable queue head
642             }
643             SkASSERT(resource->resourcePriv().isPurgeable());
644             resource->cacheAccess().release();
645             stillOverbudget = this->overBudget();
646         }
647     }
648 
649     this->validate();
650 }
651 
652 void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
653                                              bool scratchResourcesOnly) {
654 
655     if (!scratchResourcesOnly) {
656         if (purgeTime) {
657             fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
658         } else {
659             fThreadSafeCache->dropUniqueRefs(nullptr);
660         }
661 
662         // We could disable maintaining the heap property here, but it would add a lot of
663         // complexity. Moreover, this is rarely called.
664         while (fPurgeableQueue.count()) {
665             GrGpuResource* resource = fPurgeableQueue.peek();
666 
667             const GrStdSteadyClock::time_point resourceTime =
668                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
669             if (purgeTime && resourceTime >= *purgeTime) {
670                 // Resources were given both LRU timestamps and tagged with a frame number when
671                 // they first became purgeable. The LRU timestamp won't change again until the
672                 // resource is made non-purgeable again. So, at this point all the remaining
673                 // resources in the timestamp-sorted queue will have a frame number >= to this
674                 // one.
675                 break;
676             }
677 
678             SkASSERT(resource->resourcePriv().isPurgeable());
679             resource->cacheAccess().release();
680         }
681     } else {
682         // Early out if the very first item is too new to purge to avoid sorting the queue when
683         // nothing will be deleted.
684         if (purgeTime && fPurgeableQueue.count() &&
685             fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
686             return;
687         }
688 
689         // Sort the queue
690         fPurgeableQueue.sort();
691 
692         // Make a list of the scratch resources to delete
693         SkTDArray<GrGpuResource*> scratchResources;
694         for (int i = 0; i < fPurgeableQueue.count(); i++) {
695             GrGpuResource* resource = fPurgeableQueue.at(i);
696 
697             const GrStdSteadyClock::time_point resourceTime =
698                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
699             if (purgeTime && resourceTime >= *purgeTime) {
700                 // scratch or not, all later iterations will be too recently used to purge.
701                 break;
702             }
703             SkASSERT(resource->resourcePriv().isPurgeable());
704             if (!resource->getUniqueKey().isValid()) {
705                 *scratchResources.append() = resource;
706             }
707         }
708 
709         // Delete the scratch resources. This must be done as a separate pass
710         // to avoid messing up the sorted order of the queue
711         for (int i = 0; i < scratchResources.count(); i++) {
712             scratchResources.getAt(i)->cacheAccess().release();
713         }
714     }
715 
716     this->validate();
717 }
718 
719 void GrResourceCache::purgeUnlockAndSafeCacheGpuResources() {
720     fThreadSafeCache->dropUniqueRefs(nullptr);
721     // Sort the queue
722     fPurgeableQueue.sort();
723 
724     // Make a list of the scratch resources to delete
725     SkTDArray<GrGpuResource*> scratchResources;
726     for (int i = 0; i < fPurgeableQueue.count(); i++) {
727         GrGpuResource* resource = fPurgeableQueue.at(i);
728         SkASSERT(resource->resourcePriv().isPurgeable());
729         if (!resource->getUniqueKey().isValid()) {
730             *scratchResources.append() = resource;
731         }
732     }
733 
734     // Delete the scratch resources. This must be done as a separate pass
735     // to avoid messing up the sorted order of the queue
736     for (int i = 0; i < scratchResources.count(); i++) {
737         scratchResources.getAt(i)->cacheAccess().release();
738     }
739 
740     this->validate();
741 }
742 
743 void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag tag) {
744     // Sort the queue
745     fPurgeableQueue.sort();
746 
747     // Make a list of the scratch resources to delete
748     SkTDArray<GrGpuResource*> scratchResources;
749     for (int i = 0; i < fPurgeableQueue.count(); i++) {
750         GrGpuResource* resource = fPurgeableQueue.at(i);
751         SkASSERT(resource->resourcePriv().isPurgeable());
752         if (tag.filter(resource->getResourceTag()) && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
753             *scratchResources.append() = resource;
754         }
755     }
756 
757     // Delete the scratch resources. This must be done as a separate pass
758     // to avoid messing up the sorted order of the queue
759     for (int i = 0; i < scratchResources.count(); i++) {
760         scratchResources.getAt(i)->cacheAccess().release();
761     }
762 
763     this->validate();
764 }
765 
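// Returns true if 'desiredHeadroomBytes' can be accommodated within the budget. The purgeable
// queue is walked in LRU order, tallying how many budgeted bytes would be freed; resources are
// only actually released if enough of them are found to reach the target.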
766 bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
767     AutoValidate av(this);
768     if (desiredHeadroomBytes > fMaxBytes) {
769         return false;
770     }
771     if (this->wouldFit(desiredHeadroomBytes)) {
772         return true;
773     }
774     fPurgeableQueue.sort();
775 
776     size_t projectedBudget = fBudgetedBytes;
777     int purgeCnt = 0;
778     for (int i = 0; i < fPurgeableQueue.count(); i++) {
779         GrGpuResource* resource = fPurgeableQueue.at(i);
780         if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
781             projectedBudget -= resource->gpuMemorySize();
782         }
783         if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
784             purgeCnt = i + 1;
785             break;
786         }
787     }
788     if (purgeCnt == 0) {
789         return false;
790     }
791 
792     // Success! Release the resources.
793     // Copy to array first so we don't mess with the queue.
794     std::vector<GrGpuResource*> resources;
795     resources.reserve(purgeCnt);
796     for (int i = 0; i < purgeCnt; i++) {
797         resources.push_back(fPurgeableQueue.at(i));
798     }
799     for (GrGpuResource* resource : resources) {
800         resource->cacheAccess().release();
801     }
802     return true;
803 }
804 
805 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
806 
807     const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
808     bool stillOverbudget = tmpByteBudget < fBytes;
809 
810     if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
811         // Sort the queue
812         fPurgeableQueue.sort();
813 
814         // Make a list of the scratch resources to delete
815         SkTDArray<GrGpuResource*> scratchResources;
816         size_t scratchByteCount = 0;
817         for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
818             GrGpuResource* resource = fPurgeableQueue.at(i);
819             SkASSERT(resource->resourcePriv().isPurgeable());
820             if (!resource->getUniqueKey().isValid()) {
821                 *scratchResources.append() = resource;
822                 scratchByteCount += resource->gpuMemorySize();
823                 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
824             }
825         }
826 
827         // Delete the scratch resources. This must be done as a separate pass
828         // to avoid messing up the sorted order of the queue
829         for (int i = 0; i < scratchResources.count(); i++) {
830             scratchResources.getAt(i)->cacheAccess().release();
831         }
832         stillOverbudget = tmpByteBudget < fBytes;
833 
834         this->validate();
835     }
836 
837     // Purge any remaining resources in LRU order
838     if (stillOverbudget) {
839         const size_t cachedByteCount = fMaxBytes;
840         fMaxBytes = tmpByteBudget;
841         this->purgeAsNeeded();
842         fMaxBytes = cachedByteCount;
843     }
844 }
845 
846 bool GrResourceCache::requestsFlush() const {
847     return this->overBudget() && !fPurgeableQueue.count() &&
848            fNumBudgetedResourcesFlushWillMakePurgeable > 0;
849 }
850 
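// Takes an extra ref on 'texture' and records it until a matching GrTextureFreedMessage arrives;
// processFreedGpuResources() balances those refs and drops the entry once all pending unrefs
// have been received.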
851 void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
852     texture->ref();
853     uint32_t id = texture->uniqueID().asUInt();
854     if (auto* data = fTexturesAwaitingUnref.find(id)) {
855         data->addRef();
856     } else {
857         fTexturesAwaitingUnref.set(id, {texture});
858     }
859 }
860 
861 void GrResourceCache::processFreedGpuResources() {
862     if (!fTexturesAwaitingUnref.count()) {
863         return;
864     }
865 
866     SkTArray<GrTextureFreedMessage> msgs;
867     fFreedTextureInbox.poll(&msgs);
868     for (int i = 0; i < msgs.count(); ++i) {
869         SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
870         uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
871         TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
872         // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
873         // empty and we would have returned early above. Thus, any texture from a message should be
874         // in the list of fTexturesAwaitingUnref.
875         SkASSERT(info);
876         info->unref();
877         if (info->finished()) {
878             fTexturesAwaitingUnref.remove(id);
879         }
880     }
881 }
882 
883 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
884     int index = fNonpurgeableResources.count();
885     *fNonpurgeableResources.append() = resource;
886     *resource->cacheAccess().accessCacheIndex() = index;
887 }
888 
889 void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
890     int* index = resource->cacheAccess().accessCacheIndex();
891     // Fill the hole we will create in the array with the tail object, adjust its index, and
892     // then pop the array
893     GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
894     SkASSERT(fNonpurgeableResources[*index] == resource);
895     fNonpurgeableResources[*index] = tail;
896     *tail->cacheAccess().accessCacheIndex() = *index;
897     fNonpurgeableResources.pop();
898     SkDEBUGCODE(*index = -1);
899 }
900 
901 uint32_t GrResourceCache::getNextTimestamp() {
902     // If we wrap then all the existing resources will appear older than any resources that get
903     // a timestamp after the wrap.
904     if (0 == fTimestamp) {
905         int count = this->getResourceCount();
906         if (count) {
907             // Reset all the timestamps. We sort the resources by timestamp and then assign
908             // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
909             // rare.
910             SkTDArray<GrGpuResource*> sortedPurgeableResources;
911             sortedPurgeableResources.setReserve(fPurgeableQueue.count());
912 
913             while (fPurgeableQueue.count()) {
914                 *sortedPurgeableResources.append() = fPurgeableQueue.peek();
915                 fPurgeableQueue.pop();
916             }
917 
918             SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
919                      CompareTimestamp);
920 
921             // Pick resources out of the purgeable and non-purgeable arrays based on lowest
922             // timestamp and assign new timestamps.
923             int currP = 0;
924             int currNP = 0;
925             while (currP < sortedPurgeableResources.count() &&
926                    currNP < fNonpurgeableResources.count()) {
927                 uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
928                 uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
929                 SkASSERT(tsP != tsNP);
930                 if (tsP < tsNP) {
931                     sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
932                 } else {
933                     // Correct the index in the nonpurgeable array stored on the resource post-sort.
934                     *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
935                     fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
936                 }
937             }
938 
939             // The above loop ended when we hit the end of one array. Finish the other one.
940             while (currP < sortedPurgeableResources.count()) {
941                 sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
942             }
943             while (currNP < fNonpurgeableResources.count()) {
944                 *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
945                 fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
946             }
947 
948             // Rebuild the queue.
949             for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
950                 fPurgeableQueue.insert(sortedPurgeableResources[i]);
951             }
952 
953             this->validate();
954             SkASSERT(count == this->getResourceCount());
955 
956             // count should be the next timestamp we return.
957             SkASSERT(fTimestamp == SkToU32(count));
958         }
959     }
960     return fTimestamp++;
961 }
962 
963 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
964     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
965         fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
966     }
967     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
968         fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
969     }
970 }
971 
972 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, GrGpuResourceTag tag) const {
973     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
974         if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
975             fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
976         }
977     }
978     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
979         if (tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
980             fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
981         }
982     }
983 }
984 
985 #if GR_CACHE_STATS
986 void GrResourceCache::getStats(Stats* stats) const {
987     stats->reset();
988 
989     stats->fTotal = this->getResourceCount();
990     stats->fNumNonPurgeable = fNonpurgeableResources.count();
991     stats->fNumPurgeable = fPurgeableQueue.count();
992 
993     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
994         stats->update(fNonpurgeableResources[i]);
995     }
996     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
997         stats->update(fPurgeableQueue.at(i));
998     }
999 }
1000 
1001 #if GR_TEST_UTILS
1002 void GrResourceCache::dumpStats(SkString* out) const {
1003     this->validate();
1004 
1005     Stats stats;
1006 
1007     this->getStats(&stats);
1008 
1009     float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;
1010 
1011     out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
1012     out->appendf("\t\tEntry Count: current %d"
1013                  " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
1014                  stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
1015                  stats.fScratch, fHighWaterCount);
1016     out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
1017                  SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
1018                  SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
1019 }
1020 
1021 void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
1022                                              SkTArray<double>* values) const {
1023     this->validate();
1024 
1025     Stats stats;
1026     this->getStats(&stats);
1027 
1028     keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
1029 }
1030 #endif // GR_TEST_UTILS
1031 #endif // GR_CACHE_STATS
1032 
1033 #ifdef SK_DEBUG
1034 void GrResourceCache::validate() const {
1035     // Reduce the frequency of validations for large resource counts.
1036     static SkRandom gRandom;
1037     int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
1038     if (~mask && (gRandom.nextU() & mask)) {
1039         return;
1040     }
1041 
1042     struct Stats {
1043         size_t fBytes;
1044         int fBudgetedCount;
1045         size_t fBudgetedBytes;
1046         int fLocked;
1047         int fScratch;
1048         int fCouldBeScratch;
1049         int fContent;
1050         const ScratchMap* fScratchMap;
1051         const UniqueHash* fUniqueHash;
1052 
1053         Stats(const GrResourceCache* cache) {
1054             memset(this, 0, sizeof(*this));
1055             fScratchMap = &cache->fScratchMap;
1056             fUniqueHash = &cache->fUniqueHash;
1057         }
1058 
1059         void update(GrGpuResource* resource) {
1060             fBytes += resource->gpuMemorySize();
1061 
1062             if (!resource->resourcePriv().isPurgeable()) {
1063                 ++fLocked;
1064             }
1065 
1066             const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
1067             const GrUniqueKey& uniqueKey = resource->getUniqueKey();
1068 
1069             if (resource->cacheAccess().isUsableAsScratch()) {
1070                 SkASSERT(!uniqueKey.isValid());
1071                 SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
1072                 SkASSERT(!resource->cacheAccess().hasRef());
1073                 ++fScratch;
1074                 SkASSERT(fScratchMap->countForKey(scratchKey));
1075                 SkASSERT(!resource->resourcePriv().refsWrappedObjects());
1076             } else if (scratchKey.isValid()) {
1077                 SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
1078                          uniqueKey.isValid() || resource->cacheAccess().hasRef());
1079                 SkASSERT(!resource->resourcePriv().refsWrappedObjects());
1080                 SkASSERT(!fScratchMap->has(resource, scratchKey));
1081             }
1082             if (uniqueKey.isValid()) {
1083                 ++fContent;
1084                 SkASSERT(fUniqueHash->find(uniqueKey) == resource);
1085                 SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
1086                          resource->resourcePriv().refsWrappedObjects());
1087             }
1088 
1089             if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
1090                 ++fBudgetedCount;
1091                 fBudgetedBytes += resource->gpuMemorySize();
1092             }
1093         }
1094     };
1095 
1096     {
1097         int count = 0;
1098         fScratchMap.foreach([&](const GrGpuResource& resource) {
1099             SkASSERT(resource.cacheAccess().isUsableAsScratch());
1100             count++;
1101         });
1102         SkASSERT(count == fScratchMap.count());
1103     }
1104 
1105     Stats stats(this);
1106     size_t purgeableBytes = 0;
1107     int numBudgetedResourcesFlushWillMakePurgeable = 0;
1108 
1109     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1110         SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
1111                  fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
1112         SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
1113         SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
1114         if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
1115             !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
1116             fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
1117             ++numBudgetedResourcesFlushWillMakePurgeable;
1118         }
1119         stats.update(fNonpurgeableResources[i]);
1120     }
1121     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1122         SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
1123         SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
1124         SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
1125         stats.update(fPurgeableQueue.at(i));
1126         purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
1127     }
1128 
1129     SkASSERT(fCount == this->getResourceCount());
1130     SkASSERT(fBudgetedCount <= fCount);
1131     SkASSERT(fBudgetedBytes <= fBytes);
1132     SkASSERT(stats.fBytes == fBytes);
1133     SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
1134              numBudgetedResourcesFlushWillMakePurgeable);
1135     SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
1136     SkASSERT(stats.fBudgetedCount == fBudgetedCount);
1137     SkASSERT(purgeableBytes == fPurgeableBytes);
1138 #if GR_CACHE_STATS
1139     SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
1140     SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
1141     SkASSERT(fBytes <= fHighWaterBytes);
1142     SkASSERT(fCount <= fHighWaterCount);
1143     SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
1144     SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
1145 #endif
1146     SkASSERT(stats.fContent == fUniqueHash.count());
1147     SkASSERT(stats.fScratch == fScratchMap.count());
1148 
1149     // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
1150     // calls. This will be fixed when subresource registration is explicit.
1151     // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
1152     // SkASSERT(!overBudget || locked == count || fPurging);
1153 }
1154 #endif // SK_DEBUG
1155 
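// Each resource caches its index into whichever container currently holds it. These helpers
// check that the stored index really points back at the resource in the purgeable queue or the
// nonpurgeable array.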
1156 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
1157     int index = *resource->cacheAccess().accessCacheIndex();
1158     if (index < 0) {
1159         return false;
1160     }
1161     if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1162         return true;
1163     }
1164     if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1165         return true;
1166     }
1167     SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
1168     return false;
1169 }
1170 
1171 bool GrResourceCache::isInPurgeableCache(const GrGpuResource* resource) const {
1172     int index = *resource->cacheAccess().accessCacheIndex();
1173     if (index < 0) {
1174         return false;
1175     }
1176     if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1177         return true;
1178     }
1179     SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
1180     return false;
1181 }
1182 
1183 bool GrResourceCache::isInNonpurgeableCache(const GrGpuResource* resource) const {
1184     int index = *resource->cacheAccess().accessCacheIndex();
1185     if (index < 0) {
1186         return false;
1187     }
1188     if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1189         return true;
1190     }
1191     SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
1192     return false;
1193 }
1194 
1195 #if GR_TEST_UTILS
1196 
1197 int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
1198     int count = 0;
1199     fUniqueHash.foreach([&](const GrGpuResource& resource){
1200         if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
1201             ++count;
1202         }
1203     });
1204     return count;
1205 }
1206 
1207 void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
1208     fTimestamp = newTimestamp;
1209 }
1210 
1211 #endif // GR_TEST_UTILS
1212